repo_name (string, lengths 5–114) | repo_url (string, lengths 24–133) | snapshot_id (string, length 40) | revision_id (string, length 40) | directory_id (string, length 40) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k–683M, nullable) | star_events_count (int64, 0–22.6k) | fork_events_count (int64, 0–4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, lengths 1–13.2k) | num_files (int64, 1–13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
AWPorter/WebProject_APorter | https://github.com/AWPorter/WebProject_APorter | 6033148f8fb11a78160ff4d92959e19cf46ab806 | 1ac17a6cf5ef0be5598c82867e2d35c13569ca6f | c5fa098310cb81a8d9b451c20062f03a2288500b | refs/heads/master | 2017-12-08T17:49:22.353019 | 2017-04-25T02:52:55 | 2017-04-25T02:52:55 | 79,261,565 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6646341681480408,
"alphanum_fraction": 0.6707317233085632,
"avg_line_length": 22.4761905670166,
"blob_id": "af3ad302addba64a62b0934436c557ea0dff2f99",
"content_id": "4b611593c5bc03440c591739fb0101b357fc33ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 21,
"path": "/ScrapeWithSelenium.py",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nimport datetime\n\ntoday = str(datetime.datetime.now().date())\n\nsites = {'Solaris With Selenium ': 'http://www.solarisnetwork.com/cat2/used-sound-consoles?page=1&sort=pl2h&f_type=all',\n\n }\n\nbrowser = webdriver.Chrome()\n\nfor name, link in sites.items():\n response = browser.get(link)\n html = browser.page_source\n\n fileName = today + '.' + name + '.html'\n outfile = open(fileName, 'w')\n outfile.write(html)\n outfile.close()\n\nbrowser.quit()"
},
{
"alpha_fraction": 0.5424032807350159,
"alphanum_fraction": 0.5526518225669861,
"avg_line_length": 34.97235107421875,
"blob_id": "528583bfbae7294b47ba780c0a9a53419f55a0b8",
"content_id": "d22774cf17c2d54ecab034b943b75b626cacdb99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7806,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 217,
"path": "/SitesToJSON.py",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "import requests\nimport datetime\nfrom bs4 import BeautifulSoup\nimport json\nfrom jsonmerge import merge\n# import DataAverage\nimport string\nimport csv\n\nWorkingDirectory = 'Users/Andy/desktop/Belmont/Spring_2017/Data_Structures/CSC_3130/WebProject_APorter/'\n# Dictionary of sites to scrape\nsites = { 'Solaris': 'http://www.solarisnetwork.com/cat2/used-sound-consoles?page=1&sort=pl2h&f_type=all',\n 'SoundBroker': 'http://www.soundbroker.com/console/?start=0&sort=7'}\n\n# List of scraped sites to be written to JSON Files at end of script\npagesToJSON = []\n\n# Create a list of lists for responses\n\nlistOfResponses = []\n\ntoday = str(datetime.datetime.now().date())\n\nSolarisPages = []\nBrokerPages = []\n\n\n#Main Run Loop. Steps through methods in order\n\n\ndef scrape():\n x = 1\n #SolarisPages = []\n while x <= 20:\n urlSolaris = 'http://www.solarisnetwork.com/cat2/used-sound-consoles?page=' + str(x) + '&sort=pl2h&f_type=all'\n pageSolaris = requests.get(urlSolaris)\n SolarisPages.append(pageSolaris)\n x += 1\n\n\n y = 1\n # x = (y*40)-40\n #BrokerPages = []\n while y <= 30:\n x = (y * 40) - 40\n urlBroker = 'http://www.soundbroker.com/console/?start=' + str(x) + '&sort=7'\n pageBroker = requests.get(urlBroker)\n BrokerPages.append(pageBroker)\n # x += 40\n y += 1\n\n\n# Parse Solaris url\n# 'position' marks the object which contains all the text\n# All other data is found in its relationship to 'position'\n#responses = []\n\ndef parse():\n responses = []\n for name in sites.keys():\n if 'Solaris' in name:\n for page in SolarisPages:\n soupSolaris = BeautifulSoup(page.content, 'lxml')\n\n for position in soupSolaris.find_all('table', class_='product-thumb-info'):\n titleLocate = position.find('h4')\n itemTitle = titleLocate.find('a').string\n manLocate = position.find('td', height = '20')\n manufacturer = manLocate.find('a').string\n # manufacturer = str.upper(manufacturer)\n try:\n availablelocate = position.find('td', height = '20', class_= 'text-left')\n available = availablelocate.find_next('td', height = '20', class_= 'text-left').string.split(' Available')[0]\n except:\n available = ('Call for Info')\n\n link = position.find('a')['href']\n\n try:\n priceLocate = position.find('td', height='20', class_='text-left price')\n price = priceLocate.find('b').string.split(' USD')[0]\n except:\n price = ('Call for Info')\n\n\n\n # Make changes to response for Solaris\n responses.append({'Item': itemTitle, 'Manufacturer': manufacturer, 'Number Available': available,\n 'Price': price, 'Link': link, 'ID': '-1', 'Site': 'Solaris', 'Date Collected': today\n })\n\n # Write response to JSON file\n SolarispostingsFile = '/Users/Andy/Desktop/Belmont/Spring_2017/Data_Structures/CSC_3130/WebProject_APorter/JSON/Solaris/' + today + '.' 
+ name + '.json'\n pagesToJSON.append(SolarispostingsFile)\n listOfResponses.append(responses)\n\n# Parse Sound Broker url\n#'position' marks the object which contains all the text\n# All other data is found in its relationship to 'position'\n responses = []\n BrokerHome = sites['SoundBroker']\n for name in sites.keys():\n if 'SoundBroker' in name:\n for page in BrokerPages:\n soupBroker = BeautifulSoup(page.content, 'lxml')\n\n\n for position in soupBroker.find_all('div', class_='row-fluid bos-row-1'):\n position = position.find('div', class_='span9')\n try:\n titleLocate = position.find('div', class_='payingListing')\n link = titleLocate.find('a')['href']\n link = BrokerHome + link\n titleLocate = titleLocate.find('a', class_='title-link')\n itemTitle = titleLocate.string\n\n except:\n titleLocate = position.find('div', class_='listing')\n link = titleLocate.find('a')['href']\n link = BrokerHome + link\n titleLocate = titleLocate.find('a', class_='title-link')\n itemTitle = titleLocate.string\n\n manLocate = position.find('table', class_ = 'listing-info')\n manLocate = manLocate.find('td')\n manLocate = manLocate.find('strong')\n manLocate = manLocate.find_next('strong')\n manufacturer = manLocate.string\n try:\n pricelocate = position.find('td', class_='listing-info-price')\n pricelocate = pricelocate.find('strong')\n price = pricelocate.find('a').string\n except:\n pricelocate = position.find('td', class_='listing-info-price')\n price = pricelocate.find('strong').string\n else:\n price = ('Call for Info')\n IDLocate = position.find('td', class_='listing-info-id')\n ID = IDLocate.find('strong').string\n\n # linkLocate = position.find('div', class_='payingListing')\n # link = linkLocate.find('a')['href']\n\n\n # Make changes to response for Solaris\n responses.append(\n {'Item': itemTitle, 'Manufacturer': manufacturer, 'ID': ID, 'Number Available': '-1',\n 'Price': price, 'Link': link, 'Site': 'SoundBroker', 'Date Collected': today\n })\n\n # Write response to JSON file\n BrokerpostingsFile = '/Users/Andy/Desktop/Belmont/Spring_2017/Data_Structures/CSC_3130/WebProject_APorter/JSON/SoundBroker/' + today + '.' + name + '.json'\n pagesToJSON.append(BrokerpostingsFile)\n listOfResponses.append(responses)\n\n#Write response to JSON file in another location\n#postingsFile = '/APBriefs/' + today + '.APNewsBriefs.json'\n\n for site in pagesToJSON:\n ref = pagesToJSON.index(site)\n response = listOfResponses[ref]\n with open(site, 'w') as outfile:\n json.dump(response, outfile, sort_keys=True, indent=2)\n\n outfile.close()\n\ndef jsonDump():\n\n filesToPrint = []\n path = '/Users/Andy/Desktop/Belmont/Spring_2017/Data_Structures/CSC_3130/WebProject_APorter/JSON'\n\n file1 = path + '/Solaris/' + today + '.Solaris.json'\n file2 = path + '/SoundBroker/' + today + '.SoundBroker.json'\n\n\n with open(file1) as Sol:\n Solaris = json.load(Sol)\n for x in Solaris:\n filesToPrint.append(x)\n\n with open(file2) as Sou:\n SoundBroker = json.load(Sou)\n for x in SoundBroker:\n filesToPrint.append(x)\n\n\n # If combo file already exists append it, if not pass this and create a new file\n try:\n fileCombo = path + '/Combined.json'\n with open(fileCombo) as Com:\n Combo = json.load(Com)\n for x in Combo:\n filesToPrint.append(x)\n\n except:\n pass\n\n\n Location = path + '/Combined.json'\n with open(Location, 'w') as out:\n json.dump(filesToPrint, out, sort_keys=True, indent=2)\n\n out.close()\n\n\n\nif __name__ == '__main__':\n\n scrape()\n parse()\n jsonDump()\n\n\n\n\n\n# list(list(dict), list(dict))\n"
},
{
"alpha_fraction": 0.661596953868866,
"alphanum_fraction": 0.6768060922622681,
"avg_line_length": 31.9375,
"blob_id": "12910e29a11ff3021eaeac5b014224e5c2766f11",
"content_id": "a5c3d532a109d08e9a657df3f1abdf9bad372ffd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 16,
"path": "/ScrapeWithRequests.py",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "import requests\nimport datetime\n\ntoday = str(datetime.datetime.now()).split(' ')[0]\n\nsites = { 'Solaris Consoles_Low2High': 'http://www.solarisnetwork.com/cat2/used-sound-consoles?page=1&sort=pl2h&f_type=all',\n 'SoundBroker Consoles_Low2High': 'http://www.soundbroker.com/console/?start=0&sort=7'}\n\nfor name, link in sites.items():\n response = requests.get(link)\n html = response.content\n\n fileName = today + '.' + name + '.html'\n outfile = open(fileName, \"wb\")\n outfile.write(html)\n outfile.close()"
},
{
"alpha_fraction": 0.713168203830719,
"alphanum_fraction": 0.7222946286201477,
"avg_line_length": 26.428571701049805,
"blob_id": "dde378fb2c94237b677c42cc45929e3a4fb894dd",
"content_id": "547ab1d7a41de231c4df77e24adb759a76c9d27a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 28,
"path": "/ScrapeToHTML.py",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "import requests\nimport datetime\nfrom bs4 import BeautifulSoup\n\nnext = True\nSolarisPages = []\nurlSolaris = 'http://www.solarisnetwork.com/cat2/used-sound-consoles?page=0&sort=pl2h&f_type=all'\npageSolaris = requests.get(urlSolaris)\nSolarisPages.append(pageSolaris)\nif #link = null\n next = False\nwhile next:\n urlSolaris = 'http://www.solarisnetwork.com/cat2/used-sound-consoles?page=0&sort=pl2h&f_type=all'\n pageSolaris = requests.get(urlSolaris)\n SolarisPages.append(pageSolaris)\n if #link = null\n next = False\n\n\ndef soup(url):\n browser = webdriver.Chrome()\n\n urlSel = url\n urlSel = browser.get(urlSel)\n soupAPNewsBriefs = BeautifulSoup(browser.page_source, 'lxml')\n\n browser.quit()\n pageSoup = BeautifulSoup(url.content, 'lxml')"
},
{
"alpha_fraction": 0.7465753555297852,
"alphanum_fraction": 0.8150684833526611,
"avg_line_length": 48,
"blob_id": "58552808addf42efeffa618073c62a59bd45161f",
"content_id": "e6c9a7a0a2ca9eb76cc3a545c7066e5ae7d545ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 3,
"path": "/ScheduleScrape",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n/Users/Andy/anaconda/bin/python3.5 /Users/Andy/Desktop/Belmont/Spring_2017/Data_Structures/CSC_3130/WebProject_APorter/SitesToJSON.py"
},
{
"alpha_fraction": 0.7231638431549072,
"alphanum_fraction": 0.7457627058029175,
"avg_line_length": 38.33333206176758,
"blob_id": "01f5a4f3d2742d9ac76a4ce373e1711ff14b2ffb",
"content_id": "6fc46faec0192c6aa861ad69e724225f9306e098",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 9,
"path": "/DataAverage.py",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "import json\nimport datetime\n\nSolarisPast = '/Users/Andy/Desktop/Belmont/Spring_2017/Data_Structures/CSC_3130/WebProject_APorter/JSON/SoundBroker/''\ndef run(SolarisFile, BrokerFile):\n with open(SolarisFile) as data_file:\n SolarisNewData = json.load(data_file)\n with open(BrokerFile) as data_file:\n BrokerNewData = json.load(data_file)\n"
},
{
"alpha_fraction": 0.6229507923126221,
"alphanum_fraction": 0.6229507923126221,
"avg_line_length": 13.75,
"blob_id": "3887ba4762b207a4b1d13e48186a182fc6c25547",
"content_id": "2ce1f2c6f4d4bfe9cfc884e08aa6be4d8984658e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 4,
"path": "/JSONMerge.py",
"repo_name": "AWPorter/WebProject_APorter",
"src_encoding": "UTF-8",
"text": "import JSON\n\ndef Merge(filesToMerge = [], *args):\n pass\n\n\n"
}
] | 7 |
long9930/6250-Project | https://github.com/long9930/6250-Project | 330df9be45a2d0736734ab51a3e5abf51e51820d | ef6a1424ce5f8be64d25dc13afe7321ed56e75a3 | 9d9942fee1224bc07f4075cc29a31cc2d42faf3c | refs/heads/master | 2020-04-10T15:14:00.261682 | 2018-12-10T05:34:30 | 2018-12-10T05:34:30 | 161,102,090 | 0 | 0 | null | 2018-12-10T01:57:35 | 2018-12-09T16:57:17 | 2018-12-09T16:57:12 | null |
[
{
"alpha_fraction": 0.5894448757171631,
"alphanum_fraction": 0.6028146743774414,
"avg_line_length": 40.64495086669922,
"blob_id": "8cf6915c51850674d729a46013c18e51fff8950a",
"content_id": "93c14e7c74630ef4b771a0e7e81b449ab895e1fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12790,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 307,
"path": "/MyModel_v3.py",
"repo_name": "long9930/6250-Project",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom sklearn.model_selection import train_test_split\nimport sklearn as sk\nimport tensorflow as tf\nimport os, random, time\nfrom plots import plot_learning_curves\nfrom methods import cnn_AlexNet, cnn_LeNet5\n\nill_size = 10000 # size of ill data to be used\nnormal_size = 10000 # size of normal data to be used\ndisease = \"Cardiomegaly\" # name of target disease\nmetadata_filepath = \"data/Data_Entry_2017.csv\" # metadata file path\nimage_path = \"images\" # images directory\nsummaries_dir = \"logs\" # training and testing logs directory\nsave_dir = \"models\" # direcotry to save and reload model parameters\nGPU = True # whether GPU is avaliable or not\nmethod=\"LeNet5\" #AlexNet, LeNet5\"\n# Hyperparameters\nn_classes = 2 # number of classes, here only 2 for normal and ill cases\nbatch_size = 32 # batch size for batch training\nlearning_rate = 0.001\nalpha = 0.5\ntraining_epochs = 1 # repeat training and testing times\ndisplay_step = 100 # number of steps to print traing/testing accuracy result\ndropout = 0.1 # To prevent overfitting\nratio = 0.01 # variable initalization ratio \n\n\n\"\"\"\nSectio below load metadata and generate lables\n\"\"\"\nmetadata = pd.read_csv(metadata_filepath)\nfull_path = np.vectorize(lambda image_path, image_name: os.path.join(image_path, image_name))\nmetadata[\"Image Index\"] = full_path(image_path, metadata[\"Image Index\"])\n\nmetadata[\"class\"] = \"NORMAL\"\nmetadata.loc[metadata[\"Finding Labels\"].str.contains(disease), [\"class\"]] = \"ILL\"\nmetadata = metadata[[\"Image Index\",\"class\"]]\nill_data = metadata[metadata[\"class\"]=='ILL'][:ill_size] #range, take out if run for all samples\nnormal_data = metadata[metadata[\"class\"]==\"NORMAL\"][:normal_size] #range\nmetadata = ill_data.append(normal_data)\ntrain_metadata, test_metadata = train_test_split(metadata, test_size=0.05, shuffle=True)#split for train,test\ntrain_metadata, valid_metadata = train_test_split(train_metadata, test_size=0.1, shuffle=True)#split for train,valid\ntotal_count = metadata.shape[0]\nnormal_count = metadata[metadata[\"class\"] == \"NORMAL\"].count()[0]\nill_count = metadata[metadata[\"class\"] == \"ILL\"].count()[0]\n\nclass_weight = tf.constant([normal_count/total_count, ill_count/total_count])\n\n\n\"\"\"\nSection below define the procedure of slicing total data into batches and then laod image data\n\"\"\"\nwith tf.device('/device:GPU:0' if GPU else '/cpu:0'):\n\n # Reads an image from a file, decodes it into a dense tensor, and resizes it\n # to a fixed shape.\n def _parse_function(filename, label):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_png(image_string, channels = 1)\n image_decoded = tf.image.resize_images(image_decoded,[256, 256])\n image_decoded = tf.cast(image_decoded, tf.float32)\n image_decoded = tf.image.per_image_standardization(image_decoded)\n #image_decoded.set_shape((256, 256, 1))\n return image_decoded, label\n\n train_data = tf.data.Dataset.from_tensor_slices(\n (train_metadata[\"Image Index\"].values, \n pd.get_dummies(train_metadata[\"class\"]).values))\n\n # for a small batch size\n train_data = train_data.map(_parse_function, num_parallel_calls=4)\n train_data = train_data.batch(batch_size)\n train_data = train_data.prefetch(1)\n \n evaluation_data = tf.data.Dataset.from_tensor_slices(\n (valid_metadata[\"Image Index\"].values, \n pd.get_dummies(valid_metadata[\"class\"]).values))\n 
evaluation_data = evaluation_data.map(_parse_function, num_parallel_calls=4)\n evaluation_data = evaluation_data.batch(batch_size)\n evaluation_data = evaluation_data.prefetch(1) \n \n\n test_data = tf.data.Dataset.from_tensor_slices(\n (test_metadata[\"Image Index\"].values, \n pd.get_dummies(test_metadata[\"class\"]).values))\n test_data = test_data.map(_parse_function, num_parallel_calls=4)\n test_data = test_data.batch(batch_size)\n test_data = test_data.prefetch(1)\n \n iterator = tf.data.Iterator.from_structure(train_data.output_types, \n train_data.output_shapes)\n x, y = iterator.get_next()\n\n train_init = iterator.make_initializer(train_data) # Inicializador para train_data\n evaluate_init = iterator.make_initializer(evaluation_data) # Inicializador para valid_data\n test_init = iterator.make_initializer(test_data) # Inicializador para test_data\n \n # Visualize input x\n tf.summary.image(\"input\", x, batch_size)\n\n \n\n\"\"\"\nSection below define CNN structure, train procedure and test procedure\n\"\"\"\nwith tf.device('/device:GPU:0' if GPU else '/cpu:0'):\n if method==\"AlexNet\":\n pred, keep_prob=cnn_AlexNet(x, ratio, n_classes)\n \n if method==\"LeNet5\":\n pred, keep_prob=cnn_LeNet5(x, ratio, n_classes)\n \n with tf.name_scope(\"cross_entropy\"):\n # softmax\n softmax = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)\n cost = tf.reduce_mean(softmax)\n tf.summary.scalar(\"cross_entropy\", cost)\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n with tf.name_scope(\"accuracy\"):\n # Accuracy\n predicted = tf.argmax(pred, 1)\n actual = tf.argmax(y, 1)\n correct_pred = tf.equal(predicted, actual)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar(\"accuracy\", accuracy)\n\n\n\n\n\"\"\"\nSection below initialize CNN glopbal variables and configuration\n\"\"\"\nwith tf.device('/device:GPU:0' if GPU else '/cpu:0'):\n # Get all summary\n summ = tf.summary.merge_all()\n init = tf.global_variables_initializer()\n config = tf.ConfigProto(allow_soft_placement = True)\n saver = tf.train.Saver()\n save_name = disease + \"_model\"\n save_path = os.path.join(save_dir, save_name)\n\n\n\n\n \n\"\"\"\nSection below starts iteration of training and testing\n\"\"\"\n# Session start\nwith tf.Session(config=config) as sess:\n train_costs, train_accuracies=[],[]\n valid_costs, valid_accuracies=[],[]\n\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n train_writer = tf.summary.FileWriter(summaries_dir + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(summaries_dir + '/test', sess.graph)\n \n # Required to get the filename matching to run.\n try:\n saver.restore(sess, save_path)\n print(\"Model loaded with file path: %s\" % save_path)\n except:\n sess.run(init)\n print(\"Saved model file path: %s doesn't exit, using random initialization\" % save_path)\n\n total_epoch_time = 0\n step = 1\n # Compute epochs.\n for i in range(training_epochs):\n print(\"epoch: {}\\n\".format(i)) \n print(\"\\n\")\n epoch_start = time.time()\n sess.run(train_init)\n epoch_step = 0\n avg_acc = 0\n avg_precision = 0\n avg_recall = 0\n avg_f1 = 0\n avg_loss = 0\n try:\n while True:\n _, acc, y_pred, y_true, loss, summary_str = sess.run(\n [optimizer, accuracy, predicted, actual, cost, summ],\n feed_dict={keep_prob: 1-dropout},\n options=run_options,\n run_metadata = run_metadata) \n precision = sk.metrics.precision_score(y_true, y_pred)\n recall = 
sk.metrics.recall_score(y_true, y_pred)\n f1 = sk.metrics.f1_score(y_true, y_pred)\n train_writer.add_summary(summary_str, step)\n \n if step % display_step == 0:\n train_writer.add_run_metadata(run_metadata,\"step {}\".format(step))\n print(\"step: {}\".format(step))\n print(\"accuracy: {}\".format(acc))\n print(\"precision: {}\".format(precision))\n print(\"recall: {}\".format(recall))\n print(\"f1_score: {}\".format(f1))\n print(\"loss: {}\".format(loss))\n print(\"\\n\")\n avg_acc += acc\n avg_precision += precision\n avg_recall += recall\n avg_f1 += f1\n avg_loss += loss\n step += 1\n epoch_step += 1\n train_costs.append(avg_loss/epoch_step)\n train_accuracies.append(avg_acc/epoch_step)\n except tf.errors.OutOfRangeError:\n\n epoch_time = time.time() - epoch_start\n total_epoch_time += epoch_time\n print(\"epoch finished in {} seconds\".format(epoch_time))\n print(\"Average epoch accuracy is {:.2f}%\".format((avg_acc / epoch_step) * 100))\n print(\"Average epoch precision is {:.2f}%\".format((avg_precision / epoch_step) *100))\n print(\"Average epoch recall is {:.2f}%\".format((avg_recall / epoch_step) *100))\n print(\"Average epoch f1-score is {:.2f}\".format((avg_f1 / epoch_step)))\n print(\"Average epoch loss is {:.2f}\".format(avg_loss / epoch_step))\n # save trained model parameters\n save_name = disease + \"_model\"\n save_path = os.path.join(save_dir, save_name)\n saver.save(sess, save_path)\n print(\"Model saved in path: %s\" % save_path)\n\n #Evaluation\n print(\"Evaluation\\n\")\n sess.run(evaluate_init)\n avg_acc = 0\n avg_precision = 0\n avg_recall = 0\n avg_f1 = 0\n avg_loss = 0\n test_step=0\n try:\n while True:\n acc, y_pred, y_true, loss, summary_str = sess.run(\n [accuracy, predicted, actual, cost, summ],\n feed_dict={keep_prob: 1.})\n precision = sk.metrics.precision_score(y_true, y_pred)\n recall = sk.metrics.recall_score(y_true, y_pred)\n f1 = sk.metrics.f1_score(y_true, y_pred)\n avg_acc += acc\n avg_precision += precision\n avg_recall += recall\n avg_f1 += f1\n avg_loss += loss\n test_step += 1\n test_writer.add_summary(summary_str, test_step)\n valid_costs.append(avg_loss/epoch_step)\n valid_accuracies.append(acc/epoch_step) \n except tf.errors.OutOfRangeError: \n print(\"Average test set accuracy over {} iterations is {:.2f}%\".format(test_step,(avg_acc / test_step) * 100))\n print(\"Average epoch precision is {:.2f}%\".format((avg_precision / test_step) *100))\n print(\"Average epoch recall is {:.2f}%\".format((avg_recall / test_step) *100))\n print(\"Average epoch f1-score is {:.2f}\".format((avg_f1 / test_step)))\n print(\"Average test set loss over {} iterations is {:.2f}\".format(test_step,(avg_loss / test_step)))\n print(\"\\n\")\n \n #plot\n plot_learning_curves(train_costs, valid_costs, train_accuracies, valid_accuracies, loss_fig=disease+\"_Loss.png\", accuracy_fig=disease+\"_accuracy.png\") \n \n # Test \n \n print(\"Test\\n\")\n sess.run(test_init)\n avg_acc = 0\n avg_precision = 0\n avg_recall = 0\n avg_f1 = 0\n avg_loss = 0\n test_step=0\n try:\n while True:\n acc, y_pred, y_true, loss, summary_str = sess.run(\n [accuracy, predicted, actual, cost, summ],\n feed_dict={keep_prob: 1.})\n precision = sk.metrics.precision_score(y_true, y_pred)\n recall = sk.metrics.recall_score(y_true, y_pred)\n f1 = sk.metrics.f1_score(y_true, y_pred)\n avg_acc += acc\n avg_precision += precision\n avg_recall += recall\n avg_f1 += f1\n avg_loss += loss\n test_step += 1\n test_writer.add_summary(summary_str, test_step) \n except tf.errors.OutOfRangeError: \n 
print(\"Average test set accuracy over {} iterations is {:.2f}%\".format(test_step,(avg_acc / test_step) * 100))\n print(\"Average epoch precision is {:.2f}%\".format((avg_precision / test_step) *100))\n print(\"Average epoch recall is {:.2f}%\".format((avg_recall / test_step) *100))\n print(\"Average epoch f1-score is {:.2f}\".format((avg_f1 / test_step)))\n print(\"Average test set loss over {} iterations is {:.2f}\".format(test_step,(avg_loss / test_step)))\n print(\"\\n\")\n \n\n print(\"Average epoch time: {} seconds\".format(total_epoch_time/training_epochs))\n train_writer.add_run_metadata(run_metadata,\"mySess\")\n train_writer.close()\n test_writer.close()\n sess.close()\n\n\n\n\n"
},
{
"alpha_fraction": 0.7435897588729858,
"alphanum_fraction": 0.7948718070983887,
"avg_line_length": 14.600000381469727,
"blob_id": "8764d6d6cd28a3cb3ad9d76d4d10814adc2aad01",
"content_id": "2d4819078dbbccf4e5e602c9a3e811b7a2efdf4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 78,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 5,
"path": "/README.md",
"repo_name": "long9930/6250-Project",
"src_encoding": "UTF-8",
"text": "# 6250-Project\nX-ray data set for diagnosis\n\nInstruction:\nFiles and functions\n"
},
{
"alpha_fraction": 0.5569993853569031,
"alphanum_fraction": 0.6229572892189026,
"avg_line_length": 36.33088302612305,
"blob_id": "04054789787adf314893ddbbe7e094a2c67c0be1",
"content_id": "bcaac915d31375b175dd2838007d11574e822e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5079,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 136,
"path": "/methods.py",
"repo_name": "long9930/6250-Project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 23:16:29 2018\n\n@author: chenming\n\"\"\"\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom sklearn.model_selection import train_test_split\nimport sklearn as sk\nimport tensorflow as tf\nimport os, random, time\nfrom plots import plot_learning_curves\n\ndef cnn_AlexNet(x,ratio = 0.01,n_classes=2):\n def conv2d(img, w, b, k = 1):\n return tf.nn.tanh(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, k, k, 1], padding='SAME'),b))\n\n def max_pool(img, k):\n return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n wc1 = tf.Variable(tf.random_normal([11, 11, 1, 32])*ratio, name=\"wc1\")\n bc1 = tf.Variable(tf.random_normal([32])*ratio, name=\"bc1\")\n # stride 64 x 64\n # pool 32 x 32\n wc2 = tf.Variable(tf.random_normal([3, 3, 32, 128])*ratio, name=\"wc2\")\n bc2 = tf.Variable(tf.random_normal([128])*ratio, name=\"bc2\")\n # pool 16 x 16\n wc3 = tf.Variable(tf.random_normal([3, 3, 128, 96])*ratio, name=\"wc3\")\n bc3 = tf.Variable(tf.random_normal([96])*ratio, name=\"bc3\")\n # pool 16 x 16\n wc4 = tf.Variable(tf.random_normal([3, 3, 96, 64])*ratio, name=\"wc4\")\n bc4 = tf.Variable(tf.random_normal([64])*ratio, name=\"bc4\")\n # pool 8x8\n wd1 = tf.Variable(tf.random_normal([8*8*64, 512])*ratio, name=\"wd1\")\n bd1 = tf.Variable(tf.random_normal([512])*ratio, name=\"bd1\")\n wd2 = tf.Variable(tf.random_normal([512, 256])*ratio, name=\"wd2\")\n bd2 = tf.Variable(tf.random_normal([256])*ratio, name=\"bd2\")\n wout = tf.Variable(tf.random_normal([256, n_classes])*ratio, name=\"wout\")\n bout = tf.Variable(tf.random_normal([n_classes])*ratio, name=\"bout\")\n\n # conv layer\n #x = tf.Print(x, [x])\n conv1 = conv2d(x,wc1,bc1, k = 4)\n conv1 = max_pool(conv1, k=2)\n # conv layer\n conv2 = conv2d(conv1,wc2,bc2)\n conv2 = max_pool(conv2, k=2)\n # conv2 = avg_pool(conv2, k=2)\n\n # dropout to reduce overfitting\n keep_prob = tf.placeholder(tf.float32)\n conv2 = tf.nn.dropout(conv2, keep_prob)\n\n # conv layer\n conv3= conv2d(conv2,wc3,bc3)\n # dropout to reduce overfitting\n conv3 = tf.nn.dropout(conv3, keep_prob)\n\n # conv layer\n conv4 = conv2d(conv3,wc4,bc4)\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 64*64 matrix.\n conv4 = max_pool(conv4, k=2)\n\n # dropout to reduce overfitting\n conv4 = tf.nn.dropout(conv4, keep_prob)\n\n # fc 1\n dense1 = tf.reshape(conv4, [-1, wd1.get_shape().as_list()[0]])\n dense1 = tf.nn.tanh(tf.add(tf.matmul(dense1, wd1),bd1))\n dense1 = tf.nn.dropout(dense1, keep_prob)\n\n # fc 2\n dense2 = tf.reshape(dense1, [-1, wd2.get_shape().as_list()[0]])\n dense2 = tf.nn.tanh(tf.add(tf.matmul(dense2, wd2),bd2))\n dense2 = tf.nn.dropout(dense2, keep_prob)\n\n # prediction\n pred = tf.add(tf.matmul(dense2, wout), bout)\n pred = tf.Print(pred, [pred])\n return pred, keep_prob\n\n\ndef cnn_LeNet5(x,ratio = 0.01,n_classes=2):\n def conv2d(img, w, b, k = 1):\n return tf.nn.tanh(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, k, k, 1], padding='SAME'),b))\n def avg_pool(img, k):\n return tf.nn.avg_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n\n wc1 = tf.Variable(tf.random_normal([11, 11, 1, 16])*ratio, name=\"wc1\")\n bc1 = tf.Variable(tf.random_normal([16])*ratio, name=\"bc1\")\n # stride 64 x 64\n # pool 32 x 32\n wc2 = tf.Variable(tf.random_normal([11, 11, 16, 32])*ratio, name=\"wc2\")\n bc2 = 
tf.Variable(tf.random_normal([32])*ratio, name=\"bc2\")\n # pool 16 x 16\n\n wd1 = tf.Variable(tf.random_normal([8*8*32, 256])*ratio, name=\"wd1\")\n bd1 = tf.Variable(tf.random_normal([256])*ratio, name=\"bd1\")\n wd2 = tf.Variable(tf.random_normal([256, 128])*ratio, name=\"wd2\")\n bd2 = tf.Variable(tf.random_normal([128])*ratio, name=\"bd2\")\n wout = tf.Variable(tf.random_normal([128, n_classes])*ratio, name=\"wout\")\n bout = tf.Variable(tf.random_normal([n_classes])*ratio, name=\"bout\")\n\n # conv layer\n #x = tf.Print(x, [x])\n conv1 = conv2d(x,wc1,bc1, k = 1)\n conv1 = avg_pool(conv1, k=2)\n \n \n # conv layer\n conv2 = conv2d(conv1,wc2,bc2,k=1)\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 64*64 matrix.\n conv2 = avg_pool(conv2, k=2)\n # conv2 = avg_pool(conv2, k=2)\n\n # dropout to reduce overfitting\n keep_prob = tf.placeholder(tf.float32)\n conv2 = tf.nn.dropout(conv2, keep_prob)\n\n # fc 1\n dense1 = tf.reshape(conv2, [-1, wd1.get_shape().as_list()[0]])\n dense1 = tf.nn.tanh(tf.add(tf.matmul(dense1, wd1),bd1))\n dense1 = tf.nn.dropout(dense1, keep_prob)\n\n # fc 2\n dense2 = tf.reshape(dense1, [-1, wd2.get_shape().as_list()[0]])\n dense2 = tf.nn.tanh(tf.add(tf.matmul(dense2, wd2),bd2))\n dense2 = tf.nn.dropout(dense2, keep_prob)\n\n # prediction\n pred = tf.add(tf.matmul(dense2, wout), bout)\n pred = tf.Print(pred, [pred])\n return pred, keep_prob\n\n\n"
},
{
"alpha_fraction": 0.6796259880065918,
"alphanum_fraction": 0.6840550899505615,
"avg_line_length": 38.72549057006836,
"blob_id": "a992f74e569efaad7aead187f8d846cf1c2aa5f9",
"content_id": "8f3a7939ed0e164821372c077db22b72227304ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2032,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 51,
"path": "/plots.py",
"repo_name": "long9930/6250-Project",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nfrom sklearn.metrics import confusion_matrix\n# TODO: You can use other packages if you want, e.g., Numpy, Scikit-learn, etc.\n\n\ndef plot_learning_curves(train_losses, valid_losses, train_accuracies, valid_accuracies, loss_fig=\"Loss.png\", accuracy_fig=\"accuracy.png\"):\n plt.plot(np.arange(len(train_losses)), train_losses, label='Training loss')\n plt.plot(np.arange(len(valid_losses)), valid_losses, label='Validation loss')\n plt.ylabel('Loss')\n plt.xlabel('epoch')\n plt.legend(loc=\"best\")\n plt.savefig(loss_fig)\n plt.show()\n\n\n plt.plot(np.arange(len(train_accuracies)), train_accuracies, label='Train Accuracy')\n plt.plot(np.arange(len(valid_accuracies)), valid_accuracies, label='Validation Accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('epoch')\n plt.legend(loc=\"best\")\n plt.savefig(accuracy_fig)\n plt.show()\n\n\ndef plot_confusion_matrix(results, class_names):\n\t# TODO: Make a confusion matrix plot.\n\t# TODO: You do not have to return the plots.\n\t# TODO: You can save plots as files by codes here or an interactive way according to your preference.\n y_label=[i[0] for i in results]\n y_pred= [i[1] for i in results]\n results=confusion_matrix(y_label,y_pred)\n results=results.astype(\"float\")/results.sum(axis=1)[:,np.newaxis]\n plt.imshow(results,interpolation='nearest',cmap=plt.cm.Blues)\n plt.title('Normalized Confusion Matrix')\n plt.colorbar()\n tick_marks=np.arange(len(class_names))\n plt.xticks(tick_marks,class_names,rotation=45)\n plt.yticks(tick_marks,class_names) \n \n fmt='.2f' \n thresh=results.max()/2.\n for i,j in itertools.product(range(results.shape[0]),range(results.shape[1])):\n plt.text(j, i, format(results[i,j],fmt),\n horizontalalignment=\"center\",\n color=\"white\" if results[i,j]>thresh else \"black\")\n plt.ylabel(\"True\")\n plt.xlabel(\"Predicted\")\n plt.tight_layout()\n plt.savefig(\"confusion_matrix.png\")\n\n \n"
},
{
"alpha_fraction": 0.5872607827186584,
"alphanum_fraction": 0.6141102313995361,
"avg_line_length": 34.80887222290039,
"blob_id": "7250344fe537a6a71ebfdd8217ab4f43d7b2518d",
"content_id": "36293d5f016f336c4751470449a4710b1f2cc740",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10503,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 293,
"path": "/test_model.py",
"repo_name": "long9930/6250-Project",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np \nimport pandas as pd \nimport sklearn as sk\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport os, random, time\n\ntest_size = 1000 # number of test data points\ndisease = \"Infiltration\" # name of target disease\nmetadata_filepath = \"./data/Data_Entry_2017.csv\" # metadata file path\ntest_result_filepath = \"test_result.csv\" # file to save test result \nimage_path = \"./images\" # images directory\nsave_dir = \"./models\" # direcotry to save and reload model parameters\nsummaries_dir = \"./logs\" # training and testing logs directory\nGPU = False # whether GPU is avaliable or not\nlocalization = True\n\n\n\n# Hyperparameters\nn_classes = 2 # number of classes, here only 2 for normal and ill cases\nbatch_size = 50 # batch size for batch training\nlearning_rate = 0.001\nalpha = 0.5\nepochs = 1 # repeat training and testing times\ndisplay_step = 10 # number of steps to print traing/testing accuracy result\ndropout = 0.1 # To prevent overfitting\nratio = 0.01 # variable initalization ratio \n\n\n\"\"\"\nSectio below load metadata and generate lables\n\"\"\"\nmetadata = pd.read_csv(metadata_filepath)\nvalid_images = pd.DataFrame(os.listdir(image_path), columns = [\"Image Index\"])\nmetadata = metadata.join(valid_images.set_index('Image Index'), on='Image Index', how = 'inner')\nmetadata = metadata[:test_size]\nfull_path = np.vectorize(lambda image_path, image_name: os.path.join(image_path, image_name))\nmetadata[\"Image Index\"] = full_path(image_path, metadata[\"Image Index\"])\n\nmetadata[\"class\"] = \"NORMAL\"\nmetadata.loc[metadata[\"Finding Labels\"].str.contains(disease), [\"class\"]] = \"ILL\"\nmetadata = metadata[[\"Image Index\",\"class\"]]\ntest_metadata = metadata\n\ntotal_count = metadata.shape[0]\nnormal_count = metadata[metadata[\"class\"] == \"NORMAL\"].count()[0]\nill_count = metadata[metadata[\"class\"] == \"ILL\"].count()[0]\n\nclass_weight = tf.constant([normal_count/total_count, ill_count/total_count])\n\n\n\n\n\n\"\"\"\nSection below define the procedure of slicing total data into batches and then laod image data\n\"\"\"\nwith tf.device('/cpu:0'):\n\n # Reads an image from a file, decodes it into a dense tensor, and resizes it\n # to a fixed shape.\n def _parse_function(filename, label):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_png(image_string, channels = 1)\n image_decoded = tf.image.resize_images(image_decoded,[256, 256])\n image_decoded = tf.cast(image_decoded, tf.float32)\n image_decoded = tf.image.per_image_standardization(image_decoded)\n #image_decoded.set_shape((256, 256, 1))\n return image_decoded, label\n\n\n test_data = tf.data.Dataset.from_tensor_slices(\n (test_metadata[\"Image Index\"].values,\n pd.concat([pd.get_dummies(test_metadata[\"class\"]).NORMAL,\n pd.get_dummies(test_metadata[\"class\"]).ILL],axis =1).values))\n test_data = test_data.map(_parse_function, num_parallel_calls=4)\n test_data = test_data.batch(batch_size)\n test_data = test_data.prefetch(1)\n \n iterator = tf.data.Iterator.from_structure(test_data.output_types, \n test_data.output_shapes)\n x, y = iterator.get_next()\n\n test_init = iterator.make_initializer(test_data) # Inicializador para test_data\n \n # Visualize input x\n tf.summary.image(\"input\", x, batch_size)\n\n \n\n\"\"\"\nSection below define CNN structure, train procedure and test procedure\n\"\"\"\nwith tf.device('/device:GPU:0' if GPU else '/cpu:0'):\n def conv2d(img, w, b, k = 1):\n return 
tf.nn.tanh(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, k, k, 1], padding='SAME'),b))\n\n def max_pool(img, k):\n return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n wc1 = tf.Variable(tf.random_normal([11, 11, 1, 32])*ratio, name=\"wc1\")\n bc1 = tf.Variable(tf.random_normal([32])*ratio, name=\"bc1\")\n # stride 64 x 64\n # pool 32 x 32\n wc2 = tf.Variable(tf.random_normal([3, 3, 32, 128])*ratio, name=\"wc2\")\n bc2 = tf.Variable(tf.random_normal([128])*ratio, name=\"bc2\")\n # pool 16 x 16\n wc3 = tf.Variable(tf.random_normal([3, 3, 128, 96])*ratio, name=\"wc3\")\n bc3 = tf.Variable(tf.random_normal([96])*ratio, name=\"bc3\")\n # pool 16 x 16\n wc4 = tf.Variable(tf.random_normal([3, 3, 96, 64])*ratio, name=\"wc4\")\n bc4 = tf.Variable(tf.random_normal([64])*ratio, name=\"bc4\")\n # pool 8x8\n \n wd1 = tf.Variable(tf.random_normal([8*8*64, 512])*ratio, name=\"wd1\")\n bd1 = tf.Variable(tf.random_normal([512])*ratio, name=\"bd1\")\n wd2 = tf.Variable(tf.random_normal([512, 256])*ratio, name=\"wd2\")\n bd2 = tf.Variable(tf.random_normal([256])*ratio, name=\"bd2\")\n wout = tf.Variable(tf.random_normal([256, n_classes])*ratio, name=\"wout\")\n bout = tf.Variable(tf.random_normal([n_classes])*ratio, name=\"bout\")\n \n # conv layer\n #x = tf.Print(x, [x])\n conv1 = conv2d(x,wc1,bc1, k = 4)\n conv1 = max_pool(conv1, k=2)\n # conv layer\n conv2 = conv2d(conv1,wc2,bc2)\n\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 64*64 matrix.\n conv2 = max_pool(conv2, k=2)\n # conv2 = avg_pool(conv2, k=2)\n\n # dropout to reduce overfitting\n keep_prob = tf.placeholder(tf.float32)\n conv2 = tf.nn.dropout(conv2, keep_prob)\n\n # conv layer\n conv3= conv2d(conv2,wc3,bc3)\n\n\n # dropout to reduce overfitting\n conv3 = tf.nn.dropout(conv3, keep_prob)\n\n # conv layer\n conv4 = conv2d(conv3,wc4,bc4)\n\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 64*64 matrix.\n conv4 = max_pool(conv4, k=2)\n\n # dropout to reduce overfitting\n conv4 = tf.nn.dropout(conv4, keep_prob)\n \n # conv4 shape [batch_size, 8, 8, 64]\n \n\n # fc 1\n dense1 = tf.reshape(conv4, [-1, wd1.get_shape().as_list()[0]])\n dense1 = tf.nn.tanh(tf.add(tf.matmul(dense1, wd1),bd1))\n dense1 = tf.nn.dropout(dense1, keep_prob)\n\n # fc 2\n dense2 = tf.reshape(dense1, [-1, wd2.get_shape().as_list()[0]])\n dense2 = tf.nn.tanh(tf.add(tf.matmul(dense2, wd2),bd2))\n dense2 = tf.nn.dropout(dense2, keep_prob)\n \n\n # prediction\n pred = tf.add(tf.matmul(dense2, wout), bout)\n\n\n\n \n \n\n #weighted_pred = tf.multiply(pred, class_weight)\n\n with tf.name_scope(\"cross_entropy\"):\n # softmax\n softmax = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)\n cost = tf.reduce_mean(softmax)\n tf.summary.scalar(\"cross_entropy\", cost)\n\n\n with tf.name_scope(\"accuracy\"):\n # Accuracy\n predicted = tf.argmax(pred, 1)\n actual = tf.argmax(y, 1)\n correct_pred = tf.equal(predicted, actual)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar(\"accuracy\", accuracy)\n\n\n\n\n\"\"\"\nSection below initialize CNN glopbal variables and configuration\n\"\"\"\nwith tf.device('/cpu:0'):\n # Get all summary\n summ = tf.summary.merge_all()\n init = tf.global_variables_initializer()\n config = tf.ConfigProto(allow_soft_placement = True)\n saver = tf.train.Saver()\n save_name = disease + \"_model\"\n save_path = os.path.join(save_dir, save_name)\n\n\n\n\n \n\"\"\"\nSection below starts 
iteration of training and testing\n\"\"\"\n# Session start\nwith tf.Session(config=config) as sess:\n \n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n train_writer = tf.summary.FileWriter(summaries_dir + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(summaries_dir + '/test', sess.graph)\n \n # Required to get the filename matching to run.\n try:\n saver.restore(sess, save_path)\n print(\"Model loaded with file path: %s\" % save_path)\n except:\n sess.run(init)\n print(\"Saved model file path: %s doesn't exit, using random initialization\" % save_path)\n\n total_epoch_time = 0\n step = 1\n # Compute epochs.\n for i in range(epochs):\n print(\"epoch: {}\\n\".format(i))\n print(\"\\n\")\n \n # Test \n\n print(\"Test\\n\")\n sess.run(test_init)\n test_predict = []\n avg_acc = 0\n avg_precision = 0\n avg_recall = 0\n avg_f1 = 0\n avg_loss = 0\n test_step=0\n try:\n while True:\n if (localization):\n acc, y_pred, y_true, loss, summary_str = sess.run(\n [accuracy, predicted, actual, cost, summ],\n feed_dict={keep_prob: 1.})\n else :\n acc, y_pred, y_true, loss, summary_str = sess.run(\n [accuracy, predicted, actual, cost, summ],\n feed_dict={keep_prob: 1.})\n precision = sk.metrics.precision_score(y_true, y_pred)\n recall = sk.metrics.recall_score(y_true, y_pred)\n f1 = sk.metrics.f1_score(y_true, y_pred)\n avg_acc += acc\n avg_precision += precision\n avg_recall += recall\n avg_f1 += f1\n avg_loss += loss\n test_step += 1\n test_predict += y_pred.tolist()\n test_writer.add_summary(summary_str, test_step) \n print(\"accuracy: {}\".format(acc))\n print(\"precision: {}\".format(precision))\n print(\"recall: {}\".format(recall))\n print(\"f1_score: {}\".format(f1))\n print(\"loss: {}\".format(loss))\n print(\"\\n\")\n except tf.errors.OutOfRangeError:\n print(\"Average test set accuracy over {} iterations is {:.2f}%\".format(test_step,(avg_acc / test_step) * 100))\n print(\"Average epoch precision is {:.2f}%\".format((avg_precision / test_step) *100))\n print(\"Average epoch recall is {:.2f}%\".format((avg_recall / test_step) *100))\n print(\"Average epoch f1-score is {:.2f}\".format((avg_f1 / test_step)))\n print(\"Average test set loss over {} iterations is {:.2f}\".format(test_step,(avg_loss / test_step)))\n print(\"\\n\")\n \n if(i == epochs - 1):\n # save result result\n test_metadata[\"pred\"] = test_predict\n \n\n\n print(\"Average epoch time: {} seconds\".format(total_epoch_time/epochs))\n train_writer.add_run_metadata(run_metadata,\"mySess\")\n train_writer.close()\n test_writer.close()\n test_metadata.to_csv(disease+'_'+test_result_filepath)\n\n\n\n\n\n\n\n\n\n\n"
}
] | 5 |
sirvict0r/mad-project | https://github.com/sirvict0r/mad-project | d35d0ad0083be547cd856f08bedc685af18e1c23 | 03fc1f3501165fa5509768ebf3740eff493e75a9 | 85f6a0420dc0feebadfbab39371e2f642ca9ec29 | refs/heads/master | 2020-03-16T17:21:19.270950 | 2018-06-14T03:52:24 | 2018-06-14T03:52:24 | 132,828,277 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4615384638309479,
"alphanum_fraction": 0.4615384638309479,
"avg_line_length": 10,
"blob_id": "c11942c3f757910ea7695ab9f1eb03cbe6a8ee1b",
"content_id": "96e4161e4115bd14d4161e5ee3f5115402fa63ac",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26,
"license_type": "permissive",
"max_line_length": 12,
"num_lines": 2,
"path": "/jmad/solos/views.py",
"repo_name": "sirvict0r/mad-project",
"src_encoding": "UTF-8",
"text": "\n\ndef index():\n pass\n\n\n"
},
{
"alpha_fraction": 0.6162943243980408,
"alphanum_fraction": 0.6169514060020447,
"avg_line_length": 30.6875,
"blob_id": "e231159ff5d05af551b3756cbba86334a9bf0f67",
"content_id": "2163a8c626d41a9dab0683607a5f38c1d47662dd",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1522,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 48,
"path": "/jmad/jmad/tests.py",
"repo_name": "sirvict0r/mad-project",
"src_encoding": "UTF-8",
"text": "from django.test import LiveServerTestCase\nfrom selenium import webdriver\n\n\nclass StudentTestCase(LiveServerTestCase):\n def setUp(self):\n self.browser = webdriver.Firefox()\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_student_find_solos(self):\n \"\"\"\n Test that a User can search for engine\n :return: This search engine should return solos\n \"\"\"\n\n # Steve is a jazz student who would like to find more\n # examples of solos so he can improve his own\n # improvisation. He visits the home page of JMAD.\n home_page = self.browser.get(self.live_server_url + '/')\n\n # He knows he's in the right place because he can see\n # the name of the site in the heading.\n brand_element = self.browser\\\n .find_element_by_css_selector('.navbar-brand')\n\n self.assertEqual('JMAD', brand_element.text)\n self.fail('Incomplete test')\n # He sees the inputs of the search form, including\n # labels and placeholders.\n\n # He types in the name of his instrument and submits\n # it.\n\n # He sees too many search results....\n\n # ... so he adds an artist to his search query and\n # gets a more manageable list.\n\n # He clicks on a search result.\n\n # the solo page has the title, artist and album for\n # this particular solo.\n\n # He also sees the start time and end time of the\n # solo.\n\n"
},
{
"alpha_fraction": 0.6510416865348816,
"alphanum_fraction": 0.6510416865348816,
"avg_line_length": 26.35714340209961,
"blob_id": "cff63e17f3d45e60a289a44be4bfa3405d9b86fe",
"content_id": "5499da2c19f41af56fb9a9b164221854df68604a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 14,
"path": "/jmad/solos/test_url.py",
"repo_name": "sirvict0r/mad-project",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom django.core.urlresolvers import resolve\n\nfrom .views import index\n\nclass SolosUrlsTestCase(TestCase):\n def test_root_url_uses_index_view(self):\n \"\"\"\n Test that the root of the site resolvers to the\n correct view function\n :return:\n \"\"\"\n root = resolve('/')\n self.assertEqual(root.func, index)\n\n"
}
] | 3 |
fin4le-p/prcn-dam-by-discord | https://github.com/fin4le-p/prcn-dam-by-discord | 14722d0496b1225a62c2698c445e436f4569ce7b | 26070717417fe7cf022bd358f7db25c77e900fa4 | 62a65e643a030e24d7c77f9a62d1fbd0c098c086 | refs/heads/master | 2023-03-03T15:50:26.504252 | 2021-02-09T11:11:12 | 2021-02-09T11:11:12 | 329,274,792 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 11.600000381469727,
"blob_id": "31d1de1dded0375d9dcc8c911f66db2d862b0d49",
"content_id": "461aa474ec774cc8eca438cb73718f5e6f367339",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 5,
"path": "/README.md",
"repo_name": "fin4le-p/prcn-dam-by-discord",
"src_encoding": "UTF-8",
"text": "# プリコネ持ち越し時間計算DiscordBot\r\n/helpで使い方\r\n\r\n/moti ボスの残HP 実際のダメージ\r\nで使える。\r\n"
},
{
"alpha_fraction": 0.5694531798362732,
"alphanum_fraction": 0.5945945978164673,
"avg_line_length": 25.516666412353516,
"blob_id": "6081c01f11bb2d625d10f3417cd7c727e2256a58",
"content_id": "e00984a2978cc1a295ccbe0c55f4728812149b50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1913,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 60,
"path": "/prcn-dam.py",
"repo_name": "fin4le-p/prcn-dam-by-discord",
"src_encoding": "UTF-8",
"text": "import discord\nimport re\nimport math\nimport os\n\nTOKEN = os.environ['DISCORD_BOT_TOKEN']\n\n#反応\nfastHant = \"/\"\nclient = discord.Client()\n\[email protected]\nasync def on_ready():\n\n print(\"The bot has logged in\")\n game = discord.Game(\"https://prcn-dam.fin4le.com\")\n await client.change_presence(activity=game)\n\[email protected]\nasync def on_message(message):\n\n try:\n\n if message.author.bot:\n return\n\n print(message.author.name + '#' + message.author.discriminator + ' : ' + message.content)\n\n if message.content == fastHant + 'help':\n await message.channel.send(\"[/moti ボスの残HP 実際のダメージ]\\nを入力することで、持ち越し時間を計算することができます。\\n例として[/moti 500 900]を入力すると\\nボスの残HP500、与えたダメージ900となり、持ち越し時間60秒が計算結果として吐き出されます。\\n\\n正しく動作しない場合は下記をご利用ください。\\nhttps://prcn-dam.fin4le.com\")\n return\n\n if message.content.startswith(fastHant + 'moti'):\n gtMsg = re.split(\" | \",message.content)\n\n if len(gtMsg) != 3:\n await message.channel.send(\"入力が正しくありません。\")\n return\n\n itMsg1 = int(gtMsg[1])\n itMsg2 = int(gtMsg[2])\n\n if itMsg1 >= itMsg2:\n await message.channel.send(\"持ち越し時間はありません。\")\n return\n\n calcNo1 = itMsg2 - itMsg1\n calcNo2 = math.ceil(calcNo1 / itMsg2 * 90 + 20)\n\n if calcNo2 > 90:\n calcNo2 = 90;\n\n await message.channel.send(\"持ち越し時間は \" + str(calcNo2) + \" 秒です。\")\n return\n\n except ValueError:\n await message.channel.send(\"入力が正しくありません。\")\n return\n\nclient.run(TOKEN)\n"
}
] | 2 |
mayank408/susi_api_wrapper | https://github.com/mayank408/susi_api_wrapper | 6545a47140da5bca6476695d00c0e1b99e507521 | 2c21876e732bceeb9a2b333df45283745394a5fc | 3371e33635fc640e4e4e8e32504841dfb2872fda | refs/heads/master | 2021-01-19T10:14:31.020138 | 2017-04-07T15:23:31 | 2017-04-07T15:23:31 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.571195662021637,
"alphanum_fraction": 0.572282612323761,
"avg_line_length": 24.20547866821289,
"blob_id": "789c7da1fca2884fb81c6bb25afb9f7348828bb5",
"content_id": "d33f3a2fcd1039e464ea7c661acf7944763ba4e8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1840,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 73,
"path": "/python_wrapper/susi_python/models.py",
"repo_name": "mayank408/susi_api_wrapper",
"src_encoding": "UTF-8",
"text": "class QueryResponse:\n def __init__(self, json, answer, session):\n self.query = json['query']\n self.count = json['count']\n self.client_id = json['client_id']\n self.query_date = json['query_date']\n self.answer_time = json['answer_time']\n self.session = session\n self.answer = answer\n\n\nclass LoginResponse:\n def __init__(self, json, session):\n self.message = json['message']\n self.session = session\n self.valid_seconds = json['valid_seconds']\n self.access_token = json['access_token']\n\n\nclass SignUpResponse:\n def __init__(self, json, session):\n self.session = session\n self.message = json['message']\n\n\nclass ForgotPasswordResponse:\n def __init__(self, json):\n self.message = json['message']\n\n\nclass Answer:\n def __init__(self, data, metadata, actions):\n self.data = data\n self.metadata = metadata\n self.actions = actions\n\n\nclass Datum:\n def __init__(self, json):\n #\n # self.zero = json['0']\n # self.one = json['1']\n # self.intent_original = json['intent_original']\n # self.intent_canonical = json['intent_canonical']\n # self.timezoneOffset = json['timezoneOffset']\n #\n self.answer = json['answer']\n self.query = json['query']\n\n\nclass Metadata:\n def __init__(self, json):\n self.count = json['count']\n\n\nclass Action:\n def __init__(self, json):\n if 'type' in json:\n self.type = json['type']\n if 'expression' in json:\n self.expression = json['expression']\n\n\nclass Session:\n def __init__(self, identity):\n self.identity = identity\n\n\nclass Identity:\n def __init__(self, json):\n self.name = json['name']\n self.type = json['type']\n self.anonymous = json['anonymous']\n"
}
] | 1 |
house-ingetrac/flask-u-sumpn | https://github.com/house-ingetrac/flask-u-sumpn | bd414370adb6c3ea5fbf0f48659a5b7c5ba0be48 | 6d5c74a1b72bb5ba91ff616de8d324d7c38e753c | efaddbc2b4c0d792b4ac26a994777852c7e586e5 | refs/heads/master | 2021-07-03T10:42:22.928875 | 2017-09-25T11:16:39 | 2017-09-25T11:16:39 | 104,692,245 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5272727012634277,
"alphanum_fraction": 0.5272727012634277,
"avg_line_length": 5.111111164093018,
"blob_id": "280477057343159a595c01b280c043cde9a79e88",
"content_id": "bce13f39ae504218a09f736a4e2ba10dfaa43831",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 9,
"path": "/README.md",
"repo_name": "house-ingetrac/flask-u-sumpn",
"src_encoding": "UTF-8",
"text": "# flask-u-sumpn\n\n## Routes:\n\n• /\n\n• /bye\n\n• /inception\n"
},
{
"alpha_fraction": 0.6145985126495361,
"alphanum_fraction": 0.6145985126495361,
"avg_line_length": 31.619047164916992,
"blob_id": "969e9de606a2563b7e29c47f463df4df7a078763",
"content_id": "d5b8338abf45f65bfe410acfe7e8321d4cdb77a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 21,
"path": "/app.py",
"repo_name": "house-ingetrac/flask-u-sumpn",
"src_encoding": "UTF-8",
"text": "from flask import Flask\napp = Flask(__name__) #create instance of class\n\n#assign following fxn to run when root route requested\[email protected](\"/\")\ndef hello_world():\n return \"<i><b><small><small>No</small> hablo</small> queso!</i></b>\"\n\n#/bye\[email protected](\"/bye\")\ndef goodbye():\n return \"<i><b>Thanks for visiting!</i></b>\"\n\n#/inception\[email protected](\"/inception\")\ndef inception():\n return \"<code>@app.route(inception)<BR>def inception():<small><BR> @app.route(inception)<BR def inception():<BR><small> @app.route(inception):<BR> def inception():<BR> <small>...</small></small></small></code>\"\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n"
}
] | 2 |
qharo/Flappy_Birds_NEAT | https://github.com/qharo/Flappy_Birds_NEAT | d4d967a6bbe897ac489d79f10c8d61dbe8b497ae | e76fae2d5bba64658626dee4c29745bad9b7086b | 8c0e2968de14c33075c00044929e3112adb40dc6 | refs/heads/master | 2023-06-22T04:05:23.449061 | 2021-07-21T14:51:50 | 2021-07-21T14:51:50 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5156686305999756,
"alphanum_fraction": 0.5631399154663086,
"avg_line_length": 30.30097007751465,
"blob_id": "b061fc6ed31aa0f3065fe135d668bd795556be2e",
"content_id": "bf09433eba160abd7f5e8dfc98e71ef256398457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3223,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 103,
"path": "/controller.py",
"repo_name": "qharo/Flappy_Birds_NEAT",
"src_encoding": "UTF-8",
"text": "#imports\nimport neat\nfrom neat import genome\nimport pygame\nimport UI\nimport time\nimport ENV\nimport os\nimport math\nimport matplotlib.pyplot as plt\n\nscores = []\ngen = []\ngener = 0\n\ndef main(genomes, config):\n global gener\n global gen\n global scores\n pygame.init()\n screen = pygame.display.set_mode((800,600))\n pygame.display.set_caption(\"Flappy Bird Training!\")\n ui = UI.UI(screen, genomes, config, gener)\n env = ENV.ENV(ui)\n\n\n running = True\n clock = pygame.time.Clock()\n\n while running:\n clock.tick(60)\n\n eventList = pygame.event.get()\n\n pipe_ind = 0\n if len(ui.birds) > 0:\n if len(ui.pipes) > 1 and ui.birds[0].x > ui.pipes[0].x + ui.pipes[0].topImg.get_width():\n pipe_ind = 1\n \n ui.move()\n for x, bird in enumerate(ui.birds):\n bird.move()\n ui.ge[x].fitness += 0.1\n\n topDist = math.sqrt((bird.y - ui.pipes[pipe_ind].height)**2 + (bird.x - ui.pipes[pipe_ind].x)**2)\n botDist = math.sqrt((bird.y - ui.pipes[pipe_ind].bottom)**2 + (bird.x - ui.pipes[pipe_ind].x)**2)\n topDist2 = math.sqrt((bird.y - ui.pipes[pipe_ind].height)**2 + (bird.x - (ui.pipes[pipe_ind].x + ui.pipes[pipe_ind].topImg.get_width()))**2)\n botDist2 = math.sqrt((bird.y - ui.pipes[pipe_ind].bottom)**2 + (bird.x - (ui.pipes[pipe_ind].x + ui.pipes[pipe_ind].topImg.get_width()))**2)\n #print(topDist2)\n #output = ui.nets[x].activate((bird.y, abs(bird.y - ui.pipes[pipe_ind].height), abs(bird.y - ui.pipes[pipe_ind].bottom)))\n \n output = ui.nets[x].activate((bird.y, topDist, botDist))\n\n if output[0] > 0.5:\n bird.jump()\n\n #NEURAL NET\n # node2 = math.tanh(1.0064226615518481*bird.y-0.22198042549516955)\n # node1 = math.tanh(botDist*-1.9515456087270615 + 1.5153790044669881*topDist + 0.5712259901747898*node2 + 0.10553905953398524)\n # value = math.tanh(node1 + node2)\n # if value > 0.5:\n # bird.jump() \n\n\n\n for event in eventList:\n if event.type == pygame.QUIT:\n running = False\n # if event.type == pygame.KEYDOWN:\n # if event.key == pygame.K_SPACE:\n # ui.birds[0].jump()\n # if event.key == pygame.K_b and len(ui.birds) == 2:\n # ui.birds[1].jump()\n \n\n if len(ui.birds) == 0:\n gener += 1\n gen.append(gener)\n scores.append(ui.fscore)\n running = False\n\n #ui.move()\n ui.update()\n pygame.display.update()\n\n#main()\ndef run(conPath):\n config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, conPath)\n pop = neat.Population(config)\n pop.add_reporter(neat.StdOutReporter(True))\n pop.add_reporter(neat.StatisticsReporter())\n\n winner = pop.run(main, 100)\n print(winner)\n plt.plot(gen, scores)\n plt.show()\n\n\nif __name__ == \"__main__\":\n local_dir = os.path.dirname(__file__)\n conPath = os.path.join(local_dir, \"config-feedforward.txt\")\n run(conPath)\n #main(1, 2)"
}
] | 1 |
replicatedhq/ansible
|
https://github.com/replicatedhq/ansible
|
60c62074b3d67e6c06c909c91b77b98ef15da90b
|
24b868824520cb9392f860b96962a6b504cacfa2
|
a45b51388fd53b72fd13827a7c6012ccd8adebf3
|
refs/heads/master
| 2021-01-20T08:56:53.452016 | 2017-05-11T19:30:29 | 2017-05-11T19:30:29 | 90,205,738 | 4 | 4 | null | 2017-05-04T00:44:01 | 2017-05-11T19:30:31 | 2017-05-11T19:30:30 |
Shell
|
[
{
"alpha_fraction": 0.7030355334281921,
"alphanum_fraction": 0.7133448123931885,
"avg_line_length": 33.91999816894531,
"blob_id": "1ad14d92c46bcc493027c7caa940e24f9e92ee39",
"content_id": "420674f9e1cca7e41e70639f29d8efaa90781efb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3492,
"license_type": "no_license",
"max_line_length": 295,
"num_lines": 100,
"path": "/README.md",
"repo_name": "replicatedhq/ansible",
"src_encoding": "UTF-8",
"text": "Replicated\n==========\n\nThis role helps install Replicated and provides install automation for applications.\n\nRequirements\n------------\n\nThis role requires Ansible 2.3 or higher. The platform platform requirements are found in the metadata file.\n\nRequired playbooks are documented in `requirements.yml` and can be installed via\n\n```\nansible-galaxy install -r /etc/ansible/roles/replicated.ansible/requirements.yml\n```\n\nRole Variables\n--------------\n\nThe following variables can be passed to the role:\n\n```\n #\n # Replicated or Replicated Operator install\n #\n replicated_version: 2.7 # optional replicated version\n replicated_docker_version: 1.12.3 # optional docker version\n replicated_install: replicated # optional what to install, either replicated (the default) or operator\n replicated_private_address: 10.0.7.20 # server private address, if left blank defaults to ansible_default_ipv4.address\n replicated_public_addres: 10.0.7.20 # server public address, if left blank defaults to ansible_default_ipv4.address\n replicated_daemon_token: JSWSOzcOmiaeN # optional daemon_token, if left blank one will be generated\n replicated_daemon_address: 10.0.7.20 # required if performing an operator only install\n\n #\n # Automation support to install both Replicated and your application and\n # settings by providing the replicated configuration file, settings file\n # any any extra files such as keys/certs and a license.\n # \n # To learn more about Replicated automation see\n # https://www.replicated.com/docs/kb/developer-resources/automate-install\n #\n replicated_automation_settings_file: settings.conf # written to /etc/settings.conf\n replicated_automation_conf_file: replicated.conf # written to /etc/replicated.conf\n replicated_automation_extra_files: # extra files needed for automation\n - { src: '/tmp/cert', dest: '/tmp/http-cert' }\n - { src: '/tmp/key', dest: '/tmp/http-key' }\n - { src: '/tmp/license.rli', dest: '/tmp/license.rli' }\n```\n \nExamples\n--------\n\n1) Install the latest Replicated.\n\n```\n- name: Install latest Replicated\n hosts: replicated\n roles:\n - replicated\n```\n\n2) Install a Replicated Operator node pointing back to a previously setup Replicated host\n\n```\n- name: Install Replicated\n hosts: replicated-operator\n vars:\n replicated_install: operator\n replicated_daemon_address: 10.0.7.3\n replicated_daemon_token: JSWSOzcOmiaeNdt3Bb1Xm7B7Kakj\n roles:\n - replicated\n```\n\n3) Install Replicated with an existing application\n\nAutomation support files are put into place prior to Replicated installing and on the install used to configure and start the application. To use the Replicated automation start by copying the sample replicated.conf and settings.conf from the templates folder and customize for your application.\n\nSettings can be gathered from a running instance via the [Replicated CLI](https://www.replicated.com/docs/reference/replicated-cli) by running `replicated app <appid> settings`.\n\n```\n- name: Install Replicated\n hosts: replicated\n vars:\n replicated_daemon_token: JSWSOzcOmiaeNdt3Bb1Xm7B7Kakj\n replicated_automation_settings_file: /tmp/settings.conf\n replicated_automation_conf_file: /tmp/replicated.conf\n replicated_automation_extra_files:\n - { src: '/tmp/key', dest: '/tmp/key' }\n - { src: '/tmp/cert', dest: '/tmp/cert' }\n - { src: '/tmp/license.rli', dest: '/tmp/license.rli' }\n roles:\n - replicated\n```\n\n\nDependencies\n------------\n\nNone\n"
},
{
"alpha_fraction": 0.6763848662376404,
"alphanum_fraction": 0.7069970965385437,
"avg_line_length": 23.428571701049805,
"blob_id": "954aa69b7a446af48c336e093e2b3f868b20122d",
"content_id": "e6064de7b1a048bff4f7c861b90f6b3b84f73cf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 28,
"path": "/templates/semver_check",
"repo_name": "replicatedhq/ansible",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#\n# Usage: ./semver-check.py version1 version2\n#\n# Returns\n# 0 if the versions match\n# 1 if version1 > version2\n# -1 if version1 < version2\n#\n\nimport re\nimport semver\nimport sys\n\n#\n# Normalizes Docker semver-like version numbers by taking the first three dot-seperated\n# strings and making each an integer, thus 17.03.1-CE will become 17.3.1\n#\ndef normalize( version ):\n parts = re.split('[.-]', version)\n normalizedParts = []\n for part in parts:\n if len(normalizedParts) < 3:\n normalizedParts.append(str(int(part)))\n normalized = '.'.join(normalizedParts)\n return normalized\n\nprint semver.compare(normalize(sys.argv[1]), normalize(sys.argv[2]))\n\n\n"
}
] | 2 |
Aston-95/MiniProjet-Python-Arduino
|
https://github.com/Aston-95/MiniProjet-Python-Arduino
|
e7af1bf978bc11e3bd14566d4332335784f99e68
|
9aeaecd480198a3bb86f974f093765efce6372d7
|
08703c53743cd3746416d7e62cf0fe2df4a6e155
|
refs/heads/master
| 2021-06-26T18:53:19.174937 | 2017-09-13T21:08:57 | 2017-09-13T21:08:57 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6448000073432922,
"alphanum_fraction": 0.6528000235557556,
"avg_line_length": 26.409090042114258,
"blob_id": "f708e66905ff71892e454a2f948fbf16f8cc0557",
"content_id": "9172979083c2d2f2caaaa59f4c32b98469fff7ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 629,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 22,
"path": "/main.py",
"repo_name": "Aston-95/MiniProjet-Python-Arduino",
"src_encoding": "UTF-8",
"text": "# Interface (Simulateur)\r\nfrom interfaces.simulateur import Simulateur;\r\n\r\nfrom ledtest import ledTest; # Tests sur les LEDs\r\nimport serial; # pySerial\r\n\r\ndebug = True; # Activer / Désactiver le mode débug (ledTest)\r\n\r\n# arduino = serial.Serial('COM3');\r\n# led = ledTest(arduino, debug, \"sandbox\");\r\n\r\ndebug = True; # Activer / Désactiver le mode débug (Interface)\r\ntimeout = 4000; # Actualisation de la tension actuelle\r\n\r\ndef loopVolt():\r\n root = volt.getRoot();\r\n root.after(timeout, loopVolt);\r\n\r\n if (debug == True): print(volt.getVolt(), \"V\");\r\n\r\nvolt = Simulateur(debug);\r\nloopVolt();\r\n"
}
] | 1 |
Jessecomo/PatchyBot
|
https://github.com/Jessecomo/PatchyBot
|
a065b7ea28c06b60d0b54aba0f47cecc4b5bd94e
|
747dd559a03af60921d30048cf2f5dbda0c63183
|
756fc4f07369bd82e6bbe18c5dadbb47d12c4579
|
refs/heads/main
| 2023-03-15T05:14:21.554353 | 2021-03-17T14:54:36 | 2021-03-17T14:54:36 | 348,748,248 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.59609454870224,
"alphanum_fraction": 0.6051387190818787,
"avg_line_length": 36.61111068725586,
"blob_id": "cfde9e050e84dd733730bdc33e7a5256c51c010d",
"content_id": "8e697d65f90a406b8a1195afda0f024b4aeef359",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4865,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 126,
"path": "/patchybot.py",
"repo_name": "Jessecomo/PatchyBot",
"src_encoding": "UTF-8",
"text": "import os\r\nimport requests\r\nimport random\r\nimport discord\r\nfrom dotenv import load_dotenv\r\nfrom discord.ext import commands\r\nfrom urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nfrom datetime import datetime\r\nimport re\r\n\r\n#Scrape the riot website for the latest patch notes\r\nclass League:\r\n def __init__(self):\r\n self.patch = {\"title\": None, \"url\": None, \"desc\": None, \"img\": None}\r\n\r\n def get_patch_info(self):\r\n base_url = 'https://na.leagueoflegends.com'\r\n fetch_url = 'https://na.leagueoflegends.com/en-us/news/tags/patch-notes'\r\n html_page = urlopen(fetch_url)\r\n html_text = html_page.read().decode(\"utf-8\")\r\n\r\n soup = BeautifulSoup(html_text, \"html.parser\")\r\n\r\n #Find the first link ,ie. the latest, patch notes\r\n for link in soup.find_all(\"a\", limit=1):\r\n self.patch[\"url\"] = base_url + link[\"href\"]\r\n\r\n #Open patch note link\r\n get_url = requests.get(self.patch[\"url\"])\r\n get_text = get_url.text\r\n soup = BeautifulSoup(get_text, \"html.parser\")\r\n\r\n #Get patch info\r\n self.patch[\"title\"] = soup.find('h1').text\r\n self.patch[\"img\"] = soup.find('a',class_ ='skins cboxElement').img['src']\r\n description = soup.find(\"blockquote\",{\"class\":\"blockquote context\"}).text\r\n self.patch[\"desc\"] = description[:500] + (description[500:] and '...')\r\n\r\n def get_tft_patch_info(self):\r\n base_url = 'https://na.leagueoflegends.com'\r\n fetch_url = 'https://na.leagueoflegends.com/en-us/news/game-updates/'\r\n html_page = urlopen(fetch_url)\r\n html_text = html_page.read().decode(\"utf-8\")\r\n soup = BeautifulSoup(html_text, \"html.parser\")\r\n\r\n for link in soup.select('a[href*=\"teamfight-tactics-patch\"]', limit=1):\r\n self.patch[\"url\"] = base_url + link[\"href\"]\r\n\r\n #Open patch note link\r\n get_url = requests.get(self.patch[\"url\"])\r\n get_text = get_url.text\r\n soup = BeautifulSoup(get_text, \"html.parser\")\r\n\r\n #Get patch info\r\n self.patch[\"title\"] = soup.find('h1').text\r\n self.patch[\"img\"] = soup.find('a',class_ ='skins cboxElement').img['src']\r\n description = soup.find(\"blockquote\",{\"class\":\"blockquote context\"}).text\r\n self.patch[\"desc\"] = description[:500] + (description[500:] and '...')\r\n\r\n #Get updated patch notes if current patch title doesn't match their new one\r\n\r\n\r\n\r\ndef get_patch_message(gameMode):\r\n embed = discord.Embed()\r\n LoL = League()\r\n if gameMode == 1:\r\n LoL.get_patch_info()\r\n elif gameMode == 2:\r\n LoL.get_tft_patch_info()\r\n if LoL.patch[\"title\"] is None or LoL.patch[\"url\"] is None:\r\n embed.title = \"Error occurred when retrieving patch notes\"\r\n return embed\r\n embed.title = LoL.patch[\"title\"]\r\n embed.url = LoL.patch[\"url\"]\r\n if LoL.patch[\"desc\"] is not None:\r\n embed.description = description=LoL.patch[\"desc\"]\r\n embed.set_thumbnail(url= 'https://i.imgur.com/PL8pjLM.png')\r\n embed.set_image(url=LoL.patch[\"img\"])\r\n embed.set_footer(text=\"Patchy Notes v1.0\", \\\r\n icon_url = \"https://i.imgur.com/cxPxk9s.png\")\r\n embed.color = 0x0a6fdb\r\n return embed\r\n\r\n#Prefix for all the commands.\r\nbot = commands.Bot(command_prefix='!')\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(f'{bot.user.name} has connected to Discord!')\r\n\r\[email protected]()\r\nasync def patch(ctx, mode = None):\r\n if not mode:\r\n #Send LoL patch notes\r\n await ctx.send(embed = get_patch_message(1))\r\n elif mode == \"tft\":\r\n #Send TFT patch notes\r\n await ctx.send(embed = 
get_patch_message(2))\r\n elif mode == \"help\":\r\n embed = discord.Embed()\r\n embed.title = \"Patchy Bot\"\r\n embed.description = \"Allows you to easily get the latest\\\r\n League of Legends and Teamfight Tactics patch notes right\\\r\n in your discord server!\"\r\n embed.color = 0x0a6fdb\r\n embed.add_field(name = \"Commands\", value = \"!patch - League of Legends \\n \\\r\n !patch tft - Teamfight Tactics\", inline = False)\r\n embed.set_thumbnail(url = 'https://i.imgur.com/cxPxk9s.png')\r\n embed.set_footer(text = \"Patchy Notes v1.0 by Jesse Como\", \\\r\n icon_url = \"https://i.imgur.com/cxPxk9s.png\")\r\n await ctx.send(embed = embed)\r\n else:\r\n embed = discord.Embed()\r\n embed.title = \"Sorry I didn't understand that...\"\r\n embed.add_field(name = \"Commands\", value = \"!patch - League of Legends \\n \\\r\n !patch tft - Teamfight Tactics\", inline = False)\r\n embed.set_thumbnail(url = 'https://i.imgur.com/cxPxk9s.png')\r\n embed.set_footer(text = \"Patchy Notes v1.0 by Jesse Como\", \\\r\n icon_url = \"https://i.imgur.com/cxPxk9s.png\")\r\n embed.color = 0x0a6fdb\r\n await ctx.send(embed = embed)\r\n\r\nload_dotenv()\r\nbot.run(os.getenv('TOKEN'))\r\n"
}
] | 1 |
karthikshekhar/dropseq_scripts
|
https://github.com/karthikshekhar/dropseq_scripts
|
27d5ecb3acc2b772d7fea515e33ec2c657ab2b30
|
8648aa047ed607fa58a73b0e508c201e7410a19c
|
8b108a5df5bf352c1cec43bcae579ea4a7bdf357
|
refs/heads/master
| 2021-01-17T13:00:59.481831 | 2016-10-31T20:26:05 | 2016-10-31T20:26:05 | 59,797,959 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6362869143486023,
"alphanum_fraction": 0.6405063271522522,
"avg_line_length": 27.39759063720703,
"blob_id": "e5773d07e7bd6391e17a589de23f5475135756f9",
"content_id": "991a53b8169b9d5ca2c308cb1cc6aa9f56b4191a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2370,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 83,
"path": "/build_rnaseq_ref/install_gtf_annot.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Author : Karthik Shekhar, 06/14/2016\n# Installing transcriptomic index/reference for RNA-seq data\n# usage : ./install_gtf_annot.sh\n\n# required arguments\ntophatPath=/seq/regev_genome_portal/SOFTWARE/tophat2/current\nrsemPath=/seq/regev_genome_portal/SOFTWARE/BIN\ngenome_fasta=/path/to/genome.fasta\ngtf=/path/to/gtf\nannot_name=name\nrRNA_fasta=/path/to/rRNA.fasta\nno_replace=F # do not replace already installed files (T/F)\n\n# optional arguments (comment out if not providing)\n#iso_map=/path/to/genetoisoform_map\nigv_genome=/path/to/igv.genome\n#just=tophat|rsem\n# \n\n# check arguments\n# TO DO\n\n# directories\ngenome_fasta=`readlink -f ${genome_fasta}`\nrRNA_fasta=`readlink -f ${rRNA_fasta}`\ngenome_dir=`dirname ${genome_fasta}`\nannot_install_dir=${genome_dir}/Annotations/${annot_name}\nconfig_dir=${genome_dir}/Config\nconfig_file=${config_dir}/${annot_name}.config\n\nmkdir -p ${annot_install_dir}\nmkdir -p ${config_dir}\n\ndest_annot_file=${annot_install_dir}/`basename ${gtf}`\nif [ $no_replace == \"F\" ]\nthen\n cp ${gtf} ${dest_annot_file}\nfi\n\nif [ $no_replace == \"T\" ] && [ -s $config_file ]\nthen\nelse\n echo \"GENOME_FA=${genome_fasta}\" > ${config_file}\n echo \"ANNOT_NAME=${annot_name}\" >> ${config_file}\n echo \"ANNOT_GTF=${dest_annot_file}\" >> ${config_file}\n echo \"RRNA_FA=${rRNA_fasta}\" >> ${config_file}\n \n if [ -s ${igv_genome} ]\n then\n igv_genome=`readlink -f ${igv_genome}`\n echo \"IGV_GENOME=${igv_genome}\" >> ${config_file} \n fi\n \n echo \"TOPHAT_TRANS=${annot_install_dir}/tophat_trans_index\" >> ${config_file}\n echo \"RSEM_TRANS=${annot_install_dir}/rsem_trans_index\" >> ${config_file}\nfi\n\n####################\n## Prep for Tophat\n####################\n\nmkdir -p ./tmp\ncp ~/repo/dropseq_scripts/build_rnaseq_ref/*fq ./tmp/\n\nif [ -n \"$just\" ] && [ $just == \"rsem\" ]\nthen\n echo \"Not running tophat\"\nelse\n ${tophatPath}/tophat2 -G ${dest_annot_file} -T --transcriptome-index ${annot_install_dir}/tophat_trans_index -o ./tmp/$$.tophat.out ${genome_fasta} ./tmp/tmp.left.fq ./tmp/tmp.right.fq\nfi\n\n####################\n## Prep for RSEM\n####################\n\nif [ -n \"just \"] && [ $just == \"tophat\" ]\nthen \n echo \"Not running rsem\"\nelse\n ${rsemPath}/rsem-prepare-reference --gtf ${dest_annot_file} --transcript-to-gene-map ${gene_iso_map_file} ${genome_fasta} ${annot_install_dir}/rsem_trans_index\nfi \n\n \n\n\n\n"
},
{
"alpha_fraction": 0.5564853549003601,
"alphanum_fraction": 0.569037675857544,
"avg_line_length": 22.266666412353516,
"blob_id": "026c98e0fc2011fbb82906e353653eb8498de075",
"content_id": "8423df2002192909c87d9b9a69d3582eae04e4c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 717,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 30,
"path": "/10X/run_10X.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#$ -cwd\n#$ -q long\n#$ -P regevlab\n#$ -l h_vmem=50g\n#$ -e qsub_logs/error.err\n#$ -o qsub_logs/out.log\n\n\nsource /broad/software/scripts/useuse\nreuse UGER\n\n# Variables defined by wrapper script\ncellranger_path=path_to_cellranger\nid=my_id\n# ./<FLOWCELLID>/outs/fastq_path\nfastq_path=path_to_fastq \n#comma seperated sample barcodes\nbcs=barcode\ntranscriptome_path=path_to_trans\n\n${cellranger_path}/cellranger run --id=${id} \\\n\t \t\t\t --transcriptome=${transcriptome_path} \\\n\t --fastqs=${fastq_path} \\\n\t --jobmode=sge \\\n\t \t\t\t --indices=${bcs} \\\n\t \t\t\t --maxjobs=8 \\\n\t \t\t\t --mempercore=16 \\\n\t \t\t\t #--uiport=3600\n\t\t\t\t \t\n\t\t\t\n\t\n"
},
{
"alpha_fraction": 0.6843658089637756,
"alphanum_fraction": 0.7138643264770508,
"avg_line_length": 20.1875,
"blob_id": "2c2bbd650734a60324af9219136fc929640960a6",
"content_id": "db7b51ab7a8e56833c10ddb9eda2b4b5e723954e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 16,
"path": "/10X/demux_10X.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n#$ -cwd\n#$ -q long\n#$ -P regevlab\n#$ -l h_vmem=16g\n#$ -e qsub_logs/demult.err\n#$ -o qsub_logs/demult.log\n\nsource /broad/software/scripts/useuse\nreuse -q .bcl2fastq2-2.17.1.14\nreuse UGER\ncellranger_path=path_to_cellranger\nbclPath=path_to_bcl\n\n# outputs to current directory\n${cellranger_path}/cellranger demux --run=${bclPath}\n"
},
{
"alpha_fraction": 0.6603773832321167,
"alphanum_fraction": 0.6918238997459412,
"avg_line_length": 21.619047164916992,
"blob_id": "3c8b15e8f4f9ef70ab51f4ae99004ff46c182698",
"content_id": "3bab16f7915142e48b4d84ada96330676e062ec2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 21,
"path": "/drop-seq/demultiplex.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n\n#$ -cwd\n#$ -q long\n#$ -P regevlab\n#$ -N test_rnaseq\n#$ -l m_mem_free=20g\n#$ -e /broad/hptmp/karthik/error_demult.err\n#$ -o /broad/hptmp/karthik/out_demult.log\n\nset -x\n\nsource /broad/software/scripts/useuse\ndatadir=${nextseq_loc}\noutdir=${fastq_loc}\nmkdir -p ${outdir}\n\necho ${datadir}\n\nuse .bcl2fastq2-2.17.1.14\nnohup bcl2fastq --runfolder-dir ${datadir} --output-dir ${outdir} --mask-short-adapter-reads 10 --minimum-trimmed-read-length 10 --no-lane-splitting\n\n\n"
},
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 28,
"blob_id": "1eb5f44848e93d1fd65540c509bad71dd7714a11",
"content_id": "3c07627cc7348690b6238c623e776b7d4bf0690b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 88,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 3,
"path": "/build_rnaseq_ref/bwa_ncrna.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nbwa index /seq/regev_genome_portal/RESOURCES/Zebrafish/Zv10/Zv10.ncrna.fa \n"
},
{
"alpha_fraction": 0.6439476609230042,
"alphanum_fraction": 0.6690294146537781,
"avg_line_length": 42.595237731933594,
"blob_id": "3c8588c2af696dd27f4b681a24787b30045310c4",
"content_id": "1421de4ac005a1b1633a8c226f9c8f01990c7fa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1834,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 42,
"path": "/fitCountData.R",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "meanCVfit = function(count.data, reads.use=FALSE, do.text=FALSE, diffCV.cutoff=0.5, do.spike=FALSE, main.use=NULL){\n\n# Empirical mean, var and CV\nmean_emp = apply(count.data, 1, mean)\nvar_emp = apply(count.data, 1, var)\ncv_emp = sqrt(var_emp) / mean_emp\n\n# NB sampling\na=colSums(count.data)\nsize_factor = a/ mean(a)\nfit=fitdistr(size_factor, \"Gamma\")\nif (do.spike) spike.genes=grep(\"^ERCC\", rownames(count.data), value=TRUE)\nprint(fit)\nif (!reads.use){\n hist(size_factor, 50, probability=TRUE, xlab=\"N_UMI/<N_UMI>\", main = main.use)\n} else {\n hist(size_factor, 50, probability=TRUE, xlab=\"N_Reads/<N_Reads>\", main = main.use)\n}\ncurve(dgamma(x, shape=fit$estimate[1], rate=fit$estimate[2]),from=0, to=quantile(size_factor, 0.999), add=TRUE, col=\"red\",\n main=\"Gamma dist fit for size factor\")\ntext(5,0.6, paste(\"shape = \", round(fit$estimate[1],2)))\ntext(5,0.5, paste(\"rate = \", round(fit$estimate[2],2)))\n\n# Gamma distributions of individual genes are just scaled versions. If X ~ Gamma(a,b)\n# then cX ~ Gamma(a, b/c)\na_i = rep(fit$estimate[1], length(mean_emp)); names(a_i) = names(mean_emp)\nb_i = fit$estimate[2] / mean_emp; names(b_i) = names(mean_emp)\nmean_NB = a_i / b_i; var_NB = a_i*(1+b_i) / (b_i^2)\ncv_NB = sqrt(var_NB)/mean_NB\n\ndiffCV = log(cv_emp) - log(cv_NB)\npass.cutoff=names(diffCV)[which(diffCV > diffCV.cutoff & (mean_emp > 0.005 & mean_emp < 100))]\n \nplot(mean_emp,cv_emp,pch=pch.use,cex=0.5,col=\"black\",xlab=\"Mean Counts\",ylab=\"CV (counts)\", log=\"xy\", main = main.use)\nif (do.spike) points(mean_emp[spike.genes],cv_emp[spike.genes],pch=16,cex=0.5,col=\"red\")\ncurve(sqrt(1/x), add=TRUE, col=\"red\", log=\"xy\", lty=2, lwd=2)\nor = order(mean_NB)\nlines(mean_NB[or], cv_NB[or], col=\"magenta\", lwd=2)\nif(do.text) text(mean_emp[pass.cutoff],cv_emp[pass.cutoff],pass.cutoff,cex=cex.text.use)\n\nreturn(pass.cutoff)\n}\n\n\n\n"
},
{
"alpha_fraction": 0.634892463684082,
"alphanum_fraction": 0.6395477652549744,
"avg_line_length": 35.67479705810547,
"blob_id": "2593918fa85260a64b6763dabfedc87638985c93",
"content_id": "9bc3d7a8c8a73810f2098cf45c908b5b8c7e3fc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4511,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 123,
"path": "/build_rnaseq_ref/process_gtf.py",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n# created by : Asma Bankapur\n# modified by : Karthik Shekhar\n\n# The following program will read in a gtf file and create a txt file with gene and isoform names as two columns\n\n##### IMPORT MODULES #####\nimport os\nimport argparse\nimport csv \n\n# GLOBALS\nDELIMITER = \"\\t\"\nGTF_COMMENT = \"#!\"\nSPLIT_TOKENS = \"; \"\nSPLIT_COUPLE = ' \"'\nSPLIT_COUPLE_JOIN = ' '\nTRANS_ID = \"transcript_id\"\nDEF_TRANS_ID = \"\"\nSTRIP_QUOTES = '\"'\nTRANS_NAME = \"transcript_name\"\nDEF_TRANS_NAME = \"\"\nJOIN_NAID = \"_\"\nUNKNOWN = '\"unknown\"'\nTOKEN_SEP=\";\"\n\ndef correct_gtf( gtf_file ):\n corrected_file = [ ];\n with open(gtf_file, \"r\") as gtf:\n gtf_reader = csv.reader(gtf, delimiter=DELIMITER)\n for feature_row in gtf_reader:\n\t # Add comment feature as is and continue\n\t if feature_row[0].startswith( GTF_COMMENT ):\n\t corrected_file.append(DELIMITER.join(feature_row))\n\t\tcontinue\n\t ###\n\t #geneid: dict for last element in feature_row\n\t #gene_id_order_list : list to maintain last element token order\n\t ###\n\t geneid = {}\n\t gene_id_order_list = [ ]\n\n\t gene_cood_chr_info = feature_row[0:-1]\n\t token_list_coupled = feature_row[-1].split(SPLIT_TOKENS)\n \n\t # Loop to split each token pair and append to order list\n\t # Also store the token pair in geneid as key val\n for token_couple in token_list_coupled:\n\t token, token_value = token_couple.split(SPLIT_COUPLE)\n token_value=STRIP_QUOTES+token_value\n\t\tgene_id_order_list.append(token)\n\t\tgeneid[token] = token_value\n\t \n\t # Get 'gene_name', if not empty str and strip \"\n\n\t trans_id = geneid.get(TRANS_ID, DEF_TRANS_ID).strip(STRIP_QUOTES)\n\t new_transname = geneid.get(TRANS_NAME, DEF_TRANS_NAME).strip(STRIP_QUOTES)\n\t trans_id = JOIN_NAID.join([id_token for id_token in [new_transname, trans_id] if id_token])\n trans_id = UNKNOWN if not trans_id else STRIP_QUOTES+trans_id+STRIP_QUOTES\n\t geneid[TRANS_ID] = trans_id\n \n\t # ADD ID TOKEN\n\t for id_token in [TRANS_ID]:\n\t if id_token not in gene_id_order_list:\n gene_id_order_list.append(id_token)\n gene_id_str = SPLIT_TOKENS.join([token+SPLIT_COUPLE_JOIN+(geneid[token]).strip(TOKEN_SEP) for token in gene_id_order_list])\n\t gene_id_str += TOKEN_SEP\n\t corrected_file.append(DELIMITER.join(feature_row[0:-1] + [gene_id_str]))\n return(corrected_file) \t \n\ndef gene_to_isomap( gtf_file, col1=None, col2=None ):\n # Makes a gene to iso map table\n if col1 is None:\n col1 = \"gene_name\"\n if col2 is None:\n col2 = \"transcript_id\"\n \n gene_iso = [ ]\n with open(gtf_file, \"r\") as gtf:\n gtf_reader = csv.reader(gtf, delimiter=DELIMITER)\n\n\tfor feature_row in gtf_reader:\n\t if feature_row[0].startswith(GTF_COMMENT):\n\t continue\n\t \n\t token_list_coupled = feature_row[-1].split(SPLIT_TOKENS)\n\n geneid = {}\n\t gene_id_order_list = []\n\t for token_couple in token_list_coupled:\n\t token, token_value = token_couple.split(SPLIT_COUPLE)\n\t\ttoken_value=STRIP_QUOTES+token_value.strip(';')\n\t\tgene_id_order_list.append(token)\n\t\tgeneid[token] = token_value\n\t \n if (not geneid.has_key(col1)) or (not geneid.has_key(col2)):\n\t print \"Error gtf file does not have these keys\"\n\t\treturn -1\n\t \n\t temp_str = DELIMITER.join([geneid[col1].strip('\"'), geneid[col2].strip('\"').strip(';')])\n gene_iso.append(temp_str)\n temp=set(gene_iso)\n gene_iso=list(temp)\n return gene_iso\t \n\n\ndef write_to_file( reconstructed_feature_list, modified_gtf_file):\n with open(modified_gtf_file, \"w\") as mgh:\n 
mgh.write(\"\\n\".join(reconstructed_feature_list))\n\nif __name__ == \"__main__\":\n arg_raw = argparse.ArgumentParser(prog=\"create_gene_iso_map.py\", description=\"Create a txt file with gene and isoform names for RSEM\")\n arg_raw.add_argument(\"--gtf_input\", help=\"Input GTF file (Required).\")\n arg_raw.add_argument(\"--gtf_output\", help=\"Output GTF file (Required).\")\n arg_raw.add_argument(\"--gene_iso_map_file\", help=\"txt file containing gene to iso map columns (Required). \")\n arg_parsed = arg_raw.parse_args()\n #gtf_corrected = correct_gtf( arg_parsed.gtf_input )\n #write_to_file(gtf_corrected, arg_parsed.gtf_output)\n genes_iso_table = gene_to_isomap(arg_parsed.gtf_output, col1=\"gene_name\", col2=\"transcript_id\")\n #print type(genes_iso_table)\n print(len(genes_iso_table))\n write_to_file(genes_iso_table, arg_parsed.gene_iso_map_file)\n"
},
{
"alpha_fraction": 0.7433155179023743,
"alphanum_fraction": 0.7754010558128357,
"avg_line_length": 45.75,
"blob_id": "a7f63e399ce3f113c85a5e26dde351a8494005af",
"content_id": "1d65c8fb8e8f1de3fe21dd15685890c8f302c6a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 4,
"path": "/build_rnaseq_ref/build_RNAseq_ref.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nwd=/seq/regev_genome_portal/RESOURCES/Zebrafish/Zv10\n/seq/regev_genome_portal/SOFTWARE/RNASEQ_genome_pipeline/install_genome_for_pipeline.pl ${wd}/Zv10.fa ${wd}/Zv10.rRNA.fa\n"
},
{
"alpha_fraction": 0.7258883118629456,
"alphanum_fraction": 0.7512690424919128,
"avg_line_length": 55.28571319580078,
"blob_id": "b6dd23b927f51c0b592b7d897d498a998e171f7b",
"content_id": "049b6d1028c5cda09121cd79d4416466cac45efc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 394,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 7,
"path": "/build_rnaseq_ref/tophat_prepare.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ntophatPath=/seq/regev_genome_portal/SOFTWARE/tophat2/current\ndest_annot_file=./Annotations/Zv10/Zv10.minus_nc.corrected.gtf\nannot_install_dir=`readlink -f ./Annotations/Zv10`\ngenome_fasta=Zv10.fa\n${tophatPath}/tophat2 -G ${dest_annot_file} -T --transcriptome-index ${annot_install_dir}/tophat_trans_index -o ./tmp/$$.tophat.out ${genome_fasta} ./tmp/tmp.left.fq ./tmp/tmp.right.fq\n"
},
{
"alpha_fraction": 0.6421621441841125,
"alphanum_fraction": 0.7329729795455933,
"avg_line_length": 56.8125,
"blob_id": "851b1c26e2b79017356b982031454ded32b8d858",
"content_id": "17cf7bafa1d5ac31dd82639660253b2778e10af7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 16,
"path": "/build_rnaseq_ref/prepare_gtf.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# remove ncRNA\ngrep -v \"rRNA\" Danio_rerio.GRCz10.84.gtf > Danio_rerio.GRCz10.84.minus_nc0.gtf\ngrep -v \"snRNA\" Danio_rerio.GRCz10.84.minus_nc0.gtf > Danio_rerio.GRCz10.84.minus_nc.gtf\ngrep -v \"snoRNA\" Danio_rerio.GRCz10.84.minus_nc.gtf > Danio_rerio.GRCz10.84.minus_nc0.gtf\ngrep -v \"scaRNA\" Danio_rerio.GRCz10.84.minus_nc0.gtf > Danio_rerio.GRCz10.84.minus_nc.gtf\ngrep -v \"pseudogene\" Danio_rerio.GRCz10.84.minus_nc.gtf > Danio_rerio.GRCz10.84.minus_nc0.gtf\ngrep -v \"miRNA\" Danio_rerio.GRCz10.84.minus_nc0.gtf > Danio_rerio.GRCz10.84.minus_nc.gtf\ngrep -v \"antisense\" Danio_rerio.GRCz10.84.minus_nc.gtf > Danio_rerio.GRCz10.84.minus_nc0.gtf\n\nmv Danio_rerio.GRCz10.84.minus_nc0.gtf Danio_rerio.GRCz10.84.minus_nc.gtf\nrm Danio_rerio.GRCz10.84.minus_nc0.gtf\n\n# ribosomal RNA\ncat Danio_rerio.GRCz10.ncrna.fa | awk '/^>/ && /rRNA/{flag=1; print; next;} /^>/{flag=0} //{if(flag==1){print}}' > Danio_rerio.GRCz10.rRNA.fa\n"
},
{
"alpha_fraction": 0.735897421836853,
"alphanum_fraction": 0.7615384459495544,
"avg_line_length": 47.75,
"blob_id": "e40b0c9dbf819285003c05aa16df829aefdff385",
"content_id": "a8d5218d3e4b522b538c55bf058074d099088981",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 8,
"path": "/build_rnaseq_ref/rsem_prepare.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nrsemPath=/seq/regev_genome_portal/SOFTWARE/BIN\ngene_iso_map_file=Zv10.gene_to_iso\ndest_annot_file=./Annotations/Zv10/Zv10.minus_nc.corrected.gtf\nannot_install_dir=`readlink -f ./Annotations/Zv10`\ngenome_fasta=Zv10.fa\n${rsemPath}/rsem-prepare-reference --gtf ${dest_annot_file} --transcript-to-gene-map ${gene_iso_map_file} ${genome_fasta} ${annot_install_dir}/rsem_trans_index\n"
},
{
"alpha_fraction": 0.7701863646507263,
"alphanum_fraction": 0.7784678936004639,
"avg_line_length": 55.82352828979492,
"blob_id": "0dc0b1a054b15ef63c7a85d942b9e86acb1014e1",
"content_id": "6afe9626e1ed8e055d407ce6a0c8e58fe07b3ff7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 17,
"path": "/QC_report.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nwd=`readlink -f .`\nconfig=/seq/regev_genome_portal/RESOURCES/Zebrafish/Zv10/Config/Zv10.config\nannot_name=Zv10\nexpmat_name=HabSCZv10\n\n/seq/regev_genome_portal/SOFTWARE/RNASEQ_genome_pipeline/aggregate_links_to_sample_outputs.pl $wd\n\n/seq/regev_genome_portal/SOFTWARE/RNASEQ_genome_pipeline/generate_sample_summary_stats.pl --annot_conf $config --reads_list_file samples.txt --project_base_dir $wd > QC_short.txt\n\n/seq/regev_genome_portal/SOFTWARE/RNASEQ_genome_pipeline/util/summarize_rnaseqQC_results.pl samples.txt $wd > QC_long.txt\n\nfind RSEM_${annot_name}/ -type f | egrep 'genes.results' > rsem.genes.list\n\n/seq/regev_genome_portal/SOFTWARE/RNASEQ_genome_pipeline/merge_RSEM_output_to_matrix.pl --rsem_files rsem.genes.list --mode counts > ${expmat_name}.RSEM.genes.counts.matrix\n/seq/regev_genome_portal/SOFTWARE/RNASEQ_genome_pipeline/merge_RSEM_output_to_matrix.pl --rsem_files rsem.genes.list --mode tpm > ${expmat_name}.RSEM.genes.tpm.matrix\n"
},
{
"alpha_fraction": 0.5486935973167419,
"alphanum_fraction": 0.5724465847015381,
"avg_line_length": 68.83333587646484,
"blob_id": "639646809207cd8b9a0cc2cfde76f01244e4e737",
"content_id": "0a4a52397f99e8c373c676aafe493511cac97421",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 6,
"path": "/build_rnaseq_ref/gene_to_iso_from_gtf.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncat Zv10.minus_nc.gtf | awk '{if (match($0, /gene_name[^;]*;/)){c=substr($0, RSTART, RLENGTH-1); gsub(\"gene_name \",\"\",c); gsub(\"\\\"\",\"\",c); print c}}' > gene_name.txt\ncat Zv10.minus_nc.gtf | awk '{if (match($0, /transcript_id[^;]*;/)){c=substr($0, RSTART, RLENGTH-1); gsub(\"transcript_id \",\"\",c); gsub(\"\\\"\",\"\",c); print c}}' > transcript_id.txt\nsed -i '/^$/d' gene_name.txt\nsed -i '/^$/d' transcript_id.txt\n\n\n"
},
{
"alpha_fraction": 0.6166134476661682,
"alphanum_fraction": 0.6749201416969299,
"avg_line_length": 36.93939208984375,
"blob_id": "4161ae4e80502ad95fff64e4cd3009dda4646054",
"content_id": "9b9373f489f201adc93a1117494eb8373fdc1104",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1252,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 33,
"path": "/10X/call_cellranger.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Wrapper script for 10X pipeline\nmkdir -p qsub_logs\n\n# To Change \nrun_ids=(\"P17_Retina1\" \"P17_Retina2\")\nbcs=(\"SI-3A-A2\" \"SI-3A-C2\")\nbcl_path=/ahg/regev_nextseq/Data/160422_NB501164_0150_AH3MVGBGXY\ntrans_path=/seq/regev_genome_portal/SOFTWARE/10X/refdata-cellranger-1.1.0/mm10\ncellranger_path=/seq/regev_genome_portal/SOFTWARE/10X/cellranger-1.1.0/\n\n# Automatically defined\nn=`expr ${#run_ids[@]} - 1`\nfastq_path0=`basename $bcl_path | cut -f 4 -d'_'`\nfastq_path=./${fastq_path0/A/}/outs/fastq_path\n\n# cell ranger demux\nsed \"s|path_to_bcl|${bcl_path}|g;s|path_to_cellranger|${cellranger_path}|g\" < /home/unix/karthik/repo/dropseq_scripts/10X/demux_10X.sh > demux_10X_${fastq_path0/A/}.sh\n\n# cell ranger run\nfor ((i==0; i<=$n; i++))\ndo\n run_id=${run_ids[$i]}\n barcode=${bcs[$i]}\n sed \"s|my_id|${run_id}|g;s|path_to_fastq|${fastq_path}|g\" < /home/unix/karthik/repo/dropseq_scripts/10X/run_10X.sh > run_10X_${run_id}.sh\n sed -i \"s|path_to_cellranger|${cellranger_path}|g\" run_10X_${run_id}.sh\n sed -i \"s|barcode|${barcode}|g;s|path_to_trans|${trans_path}|g\" run_10X_${run_id}.sh\n sed -i \"s|error.err|${run_id}.10X.err|g;s|out.log|${run_id}.10X.out|g\" run_10X_${run_id}.sh\ndone\n\n#use uger\n#qsub run_10X_${run_id}.sh >> dispatch.txt 2>&1\n"
},
{
"alpha_fraction": 0.7172364592552185,
"alphanum_fraction": 0.7264957427978516,
"avg_line_length": 34.378150939941406,
"blob_id": "1bd6fa7c70a1c59fdc43d61ae4d45d9735498d78",
"content_id": "1d0e4c273835caa7a91136c8546607f2b09defd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4212,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 119,
"path": "/drop-seq/run_Alignment_LSF.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Author: Karthik Shekhar, 05/26/16\n# Template file for executing Drop-seq alignment/quantification steps\n\nset -x\n\nb=fName\nbamName=bamFileName\nfq1=fastq1\nfq2=fastq2\nnumCells=numCellsNum\nreference_fasta=refFasta\nmetaDataDir=metaDataLoc\ntoolsPath=/broad/mccarroll/software/dropseq/prod\nbaseDir=basedir\n\n# STEP 1 : Alignment\n${baseDir}/scripts/run_dsq_alignment.sh ${metaDataDir} ${fq1} ${fq2} ${b}\n\nmv ${baseDir}/bams/${bamName}.bam ${baseDir}/bams/${bamName}_old.bam\nmv ${baseDir}/bams/${bamName}.bam.bai ${baseDir}/bams/${bamName}_old.bam.bai\n\n# STEP 2 : Detect Bead Synthesis errors\n${toolsPath}/DetectBeadSynthesisErrors \\\nI=${baseDir}/bams/${bamName}_old.bam \\\nO=${baseDir}/bams/${bamName}_unmerged.bam \\\nOUTPUT_STATS=${baseDir}/synthesis_err_stats/${bamName}.synthesis_stats.txt \\\nSUMMARY=${baseDir}/synthesis_err_stats/${bamName}.synthesis_stats.summary.txt \\\nNUM_BARCODES=$((numCells*2)) \\\nPRIMER_SEQUENCE=AAGCAGTGGTATCAACGCAGAGTAC \\\nMAX_NUM_ERRORS=2 \\\nCELL_BARCODE_TAG=XC\n\nmv ${baseDir}/bams/${bamName}_unmerged.bam ${baseDir}/bams/${bamName}.bam\nsamtools index ${baseDir}/bams/${bamName}.bam\n\nrm ${baseDir}/bams/${bamName}_old.bam\nrm ${baseDir}/bams/${bamName}_old.bam.bai\n\n# STEP 3: Collapse cell barcodes by edit distance\n${toolsPath}/CollapseBarcodesInPlace \\\nI=${baseDir}/bams/${bamName}.bam \\\nO=${baseDir}/bams/${bamName}_collapsed.bam \\\nPRIMARY_BARCODE=XC \\\nOUT_BARCODE=ZC \\\nMIN_NUM_READS_CORE=5000 \\\nMIN_NUM_READS_NONCORE=1000 \\\nEDIT_DISTANCE=1\n\nrm ${baseDir}/bams/${bamName}.bam\nrm ${baseDir}/bams/${bamName}.bam.bai\nmv ${baseDir}/bams/${bamName}_collapsed.bam ${baseDir}/bams/${bamName}.bam \nsamtools index ${baseDir}/bams/${bamName}.bam\n\n# Summary of collapse\n${toolsPath}/BAMTagofTagCounts \\\nI=${baseDir}/bams/${bamName}.bam \\\nO=${baseDir}/bam_reads/${bamName}.collapse_stats.txt \\\nPRIMARY_TAG=ZC \\\nSECONDARY_TAG=XC\n\n# STEP 4: Bam Tag Histogram\n${toolsPath}/BAMTagHistogram \\\nI=${baseDir}/bams/${bamName}.bam \\\nO=${baseDir}/bam_reads/${bamName}.reads.txt.gz \\\nTAG=ZC\n\ngunzip ${baseDir}/bam_reads/${bamName}.reads.txt.gz\n\n# STEP 5: DropSeqCumuPlot.R to compute inflection in cumulative plots. 
Estimates number of cells in the data\n\nRfile=DropSeqCumuPlot_${bamName}.R\nbamReadsFile=${baseDir}/bam_reads/${bamName}.reads.txt\n#temp1=`echo ${bamReadsFile} | sed 's:\\/:\\\\\\/:g'`\n#temp2=`echo ${baseDir} | sed 's:\\/:\\\\\\/:g'`\\\\\\/bam_reads\\\\\\/${bamName}\nsed \"s|fileName|${bamReadsFile}|g;s|figName|${baseDir}/bam_reads/${bamName}|g\" < ${baseDir}/scripts/DropSeqCumuPlot.R > ${baseDir}/scripts/run_files/DropSeqCumuPlot_${bamName}.R\nR CMD BATCH ${baseDir}/scripts/run_files/DropSeqCumuPlot_${bamName}.R ${baseDir}/bsub_logs/DropSeqCumuPlot.${bamName}.out\n\nnumCells=`cat ${baseDir}/bam_reads/${bamName}_numCells.txt` \nsed \"s|filename_input|${bamReadsFile}|g;s|Ncells_input|${numCells}|g\" < ${baseDir}/scripts/collect_cell_barcodes.R > ${baseDir}/scripts/run_files/collect_cell_barcodes.${bamName}.R\nR CMD BATCH ${baseDir}/scripts/run_files/collect_cell_barcodes.${bamName}.R ${baseDir}/bsub_logs/collect_cell_barcodes.${bamName}.Rout\n\n# STEP 6: DGE UMIs\n${toolsPath}/DigitalExpression I=${baseDir}/bams/${bamName}.bam \\\nO=${baseDir}/UMI_DGE/${bamName}.umi.dge.txt.gz \\\nSUMMARY=${baseDir}/UMI_DGE/${bamName}.umi.dge.summary.txt \\\nCELL_BARCODE_TAG=ZC \\\nNUM_CORE_BARCODES=${numCells}\n\n# STEP 7: DGE READS\n${toolsPath}/DigitalExpression \\\nI=${baseDir}/bams/${bamName}.bam \\\nO=${baseDir}/reads_DGE/${bamName}.reads.dge.txt.gz \\\nSUMMARY=${baseDir}/reads_DGE/${bamName}.reads.dge.summary.txt \\\nCELL_BARCODE_TAG=ZC \\\nNUM_CORE_BARCODES=${numCells} \\\nOUTPUT_READS_INSTEAD=true\n\n# STEP 8: QC\nBASEDIR=/broad/mccarroll/software/dropseq/prod\nsource $BASEDIR/configDropSeqRNAEnvironment.bash\n\n$BASEDIR/DropSeqStandardAnalysis \\\n--BAMFile ${baseDir}/bams/${bamName}.bam \\\n--reference $reference_fasta \\\n--numCells ${numCells} \\\n--estimatedNumCells ${numCells} \\\n--estimatedNumBeads $((numCells*20)) \\\n--report_dir ${baseDir}/QC_files \\\n--pointSize=0.75 \\\n--batchSystem local \\\n--beadSynthesisErrorDetail ${baseDir}/synthesis_err_stats/${bamName}.synthesis_stats.txt \\\n--cellTag XC \\\n--cellTagCollapsed ZC \\\n--outPDF ${baseDir}/QC_reports/${bamName}_QC.pdf \\\n--tempDir ${baseDir}/tempQC \\\n--use_threads \\\n--verbose 1 \n\n"
},
{
"alpha_fraction": 0.7166666388511658,
"alphanum_fraction": 0.7462121248245239,
"avg_line_length": 64.80000305175781,
"blob_id": "184d5de6a5d2c4ebad3435e255adec5ec58cf01d",
"content_id": "f3925cc5fc01fcaae61b82372800704aeecae0b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1320,
"license_type": "no_license",
"max_line_length": 316,
"num_lines": 20,
"path": "/README.md",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "\n\n### Demultiplexing\n\nqsub -v nextseq_loc=/ahg/regev_nextseq/Data/<FOLDER>,fastq_loc=<PATH> demultiplex.sh\n\n### Aligning and quantifying\n\n1. Check that you have the following in your path,\n * R-3.2, Java-1.8, Samtools, Picard-Tools\n2. Check that your local R repository has the package \"ineq\"\n3. Create a working folder. Copy all the attached files in the following directory in a subfolder called \"scripts\",\n4. Ensure sure all the fastq files are in a folder called \"Data\" (case sensitive) next to scripts\n5.The format for the fastq files must be <SampleName>_R1.fastq.gz and <SampleName>_R2.fastq.gz. \n6. Open `run_dsq_pipeline_XXX.sh` (XXX = LSF or uger)\n *Change the `numCells=(6000 1500 3000)` line to indicate the estimated number of cells in each of your samples. This is an example of 3 samples with 6000, 1500 and 3000 cells respectively. The order in which the samples will be processed will be the alphabetical order in which they exist within the folder Data.\n7. while in the scripts folder, type\n *`$chmod +x *.sh`\n8. Now you are all set. You can kick off the pipeline by the following command,\n `$ ./run_dsq_pipeline_XXX.sh` \n\nNote that `run_dsq_pipeline_XXX.sh` uses the LSF job runner and invokes a \"bsub\" command to submit the job to the cluster. This will need to be modified accordingly. \n\n"
},
{
"alpha_fraction": 0.6446570754051208,
"alphanum_fraction": 0.6468899250030518,
"avg_line_length": 35.882354736328125,
"blob_id": "37fcc791af14d267336c88d9ed5b54c9cefb447f",
"content_id": "a9934e91c48857d892b9117407c1f0d4edeb4d35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3135,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 85,
"path": "/build_rnaseq_ref/create_gene_iso_map.py",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\n# Karthik Shekhar\n\n# The following program will read in a gtf file and create a txt file with gene and isoform names as two columns\n\n##### IMPORT MODULES #####\nimport os\nimport argparse\nimport csv \n\n# GLOBALS\nDELIMITER = \"\\t\"\nGTF_COMMENT = \"#!\"\nSPLIT_TOKENS = \"; \"\nSPLIT_COUPLE = ' \"'\nSPLIT_COUPLE_JOIN = ' '\nTRANS_ID = \"transcript_id\"\nDEF_TRANS_ID = \"\"\nSTRIP_QUOTES = '\"'\nTRANS_NAME = \"transcript_name\"\nDEF_TRANS_NAME = \"\"\nJOIN_NAID = \"_\"\nUNKNOWN = '\"unknown\"'\nTOKEN_SEP=\";\"\n\ndef correct_gtf( gtf_file ):\n corrected_file = [ ];\n with open(gtf_file, \"r\") as gtf:\n gtf_reader = csv.reader(gtf, delimiter=DELIMITER)\n for feature_row in gtf_reader:\n\t # Add comment feature as is and continue\n\t if feature_row[0].startswith( GTF_COMMENT ):\n\t corrected_file.append(DELIMITER.join(feature_row))\n\t\tcontinue\n\t ###\n\t #geneid: dict for last element in feature_row\n\t #gene_id_order_list : list to maintain last element token order\n\t ###\n\t geneid = {}\n\t gene_id_order_list = [ ]\n\n\t gene_cood_chr_info = feature_row[0:-1]\n\t token_list_coupled = feature_row[-1].split(SPLIT_TOKENS)\n \n\t # Loop to split each token pair and append to order list\n\t # Also store the token pair in geneid as key val\n for token_couple in token_list_coupled:\n\t token, token_value = token_couple.split(SPLIT_COUPLE)\n token_value=STRIP_QUOTES+token_value\n\t\tgene_id_order_list.append(token)\n\t\tgeneid[token] = token_value\n\t \n\t # Get 'gene_name', if not empty str and strip \"\n\n\t trans_id = geneid.get(TRANS_ID, DEF_TRANS_ID).strip(STRIP_QUOTES)\n\t new_transname = geneid.get(TRANS_NAME, DEF_TRANS_NAME).strip(STRIP_QUOTES)\n\t trans_id = JOIN_NAID.join([id_token for id_token in [new_transname, trans_id] if id_token])\n trans_id = UNKNOWN if not trans_id else STRIP_QUOTES+trans_id+STRIP_QUOTES\n\t geneid[TRANS_ID] = trans_id\n \n\t # ADD ID TOKEN\n\t for id_token in [TRANS_ID]:\n\t if id_token not in gene_id_order_list:\n gene_id_order_list.append(id_token)\n gene_id_str = SPLIT_TOKENS.join([token+SPLIT_COUPLE_JOIN+(geneid[token]).strip(TOKEN_SEP) for token in gene_id_order_list])\n\t gene_id_str += TOKEN_SEP\n\t corrected_file.append(DELIMITER.join(feature_row[0:-1] + [gene_id_str]))\n return(corrected_file) \t \n\ndef write_to_file( reconstructed_feature_list, modified_gtf_file):\n with open(modified_gtf_file, \"w\") as mgh:\n mgh.write(\"\\n\".join(reconstructed_feature.list))\n\nif __name__ == \"__main__\":\n arg_raw = argparse.ArgumentParser(prog=\"create_gene_iso_map.py\", description=\"Create a txt file with gene and isoform names for RSEM\")\n arg_raw.add_argument(\"--gtf_file\", help=\"GTF file (Required).\")\n arg_raw.add_argument(\"--output\", help=\"Output text file path (Required).\")\n arg_parsed = arg_raw.parse_args()\n gtf_corrected = correct_gtf( arg_parsed.gtf_file )\n write_to_file(gtf_corrected, arg_parsed.output)\n print 1\n #genes_iso_table = gene_to_iso( arg_parsed.gtf_file)\n #write_to_file(genes_iso_table, arg_parsed.output)\n"
},
{
"alpha_fraction": 0.750629723072052,
"alphanum_fraction": 0.755667507648468,
"avg_line_length": 35,
"blob_id": "cfecd2d635a446f2d3909cd662e550897c1a674d",
"content_id": "4d6968399c4234d65e46a6eb7394661a21b1a039",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 11,
"path": "/drop-seq/collect_cell_barcodes.R",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "\ncollect_cell_barcodes <- function(filename,Ncells){ \n# Records the top Ncells barcodes, and writes them to a text file \n\ndata = read.table(file=filename)\nbarcodes = data$V2[1:Ncells]\n\nfilename = gsub(\"reads.txt\",\"barcodes_use.txt\",filename)\nwrite.table(as.data.frame(barcodes), file=filename, col.names=FALSE, row.names=FALSE,quote=FALSE)\n}\n\ncollect_cell_barcodes(\"filename_input\", Ncells_input)\n"
},
{
"alpha_fraction": 0.6672694683074951,
"alphanum_fraction": 0.6745027303695679,
"avg_line_length": 26.649999618530273,
"blob_id": "ab4e93aecf4d284b706a5b40f3ef856af0e0a537",
"content_id": "c976f73157e2fe6ed0d12bf723d05703c17c0324",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 20,
"path": "/drop-seq/rnaseq_runner.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n\n#$ -cwd\n#$ -q long\n#$ -P <project space>\n#$ -l m_mem_free=1g\n#$ -N <Name of your run>\n#$ -e </path/to/error_file>\n#$ -o </path/to/out_file>\n\nsource /broad/software/scripts/useuse\ncd </path/to/output_dir>\n/seq/regev_genome_portal/SOFTWARE/KCO/RNASEQ_pipeline/run_RNASEQ_pipeline_many_samples_UGER_array.sh \\\n--annot_conf <annot_config> \\\n--run_conf <path_to_run_config>\\\n--reads_list_file </path/to/read_list_file> \\\n--project_base_dir </path/to/output_dir> \\\n--queue long --memory 20 \\\n--num_threads_each 1\n--project_name <project space>\n"
},
{
"alpha_fraction": 0.6464266777038574,
"alphanum_fraction": 0.6856528520584106,
"avg_line_length": 30.94827651977539,
"blob_id": "c4a0d33b5848ddab71d8aa1ada555342babc1c87",
"content_id": "86892c9c2d61628341e3aa8d4300c44bfa9103a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1861,
"license_type": "no_license",
"max_line_length": 242,
"num_lines": 58,
"path": "/drop-seq/run_dsq_pipeline_SGE.sh",
"repo_name": "karthikshekhar/dropseq_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Author : Karthik Shekhar, 05/26/2016\n# Master file for invoking Drop-seq pipline \n# SGE invocation\n# usage : ./run_dsq_pipeline_LSF.sh [fastqPath]\n# assumes : files are organized as $fastqPath/SampleX_R1.fastq.gz and $fastqPath/SampleX_R2.fastq.gz\n\n#Values depend on sample\nfastqPath=$1\nrefFastaPath=/broad/mccarroll/software/metadata/individual_reference/GRCh37.75_GRCm38.81/m38_transgene/m38_transgene.fasta\nmetaDataDir=/broad/mccarroll/software/metadata/individual_reference/GRCh37.75_GRCm38.81/m38_transgene\nnumCells=(1500 3000 3000) \nqueue=regevlab\nbaseDir=`readlink -f ..`\n\nrm -rf ../Analysis ../bsub_logs ../bam* ../*DGE ../temp* ../QC* ./run_files ../synthesis_err_stats\nmkdir -p ../bsub_logs\nmkdir -p ../Analysis\nmkdir -p ../QC_files\nmkdir -p ../QC_reports\nmkdir -p ../tempQC\nmkdir -p ../bam_reads\nmkdir -p ../synthesis_err_stats\nmkdir -p run_files\n\nmkdir -p ../bams_HUMAN_MOUSE\nmkdir -p ../UMI_DGE\nmkdir -p ../reads_DGE\n\nl=0\nfor fq1 in $fastqPath/*R1*;\ndo\n\n#STEP 1 : CREATE NEW INSTANCE OF RUN FILE FOR SAMPLE\nfq2=${fq1/R1/R2}\n\n#Get absolute paths\nfq1=`readlink -f ${fq1}`\nfq2=`readlink -f ${fq2}`\n\nbfq1=`basename ${fq1}`\nbfq2=`basename ${fq2}`\n\nb0=`echo ${bfq1} | grep -P '^[a-zA-Z0-9\\_]*_R1' -o`\nb=${b0/_R1/}\nsed \"s|fName|${b}|g;s|fastq1|${fq1}|g;s|fastq2|${fq2}|g;s|bamFileName|${b}|g;s|numCellsNum|${numCells[l]}|g;s|refFasta|${refFastaPath}|g;s|metaDataLoc|${metaDataDir}|g;s|basedir|${baseDir}|g\" < run_Alignment_SGE.sh > run_Alignment_SGE_${b}.sh\nerrfile=${baseDir}/bsub_logs/${b}.err\noutfile=${baseDir}/bsub_logs/${b}.out\nsed -i \"s|error.err|${errfile}|g;s|out.out|${outfile}|g\" run_Alignment_SGE_${b}.sh\nchmod +x run_Alignment_SGE_${b}.sh\nmv run_Alignment_SGE_${b}.sh run_files\n\nqsub run_files/run_Alignment_SGE_${b}.sh -e ${baseDir}/bsub_logs/${b}.err -o ${baseDir}/bsub_logs/${b}.out\n\necho ${numCells[l]}\nl=`expr $l + 1`\ndone\n\n\n\n\n\n\n\n\n"
}
] | 20 |
Prakadeeswaran05/Visualize_odom_with_marker
|
https://github.com/Prakadeeswaran05/Visualize_odom_with_marker
|
64eaaed257319e06bd98b613ffa84a5124fd04df
|
457ece8485fee6355679f3769127bcc4f94e1fbd
|
b88c6e2073355cd7451b8bcdd8e2885451c15013
|
refs/heads/main
| 2023-05-01T08:54:38.264014 | 2021-05-18T04:29:58 | 2021-05-18T04:29:58 | 367,694,080 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6819853186607361,
"alphanum_fraction": 0.6948529481887817,
"avg_line_length": 24.77777862548828,
"blob_id": "0ca2ab12cafcf9529827d464fad0e007ee9188f4",
"content_id": "df53c8af5e758c3e47eb841fc6158fba38c0e44d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1632,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 63,
"path": "/viz_odom_with_marker.py",
"repo_name": "Prakadeeswaran05/Visualize_odom_with_marker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#!coding=utf-8\nimport rospy\nfrom visualization_msgs.msg import Marker\nfrom geometry_msgs.msg import Point\nfrom nav_msgs.msg import Odometry\nimport sys\n\nclass odom_viz:\n\n\n\n\tdef __init__(self):\n\t\t\n\t\tself.odom_sub = rospy.Subscriber('/odom',Odometry,self.callback)\n\t\tself.marker_pub=rospy.Publisher(\"marker_test\", Marker, queue_size=10)\n\t\tself.count=0\n\t\tself.marker=Marker()\n\n\tdef callback(self,msg):\n\t\tself.marker.header.frame_id = \"map\"\n\t\tself.marker.header.stamp = rospy.Time.now()\n\t\t\t\t\n\t\tself.marker.id = 0\n\t\tself.marker.action = Marker.ADD\n\t\tself.marker.lifetime = rospy.Duration()\n\t\tself.marker.type = Marker.POINTS\n\t\tself.marker.color.r = 1.0\n\t\tself.marker.color.g = 0.0\n\t\tself.marker.color.b = 0.0\n\t\tself.marker.color.a = 1.0\n\n\t\tself.marker.scale.x = 0.1\n\t\tself.marker.scale.y = 0.1\n\t\tself.marker.scale.z = 0.1 \n\t\tif self.count==0:\n\t\t\tself.marker.points=[]\n\t\t\tself.init_point=Point()\n\t\t\tself.init_point.x = msg.pose.pose.position.x\n\t\t\tself.init_point.y = msg.pose.pose.position.y\n\t\t\tself.init_point.z = msg.pose.pose.position.z\n\t\t\tself.marker.points.append(self.init_point) \n\t\t\tself.marker_pub.publish(self.marker)\n\t\telse:\n\t\t\tself.marker_point=Point()\n\t\t\tself.marker_point.x = msg.pose.pose.position.x\n\t\t\tself.marker_point.y = msg.pose.pose.position.y\n\t\t\tself.marker_point.z = msg.pose.pose.position.z\n\t\t\tself.marker.points.append(self.marker_point)\n\t\t\tself.marker_pub.publish(self.marker)\n \n\t\tself.count+=1\n\ndef main(args):\n\tod = odom_viz()\n\trospy.init_node('odom_viz', anonymous=True)\n\ttry:\n\t\trospy.spin()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\nif __name__ == '__main__':\n\tmain(sys.argv)\n\n \n\n\n"
},
{
"alpha_fraction": 0.612500011920929,
"alphanum_fraction": 0.625,
"avg_line_length": 15,
"blob_id": "e789adb77b8501805af0555b561a42cb9cff0a8d",
"content_id": "ae1f729e4fce5560827ae483a32b814e7eecffcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 5,
"path": "/README.md",
"repo_name": "Prakadeeswaran05/Visualize_odom_with_marker",
"src_encoding": "UTF-8",
"text": "# Visualize_odom_with_marker\n\n<p align=\"left\">\n <img src=\"output1.gif\" />\n</p>\n"
}
] | 2 |
carefreedavid/oj
|
https://github.com/carefreedavid/oj
|
d6cd181c5b530c32b4fab264fbe1c89565984dc7
|
f560fc291e303af0a7ce145ffb2a1d2e1fed424d
|
1001ec09b8b9abf2f0d37eea33d57a9904277e92
|
refs/heads/master
| 2021-01-11T16:30:58.777461 | 2017-01-26T07:50:33 | 2017-01-26T07:50:33 | 80,094,977 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5046248435974121,
"alphanum_fraction": 0.5046248435974121,
"avg_line_length": 23.94871711730957,
"blob_id": "7d48e6ecbc72d7e6c4bb7eeed61331f7be6f21c6",
"content_id": "0c5169c4cd252a7cb8d251284dd5ed70a914334a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 973,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 39,
"path": "/_unfinsihed/search.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "import csv\nimport sys\nimport defy\n\n# prompt user to enter city to search\nrestart = 'now'\nwhile restart == 'now':\n print \"Which city do you want to search?\"\n print \"~ cape town, durban, joburg, pretoria ~\"\n answer = raw_input(\"> \")\n\n # reads jhb csv\n if answer == 'joburg':\n defy.index(answer)\n if defy.circle() == '':\n restart = ''\n\n # reads ct csv\n elif answer == 'cape town':\n defy.index(answer)\n if defy.circle() == '':\n restart = ''\n\n # read dbn csv\n elif answer == 'durban':\n defy.index(answer)\n if defy.circle() == '':\n restart = ''\n\n # read pta csv\n elif answer == 'pretoria':\n defy.index(answer)\n if defy.circle() == '':\n restart = ''\n\n else:\n print \"Something went wrong, try again using one of these:\"\n print \"cape town, durban, joburg, pretoria\"\n print \"-------------------------------------------------\"\n"
},
{
"alpha_fraction": 0.598802387714386,
"alphanum_fraction": 0.6047903895378113,
"avg_line_length": 19.24242401123047,
"blob_id": "0353b6def304dbc757b1b49b2a1a57117eeedce7",
"content_id": "f8ff92c921da12d75bc0103b7ac20ad48e70e579",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 33,
"path": "/alph_oj/alph_oj.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "import sys\nimport csv\n\nalph_csv = 'alph.csv'\n\nnumber = []\nletter = []\ncount = []\naddit = []\n\nprint \"hey, type dat letter below:\"\nletter_raw = raw_input(\"> \")\nletter[:0] = letter_raw\n\nfor i in letter:\n with open(alph_csv) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n keep = row[i]\n number.append(keep)\n county = len(number)\n count.append(county-1)\n\ncount.reverse()\nmy_dic = dict(zip(count, number))\n\nfor key, position in my_dic.items():\n answer = ((26 ** int(key)) * int(position))\n addit.append(answer)\n\nthe_answer = sum(addit)\nprint \"your very own number is: \"\nprint the_answer\n"
},
{
"alpha_fraction": 0.6463414430618286,
"alphanum_fraction": 0.6463414430618286,
"avg_line_length": 15.300000190734863,
"blob_id": "1ec46bf619286f6adaba37db05ed493628fc1f7c",
"content_id": "dcb07ecc65d8a14eb5e230220a44743bbeaae990",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 10,
"path": "/_unfinsihed/rand_csv.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "import csv\nimport sys\nimport random\n\nkeep_list = []\n # turns reader into a dic\nwith open('rand.csv') as f:\n d = dict(filter(None, csv.reader(f)))\n\nprint d \n"
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 13.75,
"blob_id": "fd6f3a11bc04ce6e04912bf8879a7259969d7072",
"content_id": "d07b477a30ddad7f7d5c3adfed6c6fc4b2527f42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 4,
"path": "/_unfinsihed/f.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "import defy\n\nresult = defy.index('City Bowl')\nprint result \n"
},
{
"alpha_fraction": 0.551980197429657,
"alphanum_fraction": 0.5643564462661743,
"avg_line_length": 15.833333015441895,
"blob_id": "b2e8b9bf5323d45e6ea5eb9fc8173fe0be34875b",
"content_id": "ac3b4a4881b3f28c6818dc72f5e54d0dec919564",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 24,
"path": "/_unfinsihed/ct_search.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "# this script wants to allow the user to choose a region in the Cape Town area.\n\nimport csv\nimport sys\n\nct_file = open('ct_reg.csv',\"rb\")\n\nreader = csv.reader(ct_file)\n\nrownum = 0\nfor row in reader:\n # Save header row.\n if rownum == 0:\n header = row\n\n else:\n colnum = 0\n for col in row:\n print '%s' % col\n colnum += 1\n\n rownum += 1\n\nct_file.close()\n"
},
{
"alpha_fraction": 0.43809524178504944,
"alphanum_fraction": 0.43809524178504944,
"avg_line_length": 25.25,
"blob_id": "d7e3abf35f21987465ddcd49abe9cb921c0f4edd",
"content_id": "017e43f8f1d7964d2583b11331679112dd9017c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2100,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 80,
"path": "/_unfinsihed/area_oj/defy.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "import csv\nimport sys\n\n# a function that gives the user a set of suggestions and returns subburbs in an area.\nct_file = 'ct.csv'\nct_sub = 'ct_sub.csv'\ndbn_file = 'dbn.csv'\ndbn_sub = 'dbn_sub.csv'\njhb_file = 'jhb.csv'\npta_file = 'pta.csv'\npta_sub = 'pta_sub.csv'\n\ndef index(area):\n\n if area == 'cape town':\n filey = ct_file\n sub = ct_sub\n elif area == 'durban':\n filey = dbn_file\n sub = dbn_sub\n elif area == 'joburg':\n filey = jhb_file\n sub = ct_sub\n elif area == 'pretoria':\n filey = pta_file\n sub = pta_sub\n\n print \"\"\"\n-------------------------------------------\nWhich area in %s would you like to search?\n-------------------------------------------\n~ type \\'y\\' if you want some suggestions ~\n\n \"\"\" % area\n\n subburb = raw_input(\"> \")\n\n while subburb == 'y':\n\n print \"\"\"\n------------------------------------------\nsuggested subburb searches\n------------------------------------------\"\"\"\n\n keep_list = []\n # turns reader into a dic\n with open(sub) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n keep = row['sub']\n print keep\n\n print \"\"\"\n---------------------------------------------\nWhich area in %s would you like to search?\n---------------------------------------------\"\"\" % area\n subburb = raw_input(\"> \")\n\n print \"-----------------------------------------------\"\n print \"Here are the results for the %s of %s\" % (subburb, area)\n print \"-----------------------------------------------\"\n\n keep_list = []\n # turns reader into a dic\n with open(filey) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n keep = row[subburb]\n print keep\n\ndef circle():\n print \"---\"\n print \"Do you want to search anything else? (yes or no)\"\n answer = raw_input(\"> \")\n if answer == 'yes':\n restart = 'now'\n else:\n print \"thank you, come again\"\n restart = ''\n return restart\n"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6534653306007385,
"avg_line_length": 15.833333015441895,
"blob_id": "c71bd16a08722b22996436e72afc55d0287774a2",
"content_id": "7d0faa9cb26f4b4e54421aba8e810e0b0e6c460b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 6,
"path": "/_unfinsihed/fml.py",
"repo_name": "carefreedavid/oj",
"src_encoding": "UTF-8",
"text": "number = [1, 2]\ncount = [0, 1]\naddition = []\n\ndictionary = dict(zip(count, number))\nprint dictionary\n"
}
] | 7 |
mnw247/Python
|
https://github.com/mnw247/Python
|
357502ceeaa522795e2e0ad27d84fe7add807181
|
9da0c4178bfb41b008751ee55667f19f582def1e
|
5b79e8fc342c68c0cae1866e6bf25d7c8a223fe9
|
refs/heads/master
| 2020-04-30T20:35:22.753753 | 2019-03-22T03:43:00 | 2019-03-22T03:43:00 | 176,868,735 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6689655184745789,
"alphanum_fraction": 0.6908046007156372,
"avg_line_length": 31.259260177612305,
"blob_id": "9bc37218115f5bb1352bcb4b01eb1bd9593241ea",
"content_id": "bf1859ffb8fe6af60266a64eb881e6e6a458c915",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 870,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/python_stack/python_OOP/TDD/allTests.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom myHomework import reverseList, isPalindrome, coins, factorial, fib\nclass reverseListTest(unittest.TestCase):\n def test1(self):\n return self.assertEqual(reverseList([1,3,5]), [5,3,1])\n def test2(self):\n return self.assertEqual(reverseList(2,4,-3), [-3,4,2])\n\nclass isPalindromeTest(unittest.TestCase):\n def test1(self):\n return self.assertEqual(isPalindrome(\"racecar\"), True)\n def test2(self):\n return self.assertEqual(isPalindrome(\"rabbit\", False))\n\nclass coinsTest(unittest.TestCase):\n def test1(self):\n return self.assertEqual(coinsTest, ?)\n\nclass factorialTest(unittest.TestCase):\n def test1(self):\n return self.assertEqual(factorialTest, ?)\n\nclass fibTest(unittest.TestCase):\n def test1(self):\n return self.assertEqual(fibTest, ?)\nif __name__ == \"__main__\":\n unittest.main()"
},
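The test file above imports coins, factorial and fib, but myHomework.py (the module under test, which appears later in this repo) never defines them. Below is a minimal sketch of implementations that would let those imports resolve; factorial and fib follow their usual definitions, while the intended behavior of coins is never specified anywhere in the repo, so the minimum-US-coins reading is an assumption:

def factorial(n):
    # standard iterative factorial, with factorial(0) == 1
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result

def fib(n):
    # iterative Fibonacci with fib(0) == 0 and fib(1) == 1
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

def coins(amount):
    # ASSUMPTION: fewest US coins that make up an amount given in cents
    counts = {}
    for name, value in (('quarters', 25), ('dimes', 10), ('nickels', 5), ('pennies', 1)):
        counts[name], amount = divmod(amount, value)
    return counts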
{
"alpha_fraction": 0.682445764541626,
"alphanum_fraction": 0.6903353333473206,
"avg_line_length": 32.86666488647461,
"blob_id": "674c459e83616e459567608ab5c976e408cba288",
"content_id": "dc59aa5e67445c5b7d86c0a24ac8283e0879fc02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 15,
"path": "/python_stack/django/py3/all_projects/generator/views.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse, redirect\nfrom django.utils.crypto import get_random_string\n\ndef index(request):\n if \"attempts\" in request.session:\n request.session['attempts'] += 1\n else:\n request.session[\"attempts\"] = 0\n content = {\"unique_id\" : get_random_string(length=14) }\n return render(request, \"generator/index.html\", content)\n\ndef reset(request):\n if \"attempts\" in request.session:\n request.session.flush()\n return redirect('/random_word/')"
},
{
"alpha_fraction": 0.6210873126983643,
"alphanum_fraction": 0.6210873126983643,
"avg_line_length": 30.39655113220215,
"blob_id": "6f564e92ee4e62a7508cfebf3c43c2fc63eaff7b",
"content_id": "bba0799d1fd1c9e73adc240ad668338dc3eab6ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1821,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 58,
"path": "/python_stack/django/py3/Restful-Users-master/apps/users/views.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import *\n\ndef index(request):\n for object in User.objects.all():\n context = {\n 'id': object.id,\n 'first_name': object.first_name,\n 'last_name': object.last_name,\n 'email': object.email,\n 'created_at': object.created_at,\n }\n context = {\n 'user_data': User.objects.all(),\n }\n return render(request, 'users/index.html', context)\n\ndef new(request):\n return render(request, 'users/new.html')\n\ndef create(request):\n errors = User.objects.basic_validator(request.POST)\n if len(errors):\n for tag, error in errors.items():\n messages.error(request, error, extra_tags = tag)\n return redirect('/users/new')\n\n user = User.objects.create(first_name = request.POST['first_name'], last_name = request.POST['last_name'], email = request.POST['email'])\n # id = User.objects.last().id\n return redirect('/users/'+str(user.id))\n\ndef show(request, id):\n context = {\n 'id': id,\n 'first_name': User.objects.get(id=id).first_name,\n 'last_name': User.objects.get(id=id).last_name,\n 'email': User.objects.get(id=id).email,\n 'created_at': User.objects.get(id=id).created_at,\n }\n return render(request, 'users/show.html', context)\n\ndef edit(request, id):\n return render(request, 'users/edit.html', {'id': id})\n\ndef update(request, id):\n user = User.objects.get(id=id)\n user.first_name = request.POST['first_name']\n user.last_name = request.POST['last_name']\n user.email = request.POST['email']\n user.save()\n\n return redirect('/users/'+str(id))\n\ndef destroy(request, id):\n User.objects.get(id=id).delete()\n\n return redirect('/users/index')\n"
},
{
"alpha_fraction": 0.6103461384773254,
"alphanum_fraction": 0.6222553253173828,
"avg_line_length": 43.79999923706055,
"blob_id": "16067963d86bf3fd660b2fa0a6ef36cd8892fb8d",
"content_id": "3503916b1d06dff393c2e57cff1f4b6d95a37ed9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2687,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 60,
"path": "/exam/apps/trip/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\nfrom datetime import datetime\nimport re\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\nclass UserManager(models.Manager):\n def validator(self,request):\n errors = {}\n if len(request['first_name']) < 2 or not str.isalpha(request['first_name']):\n errors['first_name'] = 'first name must be at least 2 charachters long and contain no numbers'\n if len(request['last_name']) < 2 or not str.isalpha(request['last_name']):\n errors['last_name'] = 'last name must be at least 2 charachters long and contain no numbers'\n if len(request['email']) < 1 or not EMAIL_REGEX.match(request['email']):\n errors['email'] = 'must enter a vlaid email address'\n if len(request['password']) < 8:\n errors['password'] = 'pasword must be at least 8 charachters'\n if request['confirm_pw'] != request['password'] :\n errors['confirm_pw'] = 'your passwords do not match'\n return errors\n \n def tripValidator(self,request):\n errors = {}\n today = datetime.now()\n if len(request['destination']) < 1:\n errors['destination'] = \"No Destination Typed\"\n if len(request['description']) < 1:\n errors['description'] = \"No Description Typed\"\n if len(request['sdate']) < 1:\n errors['sdate'] = \"No Travel Date From Typed\"\n return errors\n if len(request['edate']) < 1:\n errors['edate'] = \"No Travel Date To Typed\"\n return errors\n startdate = datetime.strptime(request['sdate'],'%Y-%m-%d')\n enddate = datetime.strptime(request['edate'],'%Y-%m-%d')\n if startdate <= today:\n errors['sdate'] = \"Start Date is Invalid\"\n if enddate <= startdate:\n errors['edate'] = \"End Date must be after Start Date\"\n return errors\n\nclass User(models.Model):\n first_name = models.CharField(max_length = 255)\n last_name = models.CharField(max_length = 255)\n email = models.CharField(max_length = 255,unique=True)\n password = models.CharField(max_length = 255)\n objects = UserManager()\n \nclass Trip(models.Model):\n destination = models.CharField(max_length = 50)\n description = models.TextField(max_length=100)\n sdate = models.DateField()\n edate = models.DateField()\n creator = models.ForeignKey(User,related_name=\"created_trips\")\n trip_members = models.ManyToManyField(User, related_name=\"joined_trips\")\n objects = UserManager()\n # created_at = models.DateTimeField(auto_now_add=True)\n # updated_at = models.DateTimeField(auto_now=True)"
},
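The creator ForeignKey and trip_members ManyToManyField defined above drive every query in this exam app's views (shown later in this section). A compressed sketch of the access patterns, with user_id and trip_id standing in for the session and URL values:

# trips the logged-in user has joined vs. everything else (the dashboard view)
joined = Trip.objects.filter(trip_members=user_id)
available = Trip.objects.exclude(trip_members=user_id)

# joining and leaving a trip through the reverse M2M accessor joined_trips
user = User.objects.get(id=user_id)
trip = Trip.objects.get(id=trip_id)
user.joined_trips.add(trip)
user.joined_trips.remove(trip)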
{
"alpha_fraction": 0.6823658347129822,
"alphanum_fraction": 0.6955093145370483,
"avg_line_length": 35.52000045776367,
"blob_id": "07805aa9822a06c104c1983f4a6af24310b50436",
"content_id": "12a3a71355c7d76f66e9c61cff2018458a7bbc97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 25,
"path": "/python_stack/django/py3/likes_books/apps/like/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\n\nclass User(models.Model):\n first_name = models.CharField(max_length=255)\n last_name = models.CharField(max_length=255)\n email = models.CharField(max_length=255)\n \n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __repr__(self):\n return f'User(first_name={self.first_name},last_name={self.last_name}, email={self.email})'\n\nclass Book(models.Model):\n name = models.CharField(max_length=255)\n desc = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n uploaded_by = models.ForeignKey(User, related_name=\"uploads\")\n liked_by = models.ManyToManyField(User, related_name=\"books_liked\")\n\n def __repr__(self):\n return f'Book(name={self.name},desc={self.desc})'\n"
},
{
"alpha_fraction": 0.6432977914810181,
"alphanum_fraction": 0.6432977914810181,
"avg_line_length": 33.97058868408203,
"blob_id": "0a159e2e36d3a8e7c55e9b8d6e716f3e96c54177",
"content_id": "85c94b83971a458adbf4cf424cb06f887ad7b247",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3566,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 102,
"path": "/exam/apps/trip/views.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom django.db import IntegrityError\nfrom django import forms\nimport bcrypt\nfrom .models import *\n\ndef index(request):\n return render(request,'trip/index.html')\n\ndef dashboard(request):\n if \"id\" in request.session:\n content = {\n \"trips\" : Trip.objects.filter(trip_members = request.session['id']), \n \"alltrips\" : Trip.objects.exclude(trip_members=request.session['id'])\n }\n return render(request,'trip/dashboard.html', content)\n else:\n return redirect('/')\n\ndef login(request):\n try:\n user = User.objects.get(email=request.POST['login_email'])\n if bcrypt.checkpw(request.POST['login_password'].encode(), user.password.encode()):\n request.session['first_name'] = user.first_name\n request.session['id'] = user.id\n return redirect('/dashboard')\n except User.DoesNotExist:\n pass\n messages.error(request, 'Login unsuccessful. Plase check email and passowrd, and try again.', extra_tags='login')\n return redirect('/')\n\ndef register(request):\n errors = User.objects.validator(request.POST)\n pwHash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect('/')\n else:\n try:\n user = User.objects.create(first_name = request.POST['first_name'],last_name = request.POST['last_name'],email = request.POST['email'],password = pwHash)\n except IntegrityError:\n messages.error(request, 'this email already exists', extra_tags='email')\n return redirect('/')\n request.session['first_name'] = request.POST['first_name']\n request.session['id'] = user.id\n return redirect('/dashboard')\n\ndef logout(request) :\n request.session.flush()\n return redirect('/')\n\ndef addtrip(request):\n if \"id\" in request.session:\n return render(request,'trip/addtrip.html')\n return redirect(\"/\")\n\ndef tripadded(request):\n errors = Trip.objects.tripValidator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect('/addtrip')\n else:\n Trip.objects.create(\n destination=request.POST['destination'],\n description=request.POST['description'],\n sdate=request.POST['sdate'],\n edate=request.POST['edate'], \n creator = User.objects.get(id = request.session['id'])\n )\n UID = User.objects.get(id=request.session['id'])\n TID = Trip.objects.last()\n UID.joined_trips.add(TID)\n return render(request,'trip/addtrip.html')\n\ndef view(request,id):\n trip = Trip.objects.get(id=id)\n content = {\n \"trip\":trip,\n \"joinedusers\":trip.trip_members.all()\n }\n #\"joinedusers\":User.objects.filter(joined_trips=Trip.objects.get(id=id))\n return render(request,'trip/view.html', content)\n\ndef join(request,id):\n UID = User.objects.get(id=request.session['id'])\n TID = Trip.objects.get(id=id)\n UID.joined_trips.add(TID)\n return redirect('/dashboard')\n\ndef cancel(request,id):\n UID = User.objects.get(id=request.session['id'])\n TID = Trip.objects.get(id=id)\n UID.joined_trips.remove(TID)\n return redirect('/dashboard')\n\ndef delete(request,id):\n TID = Trip.objects.get(id=id)\n TID.delete()\n return redirect('/dashboard')"
},
{
"alpha_fraction": 0.5517799258232117,
"alphanum_fraction": 0.5655339956283569,
"avg_line_length": 21.089284896850586,
"blob_id": "dfa8cccfcd93a8dbe62e8dc20ed21aefbbb271b1",
"content_id": "053be9d1420b64d29950d221b69f8e55dcd027e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1236,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 56,
"path": "/python_stack/python_OOP/animal.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "class animal:\n def __init__(self, name, health):\n self.name = name\n self.health = health\n\n def walk(self):\n print('walking')\n self.health -= 1\n return self\n\n def run(self):\n print('running')\n self.health -= 5\n return self\n\n def displayHealth(self):\n print(self.name,\"Health: \" + str(self.health))\n\nclass dog(animal):\n def __init__(self, name):\n super().__init__(name, 150)\n\n def pet(self):\n self.health += 5\n print(\"pet\")\n return self\n\nclass dragon(animal):\n def __init__(self, name):\n super().__init__(name, 170)\n\n def fly(self):\n print(\"fly\")\n self.health -= 10\n return self\n \n def displayHealth(self):\n super().displayHealth()\n print(\"I am a Dragon\")\n\nclass bird(animal):\n def __init__(self, name, health):\n super().__init__(name, health)\n\n\nanimal1 = animal('squirrel', 50)\nanimal1.walk().walk().walk().run().run().displayHealth()\n\nchihuahua = dog(\"whatsupdawg\")\nchihuahua.walk().walk().walk().run().run().pet().displayHealth()\n\npuff = dragon(\"puffthemagic\")\npuff.walk().fly().displayHealth()\n\nostrich = bird(\"ostrich\", 90)\nostrich.walk().run().displayHealth()"
},
{
"alpha_fraction": 0.6253443360328674,
"alphanum_fraction": 0.6423324346542358,
"avg_line_length": 44.39583206176758,
"blob_id": "53b77083fd627aa37cc2ee7e6d8d64d5d2154f74",
"content_id": "476efa1fcc00ebf839b1ce1d1298898f691bc53a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2178,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 48,
"path": "/shannon/apps/giftfinder/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\nfrom datetime import datetime\nimport re\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\nclass UserManager(models.Manager):\n def validator(self,request):\n errors = {}\n if len(request['first_name']) < 2 or not str.isalpha(request['first_name']):\n errors['first_name'] = 'first name must be at least 2 charachters long and contain no numbers'\n if len(request['last_name']) < 2 or not str.isalpha(request['last_name']):\n errors['last_name'] = 'last name must be at least 2 charachters long and contain no numbers'\n if len(request['email']) < 1 or not EMAIL_REGEX.match(request['email']):\n errors['email'] = 'must enter a vlaid email address'\n if len(request['password']) < 8:\n errors['password'] = 'pasword must be at least 8 charachters'\n if request['confirm_pw'] != request['password'] :\n errors['confirm_pw'] = 'your passwords do not match'\n return errors\n \n def jobValidator(self,request):\n errors = {}\n if len(request['title']) < 4:\n errors['title'] = \"Title Must Be Greater than 3 Characters\"\n if len(request['description']) < 11:\n errors['description'] = \"Description Must Be Greater than 10 Characters\"\n if len(request['location']) < 1:\n errors['location'] = \"Location must not be blank\"\n return errors\n\nclass User(models.Model):\n first_name = models.CharField(max_length = 255)\n last_name = models.CharField(max_length = 255)\n email = models.CharField(max_length = 255,unique=True)\n password = models.CharField(max_length = 255)\n objects = UserManager()\n \nclass Job(models.Model):\n title = models.CharField(max_length = 50)\n description = models.TextField(max_length=100)\n location = models.TextField(max_length=100)\n postedon = models.DateTimeField(auto_now_add=True)\n creator = models.ForeignKey(User,related_name=\"created_jobs\")\n jobsadded = models.ManyToManyField(User, related_name=\"joined_jobs\")\n objects = UserManager()\n # updated_at = models.DateTimeField(auto_now=True)"
},
{
"alpha_fraction": 0.6436452865600586,
"alphanum_fraction": 0.6567225456237793,
"avg_line_length": 42.67856979370117,
"blob_id": "2540874b7faf6da04cb9b998d359460c8fc0477f",
"content_id": "e1a1992fd52746cb8dab7d6402a8b6e993bb6052",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2447,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 56,
"path": "/python_stack/django/py3/micahs_belt_exam/apps/book_review/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django import forms\nfrom datetime import datetime\nimport re, bcrypt\n\nemail_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\nclass UserManager(models.Manager):\n def regValidator(self, postData):\n errors = {}\n if len(postData['first_name'])<2 or not postData['first_name'].isalpha():\n errors['first_name'] = \"First Name is not long enough\"\n if len(postData['last_name'])<2 or not postData['last_name'].isalpha():\n errors['last_name'] = \"Last Name is not long enough\"\n if User.objects.filter(email = postData['email']):\n errors['email_exists'] = \"Email is already Registered\"\n if email_regex.match(postData['email']) == None:\n errors['email_format'] = \"Email must be a valid format\"\n if len(postData['password'])<5:\n errors['password'] = \"Invalid Password! Must have no fewer than 5 characters\"\n if postData['password'] != postData['verifypass']:\n errors['verifypass'] = \"Password confirmation must match password.\"\n return errors\n\n def loginValidator(self, postData):\n user = User.objects.filter(email = postData['email_login']).first()\n errors = {}\n if not user:\n errors['email'] = \"Please Enter Valid Email\"\n elif not bcrypt.checkpw(postData['password_login'].encode('utf8'), user.password.encode('utf8')):\n errors['email'] = \"Invalid Password\"\n return errors\n\nclass User(models.Model):\n first_name = models.CharField(max_length=255)\n alias = models.CharField(max_length=255)\n email = models.CharField(max_length=255)\n password = models.CharField(max_length=255)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n\n objects = UserManager()\n\nclass Book(models.Model):\n title = models.CharField(max_length=255)\n author = models.CharField(max_length=255)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n\nclass Review(models.Model):\n comments = models.TextField(max_length=1000)\n stars = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n user = models.ForeignKey(User, related_name='reviewed_books')\n book = models.ForeignKey(Book, related_name='book_reviews')\n\n"
},
{
"alpha_fraction": 0.5757575631141663,
"alphanum_fraction": 0.5770470499992371,
"avg_line_length": 37.775001525878906,
"blob_id": "1b041440ccc432d670f37be3c2c8b9b4a96179fa",
"content_id": "62574605d02a80b6c1ad1ef98d10e93e3fe0b33d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1551,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 40,
"path": "/python_stack/flask_fundamentals/dojo_survey_without_validation/server.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "# [ Assignment: Dojo Survey ]\n'''\nObjectives:\n===================================================\n<> Shall building a server with Flask from scratch\n<> Shall pass paas information to the routes\n<> Shall generate different http responses url requests:\n========================================================='''\n\n# import the Flask libray\n# module Class Method Method object\nfrom flask import Flask, render_template, redirect, request, session\n\nlanguages = ['Ruby','Python','JavaScript','PHP','HTML','CSS','C++','SQL']\nlocations = ['Dallas','San Jose','Seattle','Colorado','L.A.']\n\n# Backend server setup\n# Object\napp = Flask (__name__)\napp.secret_key = 'ThisIsSecret'\n\n# index route shall handle form rendering\[email protected] ('/')\ndef index ():\n return render_template(\"index.html\", language=languages, location=locations)\n\[email protected] ('/profile', methods = ['POST'])\ndef profile ():\n underline = \"\\n\"\n underline = \"-\" * 13\n session['index'] = request.form['username']\n # Terminal print-debugginh\n print (session['index'])\n print ('Got Post Indo:\\n%s\\n' % underline)\n print ('Requested user named: ', request.form['username']) # Print first name in terminal\n print ('Requested user location: ', request.form['location']) # Print email in terminal\n return render_template ('profile.html') # Redirects back to index '/'\n\nif __name__ == '__main__': # Run the server applicatin\n app.run (debug = True)\n"
},
{
"alpha_fraction": 0.6811594367027283,
"alphanum_fraction": 0.6811594367027283,
"avg_line_length": 24,
"blob_id": "0f11374ae2762e047ffd21b692c06b7b782a5485",
"content_id": "6a2712e251c0a7c1b08bfb388ea55894af27a5ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 11,
"path": "/python_stack/flask_fundamentals/Dojo_Survey/dojosurvey.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, redirect, request\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n return render_template(\"index.html\")\n\[email protected]('/results', methods = ['POST'])\ndef results():\n return render_template(\"results.html\")\napp.run(debug=True)\n\n"
},
{
"alpha_fraction": 0.5856950283050537,
"alphanum_fraction": 0.6491228342056274,
"avg_line_length": 19.61111068725586,
"blob_id": "57d76d5b560dea060b4598e23d12a7e00b659d68",
"content_id": "372d86e1886e0501fc5f0a9f079e0a137bc45d64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 36,
"path": "/python_stack/python_fundamentals/FunctionsIntermediate1.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "# randInt() returns a random integer between 0 to 100\nimport random\ndef randInt():\n x = 0\n x = int(random.random()*101)\n return x\ny = randInt()\nprint (y)\n\n# randInt(max=50) returns a random integer between 0 to 50\nimport random\ndef randInt(max=51):\n x = 0\n x = int(random.random()*max)\n return x\ny = randInt()\nprint (y)\n\n# randInt(min=50) returns a random integer between 50 to 100\nimport random\ndef randInt(max=51):\n x = 0\n x = int(random.random()*max)+50\n return x\ny = randInt()\nprint (y)\n\n# randInt(min=50, max=500) returns a random integer between 50 and 500\nimport random\ndef randInt(min=50,max=500):\n x = 0\n max = max-49\n x = int(random.random()*(max+1-min))+min\n return x\ny = randInt()\nprint (y)"
},
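For reference, random.randint is inclusive on both ends, so each of the four variants above can be produced directly, without scaling random.random() by hand:

import random

print(random.randint(0, 100))    # 0 to 100
print(random.randint(0, 50))     # 0 to 50
print(random.randint(50, 100))   # 50 to 100
print(random.randint(50, 500))   # 50 to 500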
{
"alpha_fraction": 0.5383838415145874,
"alphanum_fraction": 0.5717171430587769,
"avg_line_length": 27.285715103149414,
"blob_id": "133b5960ca910a6a918b48c6c4d84f1af69f0b69",
"content_id": "a86e6327b0abf5f7cf4f58932932a371f095372c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 35,
"path": "/exam/apps/trip/migrations/0004_auto_20180626_2043.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2018-06-26 20:43\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('trip', '0003_auto_20180626_1637'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='trip',\n name='user',\n ),\n migrations.RemoveField(\n model_name='trip',\n name='users',\n ),\n migrations.AddField(\n model_name='trip',\n name='creator',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='created_trips', to='trip.User'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='trip',\n name='trip_members',\n field=models.ManyToManyField(related_name='joined_trips', to='trip.User'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.541208803653717,
"alphanum_fraction": 0.6160714030265808,
"avg_line_length": 25.490909576416016,
"blob_id": "40c262349a16b6caa1ff601c82bb4db8ac3fbacc",
"content_id": "1e480e3ff1d15ceb6717ae1e852637d05fb7d279",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1456,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 55,
"path": "/python_stack/python_fundamentals/LoopBasic1.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "#1) Basic - Print all the numbers/integers from 0 to 150.\nfor count in range(0,151):\n print(count)\n\n#2) Multiples of Five - Print all the multiples of 5 from 5 to 1,000,000.\nfor count in range (0,1000001,5):\n print(count)\n\n#3) Counting, the Dojo Way - Print integers 1 to 100. If divisible by 5, print \"Coding\" instead. If by 10, also print \" Dojo\".\nfor count in range (1,101):\n x = ''\n if count % 5 == 0:\n x = \"Coding\"\n if count % 10 == 0:\n x = x + \" Dojo\"\n print (x)\n else:\n print (count)\n\n#4) Whoa. That Sucker's Huge - Add odd integers from 0 to 500,000, and print the final sum.\nsum = 0\nfor x in range (0,500000,1):\n if (x%2!=0):\n sum = sum + x\nprint (sum)\n\n\n#5) Countdown by Fours - Print positive numbers starting at 2018, counting down by fours (exclude 0).\nfor count in range (2018,0,-4):\n print (count)\n\n\n#6) Flexible Countdown - Based on earlier \"Countdown by Fours\", given lowNum, highNum, mult, print multiples of mult from lowNum to highNum, using a FOR loop. For (2,9,3), print 3 6 9 (on successive lines)\nlowNum = 2\nhighNum = 9\nmult = 3\nfor count in range (lowNum,highNum+1,1):\n if (count % mult == 0):\n print(count)\n\n\n# list = [3,5,1,2]\n# for i in list:\n# print(i)\n3,5,1,2\n\n# list = [3,5,1,2]\n# for i in range(list):\n# print(i)\nerror because list is a list and not a integer value\n\n# list = [3,5,1,2]\n# for i in range(len(list)):\n# print(i)\n0,1,2,3"
},
{
"alpha_fraction": 0.5596774220466614,
"alphanum_fraction": 0.5596774220466614,
"avg_line_length": 33.5,
"blob_id": "909291ef39ae089906b5ce10085f4cb93d27cf79",
"content_id": "aae8c4f1b17e46abdf253f3a25fca661ccb1536d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 620,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 18,
"path": "/shannon/apps/giftfinder/urls.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^login/$', views.login),\n url(r'^register/$', views.register),\n url(r'^dashboard/$', views.dashboard),\n url(r'^logout/$', views.logout),\n url(r'^addjob/$', views.addjob),\n url(r'^submit/$', views.jobadd),\n url(r'^view/(?P<id>\\d+)$', views.view),\n url(r'^edit/(?P<id>\\d+)$', views.edit),\n url(r'^edit/(?P<id>\\d+)/submit/$', views.editsubmit),\n url(r'^delete/(?P<id>\\d+)$', views.delete),\n url(r'^join/(?P<id>\\d+)/$', views.join),\n # url(r'^cancel/(?P<id>\\d+)/$', views.cancel),\n]"
},
{
"alpha_fraction": 0.4471544623374939,
"alphanum_fraction": 0.49593496322631836,
"avg_line_length": 20.764705657958984,
"blob_id": "d23b7f58fd16ed49dac51f98d6897333ad80697c",
"content_id": "4ade651c10338d2f17fb1d12dddc0c85247c9546",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 17,
"path": "/python_stack/python_OOP/TDD/myHomework.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "def reverseList(x):\n for i in range (int(len(x)/2)):\n temp = x[i]\n x[i] = x[len(x)-1-i]\n x[len(x)-1-i] = temp\n return x\nx = [1,3,5,7,8,9]\nprint(reverseList(x))\n\ndef isPalindrome(x):\n for i in range (int(len(x)/2)):\n temp = x[i]\n x[i] = x[len(x)-1-i]\n x[len(x)-1-i] = temp\n return x\nx = [1,3,5,7,8,9]\nprint(reverseList(x))"
},
{
"alpha_fraction": 0.5559701323509216,
"alphanum_fraction": 0.5559701323509216,
"avg_line_length": 31.625,
"blob_id": "b0a49dd5daec31427fba994f54909874ecf84018",
"content_id": "8b666dd9275887fe46a13d497839b44f7ebf4b7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 16,
"path": "/exam/apps/trip/urls.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n url(r'^$', views.index),\r\n url(r'^login/$', views.login),\r\n url(r'^register/$', views.register),\r\n url(r'^dashboard/$', views.dashboard),\r\n url(r'^logout/$', views.logout),\r\n url(r'^addtrip/$', views.addtrip),\r\n url(r'^submit/$', views.tripadded),\r\n url(r'^trip/(?P<id>\\d+)$', views.view),\r\n url(r'^delete/(?P<id>\\d+)$', views.delete),\r\n url(r'^join/(?P<id>\\d+)/$', views.join),\r\n url(r'^cancel/(?P<id>\\d+)/$', views.cancel),\r\n]"
},
{
"alpha_fraction": 0.5607985258102417,
"alphanum_fraction": 0.6206896305084229,
"avg_line_length": 24.045454025268555,
"blob_id": "7251c5fc82abc8e261fdd6d70a1c43d04fb24e42",
"content_id": "61001b9548642fe348284de0aa5a83339bd07284",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 22,
"path": "/exam3/apps/app1/migrations/0003_job_postedon.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2018-06-29 18:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app1', '0002_auto_20180629_1552'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='job',\n name='postedon',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5919854044914246,
"alphanum_fraction": 0.6111111044883728,
"avg_line_length": 38.21428680419922,
"blob_id": "098f20d0e5eacfbc3deb3a414e21e509130ffeb0",
"content_id": "1e041ec500fb60789f2ea979e23a92bfc9f7447e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1098,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 28,
"path": "/python_stack/django/py3/Restful-Users-master/apps/users/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\nimport re\n\nEMAIL_REGEX = re.compile('^[_a-z0-9-]+(.[_a-z0-9-]+)@[a-z0-9-]+(.[a-z0-9-]+)(.[a-z]{2,4})$')\n\nclass BlogManager(models.Manager):\n def basic_validator(self, postData):\n errors = {}\n if len(postData['first_name']) < 5:\n errors['first_name'] = 'First Name should be more than five characters.'\n if len(postData['last_name']) < 5:\n errors['last_name'] = 'Last Name should be more than five characters.'\n # if not EMAIL_REGEX.match(postData['email']):\n # errors['email'] = 'Email is not valid.'\n if not re.match(EMAIL_REGEX, postData['email']):\n errors['email'] = 'Email is not valid.'\n \n return errors\n\nclass User(models.Model):\n first_name = models.CharField(max_length = 255)\n last_name = models.CharField(max_length = 255)\n email = models.CharField(max_length = 255)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n\n objects = BlogManager()\n"
},
{
"alpha_fraction": 0.6374884247779846,
"alphanum_fraction": 0.6374884247779846,
"avg_line_length": 33.752689361572266,
"blob_id": "fa159c4afa342625f131e1e201ebe94e0ce44217",
"content_id": "496ccfb90c56cbfe83e8e298d3f0cfd0b511a1c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3233,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 93,
"path": "/exam2/apps/app1/views.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nimport bcrypt\nfrom .models import *\n\ndef index(request):\n return render(request,'quotes/index.html')\n\ndef login(request):\n try:\n user = User.objects.get(email=request.POST['login_email'])\n if bcrypt.checkpw(request.POST['login_password'].encode(), user.password.encode()):\n request.session['id'] = user.id\n return redirect('/dashboard')\n except User.DoesNotExist:\n pass\n messages.error(request, 'Login unsuccessful. Plase check email and passowrd, and try again.', extra_tags='login')\n return redirect('/')\n\ndef register(request):\n errors = User.objects.validator(request.POST)\n pwHash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect('/')\n else:\n try:\n user = User.objects.create(first_name = request.POST['first_name'],last_name = request.POST['last_name'],email = request.POST['email'],password = pwHash)\n except IntegrityError:\n messages.error(request, 'this email already exists', extra_tags='email')\n return redirect('/')\n request.session['first_name'] = request.POST['first_name']\n request.session['id'] = user.id\n return redirect('/dashboard')\n\ndef logout(request) :\n request.session.flush()\n return redirect('/')\n\ndef dashboard(request):\n if \"id\" in request.session:\n content = {\n 'quotes':Quote.objects.all(),\n 'user' : User.objects.get(id = request.session['id'])\n }\n return render(request,'quotes/dashboard.html',content)\n else:\n return redirect('/')\n\ndef addquote(request):\n Quote.objects.create(\n author=request.POST['author'],\n message=request.POST['quote'],\n created_by = User.objects.get(id = request.session['id'])\n )\n return redirect('/dashboard')\n\ndef accountupdate(request, id):\n user = User.objects.get(id=id)\n return render(request, 'quotes/edit.html', {'user':user})\n\ndef submitupdate(request, id):\n errors = User.objects.editValidator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect(f'/myaccount/{id}')\n else:\n user = User.objects.get(id=id)\n user.first_name = request.POST['first_name']\n user.last_name = request.POST['last_name']\n user.email = request.POST['email']\n user.save()\n return redirect('/dashboard')\n\ndef userquotes(request, id):\n if \"id\" in request.session:\n content = {\n 'quotes':User.objects.get(id=id).quote_added.all(), 'user':User.objects.get(id=id)\n }#or'quotes':Quote.objects.filter(created_by=id)\n return render(request, 'quotes/view.html',content)\n\ndef likedquotes(request, id):\n Like.objects.create(\n user_id=request.session['id'],\n quote_id=id\n )\n return render('/dashboard',content)\n\ndef delete(request,id):\n Quote.objects.get(id=id).delete()\n return redirect('/dashboard')\n\n"
},
{
"alpha_fraction": 0.7133758068084717,
"alphanum_fraction": 0.7133758068084717,
"avg_line_length": 30.399999618530273,
"blob_id": "e624c82a65415750b9dffbeabbf8ea611ca5fa1f",
"content_id": "29c524a15405d726c167aaaa1c1139cba7048b47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 5,
"path": "/python_stack/django/py3/likes_books/apps/like/views.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\ndef index(request):\n context = {\"users\": User.objects.all()}\n return render(request, \"books/index.html\", context)\n"
},
{
"alpha_fraction": 0.6516556143760681,
"alphanum_fraction": 0.6741721630096436,
"avg_line_length": 43.47058868408203,
"blob_id": "47ce0a74a03e3ba412dc0cfb9f0edaf172c6fc31",
"content_id": "c90e6447a54e3d96c5bac8fcba0538fdd8bed764",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 755,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 17,
"path": "/python_stack/django/py3/dojo/apps/dojo_ninjas/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Dojo(models.Model):\n name = models.CharField(max_length = 255)\n city = models.CharField(max_length = 255)\n state = models.CharField(max_length = 2)\n desc = models.TextField(max_length=255)\n def __repr__(self):\n return f'Dojo(id={self.id},name={self.name},city={self.city}, state={self.state})'\n\nclass Ninja(models.Model):\n first_name = models.CharField(max_length = 255)\n last_name = models.CharField(max_length = 255)\n state = models.CharField(max_length = 2)\n dojo_name = models.ForeignKey(Dojo, related_name=\"ninjas\")\n def __repr__(self):\n return f'Ninja(id={self.id},first_name={self.first_name},last_name={self.last_name},state={self.state}, dojo_name={self.dojo_name})'"
},
{
"alpha_fraction": 0.6081370711326599,
"alphanum_fraction": 0.6124197244644165,
"avg_line_length": 24.2702693939209,
"blob_id": "2cbafd4afa77e8625043523529cf9cd657168972",
"content_id": "6020372ecad369969469320cacd38aa24d5cd8f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 934,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 37,
"path": "/python_stack/flask_fundamentals/Great_Number_Game/server.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, redirect, session, flash\nimport random\napp = Flask(__name__)\napp.secret_key = 'ThisIsSecret'\n\ndef randomGenerate():\n session['num'] = random.randrange(0, 101)\n\[email protected]('/')\ndef index():\n try:\n session['num']\n except KeyError:\n randomGenerate()\n return render_template('index.html')\n\[email protected]('/guess', methods=['POST'])\ndef checkNum():\n guessed = request.form['num_guess']\n right = None\n wrong = None\n if int(guessed) > session['num']:\n flash('Number is too high!', 'toohigh')\n elif int(guessed) < session['num']:\n flash('Number is too low!', 'toolow')\n elif int(guessed) == session['num']:\n flash('Just right!', 'right')\n else:\n flash('try again!', 'wrong')\n return redirect('/')\n\[email protected]('/reset', methods=['POST'])\ndef reset():\n randomGenerate()\n return redirect('/')\n\napp.run(debug=True)"
},
{
"alpha_fraction": 0.5852156281471252,
"alphanum_fraction": 0.5872690081596375,
"avg_line_length": 23.149999618530273,
"blob_id": "9b9cd04eccd558539403dab8052ae5c8bb23045e",
"content_id": "0e547e98bcad1a97c726fa39007246359ef64a39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 20,
"path": "/python_stack/flask_fundamentals/playground/playground.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template \napp = Flask(__name__) \n\[email protected]('/')\ndef home():\n return \"Main Page\" \n\[email protected]('/play')\ndef play():\n return render_template(\"index.html\", num = int(3))\n\[email protected]('/play/<x>')\ndef play_x(x):\n return render_template(\"index.html\", num = int(x))\n\[email protected]('/play/<x>/<color>')\ndef play_x_color(x,color):\n return render_template(\"index.html\", num = int(x), color = color ) \n \napp.run(debug=True) \n"
},
{
"alpha_fraction": 0.5562499761581421,
"alphanum_fraction": 0.59375,
"avg_line_length": 12.333333015441895,
"blob_id": "f85798991c4b0f09eccedef2e28ed13d765b2b7d",
"content_id": "af6f707250e040f0737acfd1b036533199f840fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 12,
"path": "/python_stack/python_fundamentals/hello_world.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "x = \"Hello Python\"\nprint(x)\ny = 42\nprint(y)\n\n\nfor count in range(0,5):\n print(\"looping - \", count)\n\n\nfor count in range(0,5):\n print(\"looping - \", count)\n"
},
{
"alpha_fraction": 0.6796296238899231,
"alphanum_fraction": 0.6925926208496094,
"avg_line_length": 40.61538314819336,
"blob_id": "1941b373059e9243538df87b4fe33ef3615ad99f",
"content_id": "a1dade8c679c902d89afcb6982e2e14cc2cbaf4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 540,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 13,
"path": "/python_stack/django/py3/users/apps/user_login/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\n\n\nclass User (models.Model):\n first_name = models.CharField(max_length = 20)\n last_name = models.CharField(max_length = 20)\n email = models.CharField(max_length = 255)\n age = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n def __repr__(self):\n return f'User(id={self.id},first_name={self.first_name},last_name={self.last_name}, email={self.email}, age={self.age})'"
},
{
"alpha_fraction": 0.6274309158325195,
"alphanum_fraction": 0.6274309158325195,
"avg_line_length": 36.61538314819336,
"blob_id": "835f2a35b27c2c5360dc2066c45c35ca512d5df9",
"content_id": "94a363ea63c831e6d4d5c6c82984730d5eba48b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 977,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 26,
"path": "/python_stack/flask_fundamentals/hello_flask/understanding_routing.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\napp = Flask(__name__)\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!' # Return the string 'Hello World!' as a response.\n\[email protected]('/dojo')\ndef dojo():\n return \"Dojo!\" # Return the string 'Dojo' as a response.\n\n\[email protected]('/say/<name>') # for a route '/hi/____' anything after '/hi/' gets passed as a variable 'name'\ndef hi(name):\n print(name)\n return \"Hi \" + name.capitalize()\n \[email protected]('/repeat/<num_of_repeats>/<string>') # for a route '/repeat/____/____', two parameters in the url get passed as num_of_repeats and string\ndef show_user_profile(num_of_repeats, string):\n print(int(num_of_repeats))\n print(string)\n return int(num_of_repeats) * (\" \" + string)\n\nif __name__==\"__main__\": # If __name__ is \"__main__\" we know we are running this file directly and not importing\n # it from a different module\n app.run(debug=True) # Run the app in debug mode."
},
{
"alpha_fraction": 0.6095718145370483,
"alphanum_fraction": 0.6460956931114197,
"avg_line_length": 34.31111145019531,
"blob_id": "cc61145f7e9bf32e839802405e70d63a89d90446",
"content_id": "301b93363c545a650899e9ea9922e84919ec68de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 259,
"num_lines": 45,
"path": "/python_stack/python_fundamentals/FunctionsBasic2.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "#1) Countdown - Create a function that accepts a number as an input. Return a new array that counts down by one, from the number (as arrays 'zero'th element) down to 0 (as the last element). For example countDown(5) should return [5,4,3,2,1,0].\ndef countDown(num):\n newarr = []\n for newarr in range (num, -1, -1):\n print(newarr)\ncountDown(5)\n\n#2) Print and Return - Your function will receive an array with two numbers. Print the first value, and return the second.\ndef par (arr):\n print(arr[0])\n return(arr[1])\npar ([2,3])\n\n#3) First Plus Length - Given an array, return the sum of the first value in the array, plus the array's length.\ndef fpl (arr):\n sum = arr[0]+len(arr)\n print(sum)\n return sum\nfpl([20,2,3,4,5])\n\n#4) Values Greater than Second - Write a function that accepts any array, and returns a new array with the array values that are greater than its 2nd value. Print how many values this is. If the array is only one element long, have the function return False\ndef greater(arr):\n second = arr[1]\n newarr = []\n count = 0\n if (len(arr) == 1 ):\n return False\n for i in range (0, (len(arr)), 1):\n if (arr[i]>second):\n count += 1\n newarr.append(arr[i])\n print (newarr)\n print (count)\ngreater([12,34,50,22,28,40])\n\n#5) This Length, That Value - Given two numbers, return array of length num1 with each value num2. Print \"Jinx!\" if they are same.\nnum1 = 3\nnum2 = 3\narr = []\nif (num1 == num2):\n print(\"Jinx!\")\nelse:\n for i in range (0,num1,1):\n arr.append(num2)\n print (arr)"
},
{
"alpha_fraction": 0.7649028897285461,
"alphanum_fraction": 0.7655726671218872,
"avg_line_length": 86.82353210449219,
"blob_id": "78714fb35a44ccee54b2cbf859d0cc8e931ab785",
"content_id": "7f1fd7d6a0fad6d29e5550efac2a6b7ffb9e819f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1493,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 17,
"path": "/python_stack/django/py3/Restful-Users-master/README.md",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "# Assignment: Semi-Restful Users\n\nCreate an app that can handle all of the CRUD operations (create, read, update and destroy) for a table. Ensure that you add validation rules before saving the records in the database.\n\nFollow the instructions in the wireframe below to build this application in Django.\n\n\n\nHave 7 routes. Because we are working with 'users', they might look like:\n\n* a GET request to /users - calls the index method to display all the users. This will need a template.\n* GET request to /users/new - calls the new method to display a form allowing users to create a new user. This will need a template.\n* GET request /users/<id>/edit - calls the edit method to display a form allowing users to edit an existing user with the given id. This will need a template.\n* GET /users/<id> - calls the show method to display the info for a particular user with given id. This will need a template.\n* POST to /users/create - calls the create method to insert a new user record into our database. This POST should be sent from the form on the page /users/new. Have this redirect to /users/<id> once created.\n* GET /users/<id>/destroy - calls the destroy method to remove a particular user with the given id. Have this redirect back to /users once deleted.\n* POST /users/update - calls the update method to process the submitted form sent from /users/<id>/edit. Have this redirect to /users/<id> once updated.\n"
},
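The seven routes in the README above map one-to-one onto the view functions in this app's views.py, shown earlier in this section (index, new, create, show, edit, update, destroy). Below is a sketch of a matching urls.py in the Django 1.x style used throughout this repo; since views.update takes an id, the update pattern here captures one, a small deviation from the README's plain /users/update:

from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^users$', views.index),
    url(r'^users/new$', views.new),
    url(r'^users/create$', views.create),
    url(r'^users/(?P<id>\d+)$', views.show),
    url(r'^users/(?P<id>\d+)/edit$', views.edit),
    url(r'^users/(?P<id>\d+)/update$', views.update),
    url(r'^users/(?P<id>\d+)/destroy$', views.destroy),
]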
{
"alpha_fraction": 0.7407878041267395,
"alphanum_fraction": 0.7407878041267395,
"avg_line_length": 42.66666793823242,
"blob_id": "93bf6ce8323bd0ba5c1e0a95f464ecaf8cab9946",
"content_id": "b20785b26e8b99eecda88e8bf6df61b55dc83e6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 787,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 18,
"path": "/python_stack/flask_MySQL/Leads_and_Clients/server.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, redirect\n# import the function connectToMySQL from the file mysqlconnection.py\nfrom mysqlconnection import connectToMySQL\napp = Flask(__name__)\n# invoke the connectToMySQL function and pass it the name of the database we're using\n# connectToMySQL returns an instance of MySQLConnection, which we will store in the variable 'mysql'\nmysql = connectToMySQL('leadsdb')\n# now, we may invoke the query_db method\nprint(\"all the users\", mysql.query_db(\"SELECT * FROM customers;\"))\n\[email protected]('/')\ndef index():\n all_customers = mysql.query_db(\"SELECT * FROM customers\")\n print(\"Fetched all customers\", all_customers)\n return render_template('index.html', customers = all_customers)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n"
},
{
"alpha_fraction": 0.5413271188735962,
"alphanum_fraction": 0.565774142742157,
"avg_line_length": 25.84375,
"blob_id": "cc1383d77fe359ae559b0b818f43f8200afb07cf",
"content_id": "b349bed7756cf01568875428ceab9313b67bf0c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 859,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 32,
"path": "/exam/apps/trip/migrations/0003_auto_20180626_1637.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2018-06-26 16:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('trip', '0002_trip_users'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='trip',\n old_name='plan',\n new_name='description',\n ),\n migrations.RenameField(\n model_name='trip',\n old_name='place',\n new_name='destination',\n ),\n migrations.AddField(\n model_name='trip',\n name='user',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='trip', to='trip.User'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5957821011543274,
"alphanum_fraction": 0.5957821011543274,
"avg_line_length": 34.625,
"blob_id": "32ad11bd79709ee70a1248fa28e8dea67ec03e6f",
"content_id": "ad5f714ff929693ddcea303be9ea005ca9b02762",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 569,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 16,
"path": "/exam2/apps/app1/urls.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^login/$', views.login),\n url(r'^register/$', views.register),\n url(r'^dashboard/$', views.dashboard),\n url(r'^logout/$', views.logout),\n url(r'^addquote/$', views.addquote),\n url(r'^myaccount/(?P<id>\\d+)$', views.accountupdate),\n url(r'^myaccount/(?P<id>\\d+)/update/$', views.submitupdate),\n url(r'^user/(?P<id>\\d+)$', views.userquotes),\n url(r'^like/(?P<id>\\d+)$', views.likedquotes),\n url(r'^delete/(?P<id>\\d+)$', views.delete),\n]"
},
{
"alpha_fraction": 0.5759162306785583,
"alphanum_fraction": 0.6073298454284668,
"avg_line_length": 28.875,
"blob_id": "c68d755840a339ed445079579452e03c3e703b19",
"content_id": "df3afb3617bf05929645e0e2c1e68f9225021c4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 32,
"path": "/python_stack/python_OOP/bike.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "class Bike:\n def __init__(self, price, max_speed):\n self.price = price\n self.max_speed = max_speed\n self.miles = 0\n \n def displayInfo(self):\n print (self.price, self.max_speed, self.miles)\n return self #only return self unnecessary since it is the last function that is run on all 3, but will crash if you have a function after because it would not have passed it back again.\n\n def ride(self):\n print ('Riding')\n self.miles += 10\n return self\n\n def reverse(self):\n print ('Reversing')\n if(self.miles>5):\n self.miles -= 5\n if(self.miles<=5 and self.miles>=0):\n self.miles = 0\n return self\n\nbike1 = Bike(200, \"25mph\")\nbike2 = Bike(300, \"35mph\")\nbike3 = Bike(100, \"15mph\")\n\nbike1.ride().ride().ride().ride().reverse().displayInfo()\n\nbike2.ride().ride().reverse().reverse().displayInfo()\n\nbike3.reverse().reverse().reverse().displayInfo()"
},
{
"alpha_fraction": 0.5103503465652466,
"alphanum_fraction": 0.5286624431610107,
"avg_line_length": 29.634145736694336,
"blob_id": "256982ac63029e21002a425b909b4e18179ba6d9",
"content_id": "6729ce02a15cbbb5d4303fb609632a82177bab5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1256,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 41,
"path": "/python_stack/python_OOP/product.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "class product:\n def __init__(self, price, itemname, weight, brand):\n self.price = price\n self.itemname = itemname\n self.weight = weight\n self.brand = brand\n self.status = \"for sale\"\n \n def sell(self):\n self.status = \"sold\"\n return self\n \n def add_tax(self, tax):\n self.price = self.price * tax\n return self\n\n def returnitem(self, reason_for_return):\n if (reason_for_return == \"defective\"):\n self.status = \"defective\"\n self.price = 0\n if (reason_for_return == \"like new\"):\n self.status = \"for sale\"\n if (reason_for_return == \"opened\"):\n self.status = \"used\"\n self.price *= .8\n return self\n \n def displayAll(self):\n print(\"Price: $\" + str(self.price))\n print(\"Weight: \" + str(self.weight))\n print(\"Brand: \" + str(self.brand))\n print(\"Status: \" + str(self.status))\n\np1 = product(5, \"apple\", \"1lb\", \"a\")\np2 = product(2, \"orange\", \"2 lb\", \"b\")\np3 = product(1, \"grapes\", \"1 lb\", \"c\")\np4 = product(2, \"lettuce\", \"3 lb\", \"d\")\np5 = product(3, \"brocolli\", \"2 lb\", \"e\")\np6 = product(8, \"soup\", \"1 lb\", \"a\")\n\np1.sell().add_tax(.15).returnitem(\"opened\").displayAll()\n"
},
{
"alpha_fraction": 0.634697437286377,
"alphanum_fraction": 0.634697437286377,
"avg_line_length": 33.344825744628906,
"blob_id": "cce5b1b27d0f4ce769edb299b82e287669e1745c",
"content_id": "c8b4d054bb4b39ea1747cef5b9e5a362456287ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3983,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 116,
"path": "/exam3/apps/app1/views.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom django.db import IntegrityError\nfrom django import forms\nimport bcrypt\nfrom .models import *\n\ndef index(request):\n return render(request,'jobs/index.html')\n\ndef dashboard(request):\n if \"id\" in request.session:\n content = {\n \"jobs\" : Job.objects.filter(jobsadded__id = request.session['id']), \n \"alljobs\" : Job.objects.exclude(jobsadded__id = request.session['id'])\n }\n return render(request,'jobs/dashboard.html', content)\n else:\n return redirect('/')\n\ndef login(request):\n try:\n user = User.objects.get(email=request.POST['login_email'])\n if bcrypt.checkpw(request.POST['login_password'].encode(), user.password.encode()):\n request.session['first_name'] = user.first_name\n request.session['id'] = user.id\n return redirect('/dashboard')\n except User.DoesNotExist:\n pass\n messages.error(request, 'Login unsuccessful. Plase check email and passowrd, and try again.', extra_tags='login')\n return redirect('/')\n\ndef register(request):\n errors = User.objects.validator(request.POST)\n pwHash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect('/')\n else:\n try:\n user = User.objects.create(first_name = request.POST['first_name'],last_name = request.POST['last_name'],email = request.POST['email'],password = pwHash)\n except IntegrityError:\n messages.error(request, 'this email already exists', extra_tags='email')\n return redirect('/')\n request.session['first_name'] = request.POST['first_name']\n request.session['id'] = user.id\n return redirect('/dashboard')\n\ndef logout(request) :\n request.session.flush()\n return redirect('/')\n\ndef addjob(request):\n if \"id\" in request.session:\n return render(request,'jobs/addjob.html')\n return redirect(\"/\")\n\ndef jobadd(request):\n errors = Job.objects.jobValidator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect('/addjob')\n else:\n Job.objects.create(\n title=request.POST['title'],\n description=request.POST['description'],\n location=request.POST['location'],\n creator = User.objects.get(id = request.session['id'])\n )\n return redirect('/dashboard')\n\ndef edit(request, id):\n job = Job.objects.get(id=id)\n return render(request, 'jobs/edit.html', {'job':job})\n\ndef editsubmit(request, id):\n errors = Job.objects.jobValidator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value, extra_tags=key)\n return redirect(f'/edit/{id}')\n else:\n job = Job.objects.get(id=id)\n job.title = request.POST['title']\n job.description = request.POST['description']\n job.location = request.POST['location']\n job.save()\n return redirect('/dashboard')\n\ndef view(request,id):\n job = Job.objects.get(id=id)\n content = {\n \"job\":job,\n # \"joinedusers\":job.job_members.all()\n }\n #\"joinedusers\":User.objects.filter(joined_jobs=Job.objects.get(id=id))\n return render(request,'jobs/view.html', content)\n\ndef join(request,id):\n UID = User.objects.get(id=request.session['id'])\n TID = Job.objects.get(id=id)\n UID.joined_jobs.add(TID)\n return redirect('/dashboard')\n\ndef delete(request,id):\n TID = Job.objects.get(id=id)\n TID.delete()\n return redirect('/dashboard')\n\n# def cancel(request,id):\n# UID = User.objects.get(id=request.session['id'])\n# TID = 
Job.objects.get(id=id)\n# UID.joined_jobs.remove(TID)\n# return redirect('/dashboard')"
},
{
"alpha_fraction": 0.7684210538864136,
"alphanum_fraction": 0.7684210538864136,
"avg_line_length": 18,
"blob_id": "502da853bb1a79c48247df22de9c9166aee3b17b",
"content_id": "83f6847fedf86a35a65d37b5628a09cacd13814e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 5,
"path": "/shannon/apps/giftfinder/apps.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass GiftfinderConfig(AppConfig):\n name = 'giftfinder'\n"
},
{
"alpha_fraction": 0.6026058793067932,
"alphanum_fraction": 0.6156351566314697,
"avg_line_length": 24.58333396911621,
"blob_id": "0c17edcac7c7b2d7c3d4a04158c78ef8c71a416b",
"content_id": "c9f52219f685f57172cde85ef24507e18fe8a405",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 12,
"path": "/python_stack/flask_fundamentals/checkerboard/checkerboard.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template \napp = Flask(__name__) \n\[email protected]('/')\ndef basic8x8(x = 8,y = 8):\n return render_template(\"checkerboard.html\",x = x, y = y)\n\[email protected]('/<x>/<y>')\ndef size(x,y):\n return render_template(\"checkerboard.html\", x = int(x), y = int(y))\n\napp.run(debug=True)\n"
},
{
"alpha_fraction": 0.6288453936576843,
"alphanum_fraction": 0.6412305235862732,
"avg_line_length": 48.09803771972656,
"blob_id": "2d7defc149bc33b9aa47b43f209a04810fd872b0",
"content_id": "d76e2c14cb9205bab9bd7e92d6239063adfc0962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2503,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 51,
"path": "/exam2/apps/app1/models.py",
"repo_name": "mnw247/Python",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\nfrom datetime import datetime\nimport re\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\nclass UserManager(models.Manager):\n def validator(self,request):\n errors = {}\n if len(request['first_name']) < 2 or not str.isalpha(request['first_name']):\n errors['first_name'] = 'first name must be at least 2 charachters long and contain no numbers'\n if len(request['last_name']) < 2 or not str.isalpha(request['last_name']):\n errors['last_name'] = 'last name must be at least 2 charachters long and contain no numbers'\n if len(request['email']) < 1 or not EMAIL_REGEX.match(request['email']):\n errors['email'] = 'must enter a vlaid email address'\n if len(request['password']) < 8:\n errors['password'] = 'pasword must be at least 8 charachters'\n if request['confirm_pw'] != request['password'] :\n errors['confirm_pw'] = 'your passwords do not match'\n return errors\n \n def editValidator(self,request):\n errors = {}\n user=User.objects.filter(email=request['email'])\n if len(request['first_name']) < 2 or not str.isalpha(request['first_name']):\n errors['first_name'] = 'first name must be at least 2 charachters long and contain no numbers'\n if len(request['last_name']) < 2 or not str.isalpha(request['last_name']):\n errors['last_name'] = 'last name must be at least 2 charachters long and contain no numbers'\n if len(request['email']) < 1 or not EMAIL_REGEX.match(request['email']):\n errors['email'] = 'must enter a valid email address'\n if request['email'] != User.objects.get(id=request['id']).email:\n if user:\n errors['email_taken'] = \"This email is already being used\"\n return errors\n\nclass User(models.Model):\n first_name = models.CharField(max_length = 255)\n last_name = models.CharField(max_length = 255)\n email = models.CharField(max_length = 255,unique=True)\n password = models.CharField(max_length = 255)\n objects = UserManager()\n\nclass Quote(models.Model):\n author = models.CharField(max_length = 255)\n message = models.TextField()\n created_by = models.ForeignKey(User, related_name=\"quote_added\")\n\nclass Like(models.Model):\n user = models.ForeignKey(User, related_name=\"liked_quote\")\n quote = models.ForeignKey(Quote, related_name=\"quote_liked\")"
}
] | 38 |
jabel415/socialSchedules
|
https://github.com/jabel415/socialSchedules
|
ef58885f30650d575c2e625d01f33f148671df39
|
d78d73b696e3e2db455d408008afa7d6f67ab959
|
3fbcba86133346b927e904b9aa396a0628c30bef
|
refs/heads/master
| 2021-03-27T19:33:48.947113 | 2017-04-17T17:18:15 | 2017-04-17T17:18:15 | 79,398,839 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.506649911403656,
"alphanum_fraction": 0.5638644695281982,
"avg_line_length": 31.145160675048828,
"blob_id": "a7faa3e816b8303039dbb78a7403463cf9eff9dc",
"content_id": "c780d318ba1cc542cb03898c75077d91fddce324",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3985,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 124,
"path": "/testing.py",
"repo_name": "jabel415/socialSchedules",
"src_encoding": "UTF-8",
"text": "import requests\nimport urllib\nfrom bs4 import BeautifulSoup\n\n\nr = requests.get('https://selfservice.uncc.edu/pls/BANPROD/bwckschd.p_disp_dyn_sched')\npayload1 = {'p_calling_proc': 'bwckschd.p_disp_dyn_sched',\n 'p_term': '201710'}\n\n'''term_in=201780&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy\n&sel_subj=ACCT&sel_crse=&sel_title=&sel_schd=%25&sel_insm=%25&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_levl=%25&sel_ptrm=%25&sel_attr=%25&begin_hh=0&begin_mi=0&\nbegin_ap=a&end_hh=0&end_mi=0&end_ap=a'''\n\nterm='201780'\nsubj = 'ACCT'\n\n'''data = {\n 'term_in': term,\n 'sel_subj': 'dummy',\n #'sel_day': 'dummy',\n 'sel_schd': 'dummy',\n #'sel_insm': 'dummy',\n #'sel_camp': 'dummy',\n 'sel_levl': 'dummy',\n 'sel_coll': 'dummy',\n 'sel_divs': 'dummy',\n 'sel_dept': 'dummy',\n #'sel_sess': 'dummy',\n #'sel_instr': 'dummy',\n #'sel_ptrm': 'dummy',\n 'sel_attr': 'dummy',\n 'sel_subj': subj,\n 'sel_crse_strt': '',\n 'sel_crse_end': '',\n 'sel_title': '',\n 'sel_schd': '%',\n 'sel_levl': '%',\n 'sel_coll': '%',\n 'sel_divs': '%',\n 'sel_dept': '%',\n #'sel_insm': '%',\n 'sel_from_cred': '',\n 'sel_to_cred': '',\n #'sel_camp': '%',\n #'sel_levl': '%',\n #'sel_ptrm': '%',\n 'sel_attr': '%',\n #'begin_hh': '0',\n #'begin_mi': '0',\n #'begin_ap': 'a',\n #'end_hh': '0',\n #'end_mi': '0',\n #'end_ap': 'a',\n 'call_proc_in': 'bwckctlg.p_disp_dyn_ctlg'\n }'''\n\ndata=[('term_in','201780'),\n('call_proc_in', 'bwckctlg.p_disp_dyn_ctlg'),\n('sel_subj','dummy'),\n('sel_levl','dummy'),\n('sel_schd','dummy'),\n('sel_coll','dummy'),\n('sel_divs','dummy'),\n('sel_dept','dummy'),\n('sel_attr','dummy'),\n('sel_subj', ''),#'ACCT'),\n('sel_crse_strt',''),\n('sel_crse_end',''),\n('sel_title',''),\n('sel_levl','%'),\n('sel_schd','%'),\n('sel_coll','%'),\n('sel_divs','%'),\n('sel_dept','%'),\n('sel_from_cred',''),\n('sel_to_cred',''),\n('sel_attr','%'),\n]\n\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',\n 'Host': 'selfservice.uncc.edu',\n 'Origin': 'https://selfservice.uncc.edu',\n 'Referer': 'https://selfservice.uncc.edu/pls/BANPROD/bwckgens.p_proc_term_date'\n}\n\ncookie = {'__cfduid': 'd8901f6820826fc9d17d8c020532f5c4e1480365574',\n '__unam': 'a49b7f5-158b9c9bab6-5214072d-8',\n '__utma': '71266260.157634453.1480360556.1488237453.1490754888.6',\n '__utmz': '71266260.1490754888.6.6.utmcsr=library.uncc.edu|utmccn=(referral)|utmcmd=referral|utmcct=/',\n 'accessibility': 'false',\n '_ga': 'GA1.2.157634453.1480360556'}\n\ndef call_server(url, method=\"get\", data=None, timeout=10):\n if isinstance(timeout, int): #checks to see whether timeout is an integer\n try:\n if method == \"get\":\n r = requests.get(url, timeout=int(timeout))\n else:\n if data != None:\n r = requests.post(url, timeout=int(timeout), data=data)\n else:\n return (\"Error\", \"You must post data.\")\n if r.status_code == 200:\n pageHTML = BeautifulSoup(r.text, 'html.parser')\n return pageHTML\n\n else:\n return (\"Error\", r.status_code, r.text)\n except requests.exceptions.Timeout:\n return (\"Error\", \"Timeout reached\")\n else:\n return (\"Error\", \"Timeout must be an integer\")\n\n#print 
call_server('https://selfservice.uncc.edu/pls/BANPROD/bwckctlg.p_display_courses', method='post', data=data, timeout=20)\n\na = ['asd', 'dawd', 'dawdawdad']\nb = 'asd'\nif b in a:\n print 'Match'"
},
{
"alpha_fraction": 0.518638551235199,
"alphanum_fraction": 0.5298131704330444,
"avg_line_length": 41.021507263183594,
"blob_id": "57b8d0f7e87106a94b79004184533bf90547fbad",
"content_id": "74e26decf62cd8a76e1adc101798a5205d8c96c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11723,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 279,
"path": "/data.py",
"repo_name": "jabel415/socialSchedules",
"src_encoding": "UTF-8",
"text": "import csv\nimport requests, re\nimport MySQLdb\nimport hashlib\nimport html5lib\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom bs4 import BeautifulSoup\n\n\n\nclass fetchData(object):\n\n ''' initialize headless browser and navigate to uncc selfservice schedule page '''\n def __init__(self):\n self.driver = webdriver.PhantomJS()\n self.driver.get('https://selfservice.uncc.edu/pls/BANPROD/bwckschd.p_disp_dyn_sched')\n\n def config_payloag(self, term='201780', subj='ACCT'):\n data = [\n ('term_in', term),\n ('call_proc_in', 'bwckctlg.p_disp_dyn_ctlg'),\n ('sel_subj', 'dummy'),\n ('sel_levl', 'dummy'),\n ('sel_schd', 'dummy'),\n ('sel_coll', 'dummy'),\n ('sel_divs', 'dummy'),\n ('sel_dept', 'dummy'),\n ('sel_attr', 'dummy'),\n ('sel_subj', subj),\n ('sel_crse_strt', ''),\n ('sel_crse_end', ''),\n ('sel_title', ''),\n ('sel_levl', '%'),\n ('sel_schd', '%'),\n ('sel_coll', '%'),\n ('sel_divs', '%'),\n ('sel_dept', '%'),\n ('sel_from_cred', ''),\n ('sel_to_cred', ''),\n ('sel_attr', '%'),\n ]\n return data\n\n def call_server(self, url, method=\"get\", data=None, timeout=10):\n if isinstance(timeout, int):\n try:\n if method == \"get\":\n r = requests.get(url, timeout=int(timeout))\n else:\n if data != None:\n r = requests.post(url, timeout=int(timeout), data=data)\n else:\n return (\"Error\", \"You must post data.\")\n if r.status_code == 200:\n return r.content\n else:\n return (\"Error\", r.status_code, r.text)\n except requests.exceptions.Timeout:\n return (\"Error\", \"Timeout reached\")\n else:\n return (\"Error\", \"Timeout must be an integer\")\n\n def get_search_page(self, term):\n self.dropDown = Select(self.driver.find_element_by_id('term_input_id'))\n self.dropDown.select_by_value(term)\n submit = self.driver.find_element_by_xpath('/html/body/div[3]/form/input[2]').click()\n parser = BeautifulSoup(self.driver.page_source, 'html5lib')\n return parser\n\n def get_courses_page(self, term, subj):\n page_html = self.call_server('https://selfservice.uncc.edu/pls/BANPROD/bwckctlg.p_display_courses', method='post', data=self.config_payloag(term=term, subj=subj), timeout=20)\n #parser = self.parse_html(page_html)\n parser = BeautifulSoup(page_html, 'html5lib')\n return parser\n\n def get_subjects(self, term, subj):\n subjects = {}\n subject_html = self.get_courses_page(term, subj)\n #subject_html = self.call_server('https://selfservice.uncc.edu/pls/BANPROD/bwckctlg.p_display_courses', method='post', data=self.config_payloag(term=term, subj=subj), timeout=20)\n #subject_html = BeautifulSoup(subject_html, 'html5lib')\n for option in subject_html.find(id=\"subj_id\").find_all('option'):\n subjects[str(option['value'])] = str(option.text)\n return subjects\n\n def get_courses(self, term, subj):\n courses = {}\n #subjects = self.get_subjects(term, subj)\n #for key, val in sorted(subjects.items()):\n courses_html = self.get_courses_page(term, subj)\n for course in courses_html.find_all('td', class_='nttitle'):\n course_num = course.text.split()[1]\n course_title = ' '.join(course.text.split()[3:])\n courses[course_num] = course_title\n return courses\n\n def get_sections(self, term, subj):\n def search(subj, course_num):\n subjSelect = Select(self.driver.find_element_by_xpath('//*[@id=\"subj_id\"]'))\n subjSelect.select_by_value(subj)\n self.driver.find_element_by_xpath('//*[@id=\"crse_id\"]').send_keys(course_num)\n 
self.driver.find_element_by_xpath('/html/body/div[3]/form/input[12]').click()\n available_subj = []\n sections = {}\n courses = {}\n search_page = self.get_search_page(term)\n for option in search_page.find(id=\"subj_id\").find_all('option'):\n available_subj.append(str(option['value']))\n subjects = self.get_subjects(term, subj)\n for key, val in (subjects.items()):\n not_available = False\n if key not in available_subj:\n not_available = True\n break\n if not_available:\n continue\n\n courses = self.get_courses(term, key)\n for k, v in courses.iteritems():\n search(key, k)\n parser = BeautifulSoup(self.driver.page_source, 'html5lib')\n\n no_results = False\n # Check if no results found page\n for head in parser.find_all('td', class_='pldefault'):\n header = head.text.encode('utf-8')\n if header == '\\nNo classes were found that meet your search criteria\\n\\n':\n self.driver.find_element_by_xpath('/html/body/div[3]/table[2]/tbody/tr/td/a').click()\n no_results = True\n break\n if no_results:\n continue\n\n for section in parser.find_all('th'):\n header = section.text.encode('utf-8').split( ' - ')\n print header\n #course_name = header[0]\n #crn = header[1]\n # TODO further pasing of sections if found in search\n\n ''' navigate through selfservice site to get course information for every subject for any passed in term (semester) '''\n def getCourses(self, term='201750'):\n subjects = {}\n courseData = []\n self.dropDown = Select(self.driver.find_element_by_id('term_input_id'))\n # Select semester\n self.dropDown.select_by_value(term)\n submit = self.driver.find_element_by_xpath('/html/body/div[3]/form/input[2]').click()\n pageHTML = BeautifulSoup(self.driver.page_source, 'html.parser')\n for option in pageHTML.find(id=\"subj_id\").find_all('option'):\n subjects[str(option['value'])] = str(option.text)\n for key, val in sorted(subjects.items()):\n if str.isalpha(key) and len(key) == 4:\n\n subjSelect = Select(self.driver.find_element_by_xpath('//*[@id=\"subj_id\"]'))\n subjSelect.select_by_value(key)\n self.driver.find_element_by_xpath('/html/body/div[3]/form/input[12]').click()\n source = self.driver.page_source\n pageHTML = BeautifulSoup(self.driver.page_source, 'html.parser')\n z=0\n a=0\n\n for course in pageHTML.find_all('th'):\n header = ((course.text))#.split(' - '))\n # #courseData.append([header])\n\n # for info in pageHTML.find_all('td', class_='dddefault'):\n # rows = info.find_all('td')\n # rows = [elem.text for elem in rows]\n # if len(rows) > 0:\n # rows = [x.encode('UTF-8') for x in rows]\n # courseData[z][a].append(rows)\n # z+=1\n # self.driver.find_element_by_link_text('Return to Previous').click()\n # return array of every course holding\n #return courseData\n ''' courseData[x] = [['Principles of ACCT I', '30011', 'ACCT 2121', '001'],['Class', '8:00 am - 9:30 am', 'MTWRF', 'Friday 116', 'May 22, 2017 - Jun 26, 2017', 'Lecture', 'Shirley Alyce Hunter (P)']] '''\n\n def csvOut(self):\n myfile = open('data.csv', 'wb')\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n for x in self.getCourses():\n wr.writerow(x)\n\n ''' get seat data per crn '''\n def getSeatsByCrn(self, crn='40005', term='201770'):\n seats = []\n r = requests.get('https://selfservice.uncc.edu/pls/BANPROD/bwckschd.p_disp_detail_sched?term_in=' + term + '&crn_in=' + crn)\n pageHTML = BeautifulSoup(r.text, 'html.parser')\n for row in pageHTML.find_all('td', class_='dddefault', text=re.compile(r'[0-9]{0,4}')):\n seats.append(row.text)\n return seats\n '''\n seats[0] - capacity\n seats[1] - actual\n seats[2] - 
remaining\n seats[3] - waitlist capacity\n seats[4] - waitlist actual\n seats[5] - waitlist remaining\n '''\n\n # TODO\n ''' get data for every professor from ratemyprofessor.com '''\n def getProfRating(self, prof):\n return\n\nclass dbConnect(object):\n\n def __init__(self):\n self.db = MySQLdb.connect(host=\"localhost\",\n user=\"root\",\n passwd=\"8sg0e$yF\",\n db=\"smart_schedules\")\n self.cursor = self.db.cursor()\n\n def is_user(self, email):\n val = self.cursor.execute(\"SELECT COUNT(*) FROM users WHERE email=%s\", (email))\n return bool(val)\n\n def create_user(self, f_name, l_name, student_id, email, password, is_ninernet, major=\"\", concentration=\"\", yr_num=1, picture=\"\", notes=\"\"):\n if not self.is_user(email):\n cpassword = \"\"\n pass_md5 = hashlib.md5(password).hexdigest()\n self.cursor.execute(\"INSERT INTO users(firstName, lastName, studentID, email, password, cpassword, major, class_year, concentration, picture_url, is_ninernet, notes)\" \\\n \"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\", (f_name, l_name, student_id, email, pass_md5, cpassword, major, yr_num, concentration, picture, is_ninernet, notes))\n return True\n else:\n return False\n\n def get_user_by_email(self, email):\n if self.is_user(email):\n user_info = self.cursor.execute(\"SELECT * FROM users WHERE email=%s\", (email))\n return user_info\n else:\n return False\n\n #def authenticate(self, email, password):\n # if self.is_user(email):\n\n\n '''\n def dbConnect(self):\n control = connection.cursor()\n courseData = self.getCourses()\n for x in range(0, len(courseData)):\n course = {\n 'crn': courseData[x][1],\n 'course_name': courseData[x][0],\n 'subject': courseData[x][2], # Strip crn from string\n 'course_num': courseData[x][2], # String crn from string\n 'credits': '', # TODO get credit data\n 'course_title': courseData[x][2],\n 'start_date': courseData[x][0],\n 'end_date': courseData[x][0],\n 'start_time': '',\n 'end_time': '',\n 'days': '',\n 'instructor_name': '',\n 'location': '',\n 'year': '',\n 'semester': '',\n 'seat_capacity': '',\n 'seat_accounted': '',\n 'seats_remaining': '',\n 'instructors_id': ''\n }\n return control\n '''\n\n\n\n\n\ntest = fetchData()\ntest.get_sections(term='201780', subj='')\n#test.call_server('https://selfservice.uncc.edu/pls/BANPROD/bwckctlg.p_display_courses', method='post', data=test.config_payloag(term='201780', subj=''), timeout=20)\n\n#test.getSeatsByCrn()"
}
] | 2 |
ARG-NCTU/Yolact-pytorch
|
https://github.com/ARG-NCTU/Yolact-pytorch
|
9b4b6cd16d3630e301222fac2a5062b5765f7f2e
|
6a1b62e625ed33cc1bc067ab57890bfd8bd87d97
|
e894681e50ef7f815c681b10cbd8de01a7b86c81
|
refs/heads/master
| 2023-01-29T05:49:30.458555 | 2020-12-08T09:41:15 | 2020-12-08T09:41:15 | 283,992,538 | 0 | 1 | null | 2020-07-31T09:12:32 | 2020-07-21T06:51:21 | 2020-07-21T06:51:19 | null |
[
{
"alpha_fraction": 0.699386477470398,
"alphanum_fraction": 0.7300613522529602,
"avg_line_length": 29.5625,
"blob_id": "134341a77fb898484c617857adabddbf94416386",
"content_id": "85d363dde8f0356984c503ca512f69e6d17ec523",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 489,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 16,
"path": "/Download_weights.py",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport logging\nimport gdown\nfrom zipfile import ZipFile\n\nmodels_url = 'https://drive.google.com/a/g2.nctu.edu.tw/uc?id=1Z3Vx-0BNJ4M6nb4ie40n76Tp5UGZRcgl&export=download'\nmodels_name = 'weights'\nif not os.path.isdir(models_name):\n gdown.download(models_url, output=models_name + '.zip', quiet=False)\n zip1 = ZipFile(models_name + '.zip')\n zip1.extractall(models_name)\n zip1.close()\n os.remove(models_name + \".zip\")\n\nprint(\"Finished downloading models.\") "
},
{
"alpha_fraction": 0.7457627058029175,
"alphanum_fraction": 0.7683615684509277,
"avg_line_length": 15.090909004211426,
"blob_id": "e3efed850785b59fa3a3d1ffa8c37357b0ec2dcd",
"content_id": "0b0340aca2727e80adeb65739085c9ca7214269b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 11,
"path": "/catkin_ws/src/fcn_pix2pix_prediction/README.md",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "# fcn_pix2pix_prediction on husky\n\n## Models path\n\n./src/weights\n\nDefault models: fcn_pix2pix.pkl\n\n## How to run\n\nroslaunch fcn_pix2pix_prediction fcn_pix2pix_prediction.launch\n"
},
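The README above only names the default weight file; here is a minimal, standalone sketch of the loading step it refers to, assuming the `./src/weights/fcn_pix2pix.pkl` layout stated in the README (the node itself resolves the package path at runtime via `rospkg`):

```python
# Minimal sketch of loading the default model named in the README.
# pkg_path is a placeholder assumption; in the node it comes from
# rospkg.RosPack().get_path('fcn_pix2pix_prediction').
import os
import torch

pkg_path = "/path/to/catkin_ws/src/fcn_pix2pix_prediction"  # hypothetical
weights = os.path.join(pkg_path, "src", "weights", "fcn_pix2pix.pkl")
state_dict = torch.load(weights, map_location="cpu")  # CPU-safe load when no GPU is present
print(len(state_dict), "parameter tensors in the checkpoint")
```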
{
"alpha_fraction": 0.7122448682785034,
"alphanum_fraction": 0.7285714149475098,
"avg_line_length": 29.625,
"blob_id": "9698f1f3725965e9f80c90f89ba53a77ddbf5e38",
"content_id": "a8e30fd7b3c6262b363a65bdf187d369bec358ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 16,
"path": "/Download_results.py",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport logging\nimport gdown\nfrom zipfile import ZipFile\n\nresult_url = 'https://drive.google.com/a/g2.nctu.edu.tw/uc?id=1c3ocyO-gDmv-OstM0XbBQdczWGWCANu3&export=download'\nresult_name = 'results'\nif not os.path.isdir(result_name):\n gdown.download(result_url, output=result_name + '.zip', quiet=False)\n zip1 = ZipFile(result_name + '.zip')\n zip1.extractall(result_name)\n zip1.close()\n os.remove(result_name + \".zip\")\n\nprint(\"Finished downloading results.\") "
},
{
"alpha_fraction": 0.7290502786636353,
"alphanum_fraction": 0.7709497213363647,
"avg_line_length": 24.571428298950195,
"blob_id": "1d6970ba8b2310a8aa404b299f730dc0200c4e6a",
"content_id": "6ba55e647008f0976c850c879d06e52a9356bc08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 14,
"path": "/rosbag/Download_rosbag.py",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport logging\nimport gdown\nfrom zipfile import ZipFile\n\nrosbag_name = 'xavier-predict.bag'\nrosbag_url = 'https://drive.google.com/u/2/uc?id=10qqGV1O-AhdgfW7CEX4As9L63f046439&export=download'\n\nif not os.path.isfile(rosbag_name):\n \n gdown.download(rosbag_url, output=rosbag_name, quiet=False)\n\nprint(\"Finished downloading rosbag.\") "
},
{
"alpha_fraction": 0.5811188220977783,
"alphanum_fraction": 0.6347844004631042,
"avg_line_length": 30.399351119995117,
"blob_id": "d794c883dd875d2434e460713017788736fea11e",
"content_id": "45975d4809f128201b9959d39f62fae561fec6b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9671,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 308,
"path": "/catkin_ws/src/fcn_pix2pix_prediction/src/fcn_pix2pix_predict.py",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport numpy as np\nimport cv2\nimport roslib\nimport rospy\nimport tf\nimport struct\nimport math\nimport time\nimport os\nimport rospkg\nimport math\nimport time\nimport sys\nimport PIL\nimport pandas as pd\nimport scipy.misc\nimport random\nfrom sensor_msgs import point_cloud2\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CameraInfo, CompressedImage, PointCloud2, PointField\nfrom geometry_msgs.msg import PoseArray, PoseStamped, Point\nfrom nav_msgs.msg import Path\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom std_msgs.msg import Header\nimport message_filters\nfrom datetime import datetime\n\nfrom torchvision import transforms, utils, datasets\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom torchvision import models\nfrom torchvision.models.vgg import VGG\nfrom sklearn.metrics import confusion_matrix\nfrom subt_msgs.msg import *\n\n\nclass FCN16s(nn.Module):\n\n\tdef __init__(self, pretrained_net, n_class):\n\t\tsuper(FCN16s, self).__init__()\n\t\tself.n_class = n_class\n\t\tself.pretrained_net = pretrained_net\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.deconv1 = nn.ConvTranspose2d(\n\t\t\t512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n\t\tself.bn1 = nn.BatchNorm2d(512)\n\t\tself.deconv2 = nn.ConvTranspose2d(\n\t\t\t512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n\t\tself.bn2 = nn.BatchNorm2d(256)\n\t\tself.deconv3 = nn.ConvTranspose2d(\n\t\t\t256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n\t\tself.bn3 = nn.BatchNorm2d(128)\n\t\tself.deconv4 = nn.ConvTranspose2d(\n\t\t\t128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n\t\tself.bn4 = nn.BatchNorm2d(64)\n\t\tself.deconv5 = nn.ConvTranspose2d(\n\t\t\t64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n\t\tself.bn5 = nn.BatchNorm2d(32)\n\t\tself.classifier = nn.Conv2d(32, n_class, kernel_size=1)\n\n\tdef forward(self, x):\n\t\toutput = self.pretrained_net(x)\n\t\tx5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)\n\t\tx4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)\n\n\t\t# size=(N, 512, x.H/16, x.W/16)\n\t\tscore = self.relu(self.deconv1(x5))\n\t\t# element-wise add, size=(N, 512, x.H/16, x.W/16)\n\t\tscore = self.bn1(score + x4)\n\t\t# size=(N, 256, x.H/8, x.W/8)\n\t\tscore = self.bn2(self.relu(self.deconv2(score)))\n\t\t# size=(N, 128, x.H/4, x.W/4)\n\t\tscore = self.bn3(self.relu(self.deconv3(score)))\n\t\t# size=(N, 64, x.H/2, x.W/2)\n\t\tscore = self.bn4(self.relu(self.deconv4(score)))\n\t\tscore = self.bn5(self.relu(self.deconv5(score))\n\t\t\t\t\t\t ) # size=(N, 32, x.H, x.W)\n\t\t# size=(N, n_class, x.H/1, x.W/1)\n\t\tscore = self.classifier(score)\n\n\t\treturn score\n\n\nclass VGGNet(VGG):\n\tdef __init__(self, cfg, pretrained=False, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):\n\t\tsuper(VGGNet, self).__init__(self.make_layers(cfg[model]))\n\t\tranges = {\n\t\t\t'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),\n\t\t\t'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),\n\t\t\t'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),\n\t\t\t'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))\n\t\t}\n\t\tself.ranges = ranges[model]\n\n\t\tif pretrained:\n\t\t\texec(\"self.load_state_dict(models.%s(pretrained=True).state_dict())\" % model)\n\n\t\tif not requires_grad:\n\t\t\tfor param in 
super().parameters():\n\t\t\t\tparam.requires_grad = False\n\n\t\tif remove_fc: # delete redundant fully-connected layer params, can save memory\n\t\t\tdel self.classifier\n\n\t\tif show_params:\n\t\t\tfor name, param in self.named_parameters():\n\t\t\t\tprint(name, param.size())\n\n\tdef forward(self, x):\n\t\toutput = {}\n\n\t\t# get the output of each maxpooling layer (5 maxpool in VGG net)\n\t\tfor idx in range(len(self.ranges)):\n\t\t\tfor layer in range(self.ranges[idx][0], self.ranges[idx][1]):\n\t\t\t\tx = self.features[layer](x)\n\t\t\toutput[\"x%d\" % (idx+1)] = x\n\t\treturn output\n\n\tdef make_layers(self, cfg, batch_norm=False):\n\t\tlayers = []\n\t\tin_channels = 3\n\t\tfor v in cfg:\n\t\t\tif v == 'M':\n\t\t\t\tlayers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n\t\t\telse:\n\t\t\t\tconv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n\t\t\t\tif batch_norm:\n\t\t\t\t\tlayers += [conv2d,\n\t\t\t\t\t\t\t nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n\t\t\t\telse:\n\t\t\t\t\tlayers += [conv2d, nn.ReLU(inplace=True)]\n\t\t\t\tin_channels = v\n\t\treturn nn.Sequential(*layers)\n\n\nclass FCN_Pix2Pix_PREDICT():\n\tdef __init__(self):\n\t\tself.camera_type = rospy.get_param(\"~camera\")\n\t\trospy.loginfo(\"Camera_type: %s\", self.camera_type)\n\t\tself.veh_type = rospy.get_param(\"~veh\")\n\t\trospy.loginfo(\"Veh_type: %s\", self.veh_type)\n\t\n\t\t\n\t\tself.bridge = CvBridge()\n\t\tr = rospkg.RosPack()\n\t\tself.path = r.get_path('fcn_pix2pix_prediction')\n\t\tself.cfg = {\n\t\t\t'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n\t\t\t'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n\t\t\t'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n\t\t\t'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n\t\t}\n\t\tself.means = np.array([103.939, 116.779, 123.68]) / \\\n\t\t\t255. 
# mean of three channels in the order of BGR\n\t\tself.h, self.w = 480, 640\n\t\t# self.resize_count = 0\n\t\tself.n_class = 5\n\t\tmodel_name = \"fcn_pix2pix.pkl\"\n\t\tself.vgg_model = VGGNet(self.cfg, requires_grad=True, remove_fc=True)\n\t\tself.fcn_model = FCN16s(\n\t\t\tpretrained_net=self.vgg_model, n_class=self.n_class)\n\n\t\tuse_gpu = torch.cuda.is_available()\n\t\tnum_gpu = list(range(torch.cuda.device_count()))\n\t\trospy.loginfo(\"Cuda available: %s\", use_gpu)\n\n\t\tif use_gpu:\n\t\t\tts = time.time()\n\t\t\tself.vgg_model = self.vgg_model.cuda()\n\t\t\tself.fcn_model = self.fcn_model.cuda()\n\t\t\tself.fcn_model = nn.DataParallel(\n\t\t\t\tself.fcn_model, device_ids=num_gpu)\n\t\t\tprint(\"Finish cuda loading, time elapsed {}\".format(time.time() - ts))\n\t\tstate_dict = torch.load(os.path.join(\n\t\t\tself.path, \"weights/\", model_name))\n\t\tself.fcn_model.load_state_dict(state_dict)\n\n\t\tself.mask1 = np.zeros((self.h, self.w))\n\t\t#self.brand = ['', 'extinguisher', 'backpack', 'drill', 'survivor']\n\t\tself.brand = ['', 'backpack', 'survivor', 'vent', 'phone']\n\t\trospy.loginfo(\"Node ready!\")\n\n\t\t# =======================message_filters=================================\n\t\tself.depth_sub = message_filters.Subscriber(\n\t\t\t\"aligned_depth_to_color/image_raw\", Image)\n\t\tself.image_sub = message_filters.Subscriber(\"color/image_raw\", Image)\n\n\t\tself.ts = message_filters.ApproximateTimeSynchronizer(\n\t\t\t[self.image_sub, self.depth_sub], 5, 5)\n\t\tself.ts.registerCallback(self.img_cb)\n\t\t# =======================message_filters=================================\n\n\n\t\tself.rgb_pub = rospy.Publisher(\"predict_img/\", Image, queue_size=1)\n\t\t#self.image_pub = rospy.Publisher(\"/predict_mask\", Image, queue_size=1)\n\t\tself.msg_pub = rospy.Publisher(\n\t\t\t\"mask_to_point/\", arti_input, queue_size=1)\n\t\tself.masks_pub = rospy.Publisher(\n\t\t\t\"masks/\", masks, queue_size=1)\n\n\t\trospy.loginfo(\"Start Predicting image\")\n\t\tself.depth_data = None\n\t\tself.rgb_data = None\n\n\n\tdef img_cb(self, rgb_data, depth_data):\n\n\t\tself.rgb_data = rgb_data\n\t\tself.depth_data = depth_data\n\n\t\tif self.depth_data is not None and self.rgb_data is not None:\n\t\t\tcv_image = self.bridge.imgmsg_to_cv2(self.rgb_data, \"bgr8\")\n\n\t\t\tgenerate_img, predict_img, cX, cY, obj_list = self.predict(\n\t\t\t\tcv_image)\n\n\t\t\tmsg = arti_input()\n\t\t\tmsg.image = self.rgb_data\n\t\t\tmsg.depth = self.depth_data\n\t\t\tif(cX == 0 or cY == 0):\n\t\t\t\tgenerate_img[generate_img > 0] = 0\n\t\t\tmsg.mask = self.bridge.cv2_to_imgmsg(generate_img, \"8UC1\")\n\t\t\tself.msg_pub.publish(msg)\n\t\t\t\n\t\t\tif len(obj_list):\n\t\t\t\tmask_out = masks()\n\t\t\t\tfor objs in obj_list:\n\t\t\t\t\tmask_center_point = mask_center()\n\t\t\t\t\tmask_center_point.Class = self.brand[objs[2]]\n\t\t\t\t\tmask_center_point.x = objs[0]\n\t\t\t\t\tmask_center_point.y = objs[1]\n\t\t\t\t\tmask_out.masks.append(mask_center_point)\n\n\t\t\t\tmask_out.header = self.rgb_data.header\n\t\t\t\tmask_out.depth = self.depth_data\n\t\t\t\tmask_out.count = len(obj_list)\n\t\t\t\tmask_out.camera = self.camera_type\n\t\t\t\tself.masks_pub.publish(mask_out)\n\n\t\t\t\t#print(\"Artifact: {}\".format(self.brand[objs[2]]))\n\n\t\t\tself.rgb_pub.publish(\n\t\t\t\tself.bridge.cv2_to_imgmsg(predict_img, \"bgr8\"))\n\t\t\t\n\t\t\tself.depth_data = None\n\t\t\tself.rgb_data = None\n\n\n\tdef predict(self, img):\n\t\trgb_predict = img\n\t\timg = img[:, :, ::-1] # switch to BGR\n\n\t\timg = 
np.transpose(img, (2, 0, 1)) / 255.\n\t\timg[0] -= self.means[0]\n\t\timg[1] -= self.means[1]\n\t\timg[2] -= self.means[2]\n\n\t\t# convert to tensor\n\t\timg = img[np.newaxis, :]\n\t\timg = torch.from_numpy(img.copy()).float()\n\n\t\toutput = self.fcn_model(img)\n\t\toutput = output.data.cpu().numpy()\n\n\t\tN, _, h, w = output.shape\n\t\tmask = output.transpose(0, 2, 3, 1)\n\t\tmask = mask.reshape(-1, self.n_class).argmax(axis=1)\n\t\tmask = mask.reshape(N, h, w)[0]\n\t\tmask = np.asarray(mask, np.uint8)\n\n\t\t# =======================Filter=================================\n\t\tcnts, _ = cv2.findContours(\n\t\t\tmask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n\n\t\tobj_list = []\n\t\tcX = 0\n\t\tcY = 0\n\n\t\tfor c in cnts:\n\t\t\tM = cv2.moments(c)\n\t\t\tif M[\"m00\"] == 0:\n\t\t\t\tbreak\n\t\t\tcX = int(M[\"m10\"] / M[\"m00\"])\n\t\t\tcY = int(M[\"m01\"] / M[\"m00\"])\n\t\t\tarea = cv2.contourArea(c)\n\n\t\t\tif area > 4700:# ====Modify====\n\t\t\t\tprint(area)\n\t\t\t\tcv2.circle(rgb_predict, (cX, cY), 10, (1, 227, 254), -1)\n\t\t\t\tclass_name = mask[cY][cX]\n\t\t\t\tcv2.putText(rgb_predict, self.brand[mask[cY][cX]], (\n\t\t\t\t\tcX-50, cY-40), cv2.FONT_HERSHEY_SIMPLEX, 1.1, (252, 197, 5), 3)\n\t\t\t\tobj_list.append([cX, cY, class_name])\n\n\t\treturn mask, rgb_predict, cX, cY, obj_list\n\n\tdef onShutdown(self):\n\t\trospy.loginfo(\"Shutdown.\")\t\t\n\n\nif __name__ == '__main__':\n\trospy.init_node('FCN_Pix2Pix_PREDICT')\n\tfoo = FCN_Pix2Pix_PREDICT()\n\trospy.on_shutdown(foo.onShutdown)\n\trospy.spin()\n"
},
{
"alpha_fraction": 0.725301206111908,
"alphanum_fraction": 0.7493975758552551,
"avg_line_length": 26.66666603088379,
"blob_id": "1305ce99b32f47d80f5cdca3494914db3ac27133",
"content_id": "f80416866932f9ea67b29b1b6b3628226d45d158",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 15,
"path": "/Download_data.py",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport logging\nimport gdown\nfrom zipfile import ZipFile\n\ndata_url = 'https://drive.google.com/a/g2.nctu.edu.tw/uc?id=1gADkkutnJ3Qn3Rx_6br9pkX8yn-JJsDi&export=download'\ndata_name = 'data'\ngdown.download(data_url, output=data_name + '.zip', quiet=False)\nzip1 = ZipFile(data_name + '.zip')\nzip1.extractall(data_name)\nzip1.close()\nos.remove(data_name + \".zip\")\n\nprint(\"Finished downloading data.\") "
},
{
"alpha_fraction": 0.7412717938423157,
"alphanum_fraction": 0.7793017625808716,
"avg_line_length": 40.153846740722656,
"blob_id": "ee322a440ba11039b4f578333ec6b21f21085d19",
"content_id": "acf4e72006d2ad299a2aa863d10b6e6c820f5195",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1604,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 39,
"path": "/how_to_solve_python3_cv2.sh",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# `python-catkin-tools` is needed for catkin tool\n# `python3-dev` and `python3-catkin-pkg-modules` is needed to build cv_bridge\n# `python3-numpy` and `python3-yaml` is cv_bridge dependencies\n# `ros-kinetic-cv-bridge` is needed to install a lot of cv_bridge deps. Probaply you already have it installed.\n\nsudo apt-get install python-catkin-tools python3-dev python3-catkin-pkg-modules python3-numpy python3-yaml ros-melodic-cv-bridge\n# Create catkin workspace\nmkdir catkin_workspace\ncd catkin_workspace\ncatkin init\n# Instruct catkin to set cmake variables\ncatkin config -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so\n# Instruct catkin to install built packages into install place. It is $CATKIN_WORKSPACE/install folder\ncatkin config --install\n# Clone cv_bridge src\ngit clone https://github.com/ros-perception/vision_opencv.git src/vision_opencv\n# Find version of cv_bridge in your repository\napt-cache show ros-melodic-cv-bridge | grep Version\n## Version: 1.13.0-0xenial-20180416-143935-0800\n# Checkout right version in git repo. In our case it is 1.12.8\ncd src/vision_opencv/\ngit checkout 1.13.0\ncd ../../\n# Build\ncatkin build cv_bridge\n# Extend environment with new package\nsource install/setup.bash --extend\n\n\n## Finally you need to remove your cv-bridge in python2 or try to modify python path from python2.7 to python3.x\n\n# sudo apt remove ros-melodic-cv-bridge\n\ncd ..\n\n\n### Reference: https://stackoverflow.com/questions/49221565/unable-to-use-cv-bridge-with-ros-kinetic-and-python3"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7647058963775635,
"avg_line_length": 16,
"blob_id": "df205bd2c8913c0cec079aa258fbe555aa9a8a78",
"content_id": "df1bab419f44603a311c0c0967f8c039bf88a7a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 1,
"path": "/rosbag/README.md",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "Put rosbag here.\n"
},
{
"alpha_fraction": 0.7095267176628113,
"alphanum_fraction": 0.7168393135070801,
"avg_line_length": 28.656625747680664,
"blob_id": "8c52c61ccb97fe9da44a71a6e125fd31ba80d459",
"content_id": "68ded621e26ca416ce0f8afcc6cec0accadb1a82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4923,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 166,
"path": "/catkin_ws/src/yolact_prediction/src/yolact_predict_analytics.py",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport sys\nimport numpy as np\nimport cv2\nimport roslib\nimport rospy\n# import tf\n\nimport struct\nimport time\nimport os\nimport rospkg\nimport math\nimport argparse\n\nimport PIL\nimport pandas as pd\nimport scipy.misc\nimport random\nfrom sensor_msgs import point_cloud2\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CameraInfo, CompressedImage, PointCloud2, PointField\nfrom geometry_msgs.msg import PoseArray, PoseStamped, Point\nfrom nav_msgs.msg import Path\nfrom cv_bridge import CvBridge, CvBridgeError\n# from std_msgs.msg import Header\nimport message_filters\nfrom datetime import datetime\n\nfrom torchvision import transforms, utils, datasets\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom torchvision import models\nfrom torchvision.models.vgg import VGG\n# from sklearn.metrics import confusion_matrix\n# from subt_msgs.msg import *\n\nimport torch.backends.cudnn as cudnn\nfrom modules.build_yolact import Yolact\nfrom utils.augmentations import FastBaseTransform\nfrom utils.functions import MovingAverage, ProgressBar\nfrom utils import timer\nfrom data.config import update_config, COLORS, cfg\nfrom utils.output_utils import NMS, after_nms, draw_img\n\nimport pyximport\npyximport.install(setup_args={\"include_dirs\": np.get_include()}, reload_support=True)\n\n\n\nclass yolact_prediction(object):\n\tdef __init__(self):\n\n\t\tparser = argparse.ArgumentParser(description='YOLACT Predict in ROS')\n\t\tparser.add_argument('--visual_top_k', default=100, type=int, help='Further restrict the number of predictions to parse')\n\t\tparser.add_argument('--traditional_nms', default=False, action='store_true', help='Whether to use traditional nms.')\n\t\tparser.add_argument('--hide_mask', default=False, action='store_true', help='Whether to display masks')\n\t\tparser.add_argument('--hide_bbox', default=True, action='store_true', help='Whether to display bboxes')\n\t\tparser.add_argument('--hide_score', default=True, action='store_true', help='Whether to display scores')\n\t\tparser.add_argument('--show_lincomb', default=False, action='store_true',\n\t\t\t\t\t\t\thelp='Whether to show the generating process of masks.')\n\t\tparser.add_argument('--no_crop', default=False, action='store_true',\n\t\t\t\t\t\t\thelp='Do not crop output masks with the predicted bounding box.')\n\t\tparser.add_argument('--real_time', default=True, action='store_true', help='Show the detection results real-timely.')\n\t\tparser.add_argument('--visual_thre', default=0.3, type=float,\n\t\t\t\t\t\t\thelp='Detections with a score under this threshold will be removed.')\n\t\tself.args = parser.parse_args()\n\n\t\tr = rospkg.RosPack()\n\t\tself.bridge = CvBridge()\n\n\t\tself.path = r.get_path('yolact_prediction')\n\t\tmodel_name = \"/src/weights/best_89.48_res101_custom_610000.pth\"\n\t\tstrs = model_name.split('_')\n\t\tconfig = strs[-3] + \"_\" + strs[-2] + \"_config\"\n\t\tupdate_config(config)\n\t\tprint(\"Using \" + config + \" according to the trained_model.\")\n\n\t\twith torch.no_grad():\n\n\t\t\tself.cuda = torch.cuda.is_available()\n\t\t\tif self.cuda:\n\t\t\t\tcudnn.benchmark = True\n\t\t\t\tcudnn.fastest = True\n\t\t\t\ttorch.set_default_tensor_type('torch.cuda.FloatTensor')\n\t\t\telse:\n\t\t\t\ttorch.set_default_tensor_type('torch.FloatTensor')\n\n\t\t\tself.net = Yolact()\n\t\t\tself.net.load_weights(self.path + model_name, self.cuda)\n\t\t\tprint('Model loaded.')\n\n\t\t\tif 
self.cuda:\n\t\t\t\tself.net = self.net.cuda()\n\n\t\t\tself.time_here = 0\n\t\t\tself.frame_times = MovingAverage()\n\n\t\t\t#### Publisher\n\t\t\tself.rgb_pub = rospy.Publisher(\"Yolact_predict_img/\", Image, queue_size=1)\n\n\t\t\timage_sub = rospy.Subscriber(\"/camera/color/image_raw\", Image, self.img_cb, queue_size = 1)\n\n\t\t\tprint (\"============ Ready ============\")\n\n\tdef img_cb(self, rgb_data):\n\t\t\n\t\tself.rgb_data = rgb_data \n\t\t\n\t\tif self.rgb_data is not None:\n\t\t\tcv_image = self.bridge.imgmsg_to_cv2(self.rgb_data, \"bgr8\")\n\n\t\t\tpredict_img = self.predict(cv_image)\n\n\n\t\t\tself.rgb_pub.publish(\n\t\t\t\tself.bridge.cv2_to_imgmsg(predict_img, \"bgr8\"))\n\n\t\t\n\n\t\t\tself.rgb_data = None\n\n\n\tdef predict(self, img):\n\n\t\trgb_origin = img\n\t\timg_numpy = img\n\n\t\timg = torch.from_numpy(img.copy()).float()\n\t\timg = img.cuda()\n\n\t\timg_h, img_w = img.shape[0], img.shape[1]\n\t\timg_trans = FastBaseTransform()(img.unsqueeze(0))\n\n\t\tnet_outs = self.net(img_trans)\n\t\tnms_outs = NMS(net_outs, 0)\n\n\t\tresults = after_nms(nms_outs, img_h, img_w, crop_masks=not self.args.no_crop, visual_thre=self.args.visual_thre)\n\t\ttorch.cuda.synchronize()\n\n\n\t\ttemp = self.time_here\n\t\tself.time_here = time.time()\n\n\t\n\t\tself.frame_times.add(self.time_here - temp)\n\t\tfps = 1 / self.frame_times.get_avg()\n\n\t\tframe_numpy = draw_img(results, img, self.args, class_color=True, fps=fps)\n\t\t\n\t\treturn frame_numpy\n\n\n\tdef onShutdown(self):\n\t\trospy.loginfo(\"Shutdown.\")\n\t\ttorch.cuda.empty_cache()\n\n\n\nif __name__ == '__main__':\n\trospy.init_node('yolact_prediction', anonymous=False)\n\tyolact_prediction = yolact_prediction()\n\trospy.on_shutdown(yolact_prediction.onShutdown)\n\trospy.spin()\n"
},
{
"alpha_fraction": 0.7407938838005066,
"alphanum_fraction": 0.7565758228302002,
"avg_line_length": 46.522727966308594,
"blob_id": "d2c8295ad8f5f6d954a4538d3f7982905e3a88e2",
"content_id": "3eedf8b09e73fcbf27de36ad28cba6ac7e9ba5ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2091,
"license_type": "no_license",
"max_line_length": 261,
"num_lines": 44,
"path": "/catkin_ws/README.md",
"repo_name": "ARG-NCTU/Yolact-pytorch",
"src_encoding": "UTF-8",
"text": "# yolact_ros\n\nROS wrapper for Yolact.\n\n## Installation\n\nYolact uses Python 3. If you use a ROS version built with Python 2, additional steps are necessary to run the node.\n\n- Set up a Python 3 environment.\n- Install the packages required by Yolact. See the Readme on https://github.com/dbolya/yolact for details.\n- Additionally, install the packages rospkg and empy in the environment.\n- You need to build the cv_bridge module of ROS with Python 3. I recommend using a workspace separate from other ROS packages. Clone the package to the workspace. You might need to adjust some of the following instructions depending on your Python installation.\n ```Shell\n git clone -b melodic https://github.com/ros-perception/vision_opencv.git\n ```\n- First method catkin_make, \n \n - If you use PC catkin_make, compile with\n ```Shell\n catkin_make -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so\n ```\n - If you use Xavier catkin_make, compile with\n ```Shell\n catkin_make -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/aarch64-linux-gnu/libpython3.6m.so\n ```\n- Second method for script,\n ```Shell\n cd ..\n source how_to_solve_python3_cv2.sh\n ```\n- Third method for catkin tools, use\n ```Shell\n catkin config -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so\n catkin build\n ```\n- add the following lines to the postactivate script of your environment (Change the paths according to your workspace path, virtual environment and Python installation):\n ```Shell\n source $HOME/catkin_ws/devel/setup.bash\n export OLD_PYTHONPATH=\"$PYTHONPATH\"\n export PYTHONPATH=\"$HOME/.virtualenvs/yolact/lib/python3.6/site-packages:$PYTHONPATH\"\n ```\n- add the following lines to the postdeactivate script of your environment:\n ```Shell\n export PYTHONPATH=\"$OLD_PYTHONPATH\"\n"
}
] | 10 |
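The cv_bridge README above builds the module against Python 3 but gives no way to verify the result; a small smoke test, assuming a sourced workspace, that imports the rebuilt module and round-trips a frame (all values here are illustrative):

```python
# Hypothetical check that the Python 3 cv_bridge build is usable.
# Run with the same python3 the workspace was configured for, after
# `source install/setup.bash --extend`.
import sys
import numpy as np
from cv_bridge import CvBridge

bridge = CvBridge()
frame = np.zeros((480, 640, 3), dtype=np.uint8)            # dummy BGR image
msg = bridge.cv2_to_imgmsg(frame, encoding="bgr8")         # exercises the compiled extension
back = bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")  # and the reverse path
assert back.shape == frame.shape
print("cv_bridge OK under Python", sys.version.split()[0])
```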
guilhermereis1/QRCodepy
|
https://github.com/guilhermereis1/QRCodepy
|
22b0a40aa21bc32bb129378e20ff0989043c3d04
|
0961cc36102842fb68b51d3ae2abe881020375da
|
ade12751eb2a626f7c2f7de3a0708c2ef513e39a
|
refs/heads/main
| 2023-03-10T21:24:42.015595 | 2021-03-02T15:58:50 | 2021-03-02T15:58:50 | 343,829,666 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7443609237670898,
"avg_line_length": 18,
"blob_id": "079965e08b9dbb1c9ff71f8fbcef6eb8d3c6c0e2",
"content_id": "79fa75666f745cdfad55aecc29d54665a9216ddf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 7,
"path": "/qr.py",
"repo_name": "guilhermereis1/QRCodepy",
"src_encoding": "UTF-8",
"text": "import pyqrcode\nimport png\nfrom pyqrcode import QRCode\n\ns = \"guilhermereis\"\nurl = pyqrcode.create(s)\nurl.png('teste.png', scale = 5)\n"
}
] | 1 |
Yzhanjiang/djtypeidea
|
https://github.com/Yzhanjiang/djtypeidea
|
85b0578c311d6c3f00ef4677b109eb176cff03d0
|
cce29745b0d15f1b243fd977a41fe6caee940d24
|
2a2854b4e7904ceb8cee07f0f88a3b76ec5544e3
|
refs/heads/master
| 2021-04-30T10:13:08.363976 | 2018-06-10T10:20:31 | 2018-06-10T10:20:31 | 121,322,233 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6241299510002136,
"alphanum_fraction": 0.624709963798523,
"avg_line_length": 25.507692337036133,
"blob_id": "87e4e7de58414ba2f91a0cf90d396d694a5ea9bc",
"content_id": "0ab9498514bbf8b7b1dbb2f572777dadd4139af1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1724,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 65,
"path": "/comment/views.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render,redirect\nfrom django.views.generic import TemplateView\nfrom .forms import CommentForm\nfrom django.conf import settings\nfrom models import Comment\n\n\nclass CommentShowMinix(object):\n def get_comments(self):\n target = self.request.path\n comments = Comment.objects.filter(target=target)\n\n return comments\n def get_context_data(self,**kwargs):\n\n kwargs.update({\n 'comment_from':CommentForm(),\n 'comment_list':self.get_comments(),\n })\n\n return super(CommentShowMinix,self).get_context_data(**kwargs)\n\n\n\nclass CommentView(TemplateView):\n http_method_names = ['POST']\n template_name = settings.THEME + \"/comment/result.html\"\n\n def get(self, request, *args, **kwargs):\n return super(CommentView,self).get(request,*args,**kwargs)\n\n\n def get_comments(self):\n target = self.request.path\n comments = Comment.objects.filter(target=target)\n\n return comments\n\n\n def post(self,request,*args,**kwargs):\n comment_form = CommentForm(request.POST)\n target = request.POST.get('target')\n print(request.POST)\n if comment_form.is_valid():\n instance = comment_form.save(commit=False)\n instance.target = target\n instance.save()\n\n succeed = True\n return redirect(target)\n else:\n succeed = False\n context = {\n 'succeed':succeed,\n 'form':comment_form,\n 'target':target,\n\n }\n return self.render_to_response(context)\n\n"
},
{
"alpha_fraction": 0.658269464969635,
"alphanum_fraction": 0.6637458801269531,
"avg_line_length": 37.04166793823242,
"blob_id": "33a2b3449d02d36af8430c8ab4236d04733b233f",
"content_id": "f9ccbb21e912039adb1c7c71109b0cd74652a7c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1826,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 48,
"path": "/djtypeidea/urls.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "#coding:utf8\n\"\"\"djtypeidea URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .custom_site import custom_site\nfrom comment.views import CommentView\n\n\nfrom blog.views import post_list,post_detail,index\nfrom blog.views import IndexView,CategoryView,TagView,PostView,AuthorView\nfrom config.views import links,LinkView\n# from config.views import links\n\nurlpatterns = [\n #class view\n url(r'^$',IndexView.as_view(),name=\"index\"),\n url(r'^tag/(?P<tag_id>\\d+)/$',TagView.as_view(),name=\"tag\"),\n url(r'^category/(?P<category_id>\\d+)/', CategoryView.as_view(), name=\"category\"),\n url(r'^post/(?P<pk>\\d+)/$', PostView.as_view(), name=\"detail\"),\n url(r'^author/(?P<author_id>\\d+)/$', AuthorView.as_view(), name=\"author\"),\n\n #func\n # url(r'^$',post_list,name=\"index\"),\n # url(r'^category/(?P<category_id>\\d+)/',post_list,name=\"category\"),\n # url(r'^tag/(?P<tag_id>\\d+)/$',post_list,name=\"tag\"),\n # url(r'^post/(?P<pk>\\d+)/$',post_detail,name=\"detail\"),\n\n\n url(r'links/$',LinkView.as_view(),name=\"links\"),\n url(r'comment/$',CommentView.as_view(),name=\"comment\"),\n url(r'^admin/', admin.site.urls),\n url(r'^cus_admin/', custom_site.urls),\n]\n"
},
{
"alpha_fraction": 0.6285529732704163,
"alphanum_fraction": 0.6363049149513245,
"avg_line_length": 30.917526245117188,
"blob_id": "00b53afec65b7b9dc73037f8130edc7240544294",
"content_id": "f9abed13d610f160962242e3c809bd08467e7610",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3292,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 97,
"path": "/blog/models.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db import models\nimport markdown\n\nfrom django.db.models import F\n\n# Create your models here.\n\nclass Post(models.Model):\n STATUS_ITEMS = (\n (1,'上线'),\n (2,'草稿'),\n (3,'删除'),\n )\n title = models.CharField(max_length=50,verbose_name=\"标题\")\n desc = models.CharField(max_length=255,blank=True,verbose_name=\"摘要\")\n category = models.ForeignKey('Category',verbose_name=\"分类\")\n tags = models.ManyToManyField('Tag',related_name=\"posts\",verbose_name=\"标签\")\n\n content = models.TextField(verbose_name=\"内容\",help_text=\"注:目前仅支持Markdown格式数据\")\n html = models.TextField(verbose_name=\"渲染后的内容\",default=\"\",help_text=\"注:目前仅支持Markdown格式数据\")\n is_markdown = models.BooleanField(verbose_name=\"使用markdown格式\",default=True)\n\n status = models.IntegerField(default=1,choices=STATUS_ITEMS,verbose_name=\"状态\")\n owner = models.ForeignKey(User,verbose_name=\"作者\")\n\n pv = models.PositiveIntegerField(default=0,verbose_name=\"pv\")\n uv = models.PositiveIntegerField(default=0,verbose_name=\"uv\")\n create_time = models.DateTimeField(auto_now_add=True,verbose_name=\"创建时间\")\n\n def __str__(self):\n return self.title\n\n def __unicode__(self):\n return self.title\n\n def status_show(self):\n return '当前状态:%s' % self.status\n\n def increase_pv(self):\n return self.model.objects.filter(id=self.id).update(pv=F('pv') + 1)\n\n def increase_uv(self):\n return self.model.objects.filter(id=self.id).update(pv=F('uv') + 1)\n\n def save(self,*args,**kwargs):\n if self.is_markdown:\n self.html = markdown.markdown(self.content)\n return super(Post,self).save(*args,**kwargs)\n\n class Meta:\n verbose_name = verbose_name_plural = '文章'\n\n\n\nclass Category(models.Model):\n STATUS_ITEMS = (\n (1,\"可用\"),\n (2,\"删除\")\n )\n name = models.CharField(max_length=50,verbose_name=\"名称\")\n status = models.PositiveIntegerField(default=1,choices=STATUS_ITEMS,verbose_name=\"状态\")\n is_nav = models.BooleanField(default=False,verbose_name=\"是否为导航\")\n\n owner = models.ForeignKey(User,verbose_name=\"作者\")\n created_time = models.DateTimeField(auto_now_add=True,verbose_name=\"创建时间\")\n\n def __str__(self):\n return self.name\n\n def __unicode__(self):\n return self.name\n class Meta:\n verbose_name = verbose_name_plural = \"分类\"\n \n\nclass Tag(models.Model):\n STATUS_ITEMS = (\n (1,'正常'),\n (2,'删除'),\n )\n name = models.CharField(max_length=10,verbose_name=\"名称\")\n status = models.PositiveIntegerField(default=1,choices=STATUS_ITEMS,verbose_name=\"状态\")\n owner = models.ForeignKey(User,verbose_name=\"作者\")\n create_time = models.DateTimeField(auto_now_add=True,verbose_name=\"创建时间\")\n\n def __str__(self):\n return self.name\n\n def __unicode__(self):\n return self.name\n class Meta:\n verbose_name = verbose_name_plural = \"标签\"\n"
},
{
"alpha_fraction": 0.5907716751098633,
"alphanum_fraction": 0.6016826629638672,
"avg_line_length": 22.849529266357422,
"blob_id": "95bfa19469005292616c51c0d1cde199e20b7dbc",
"content_id": "12e800de7dfed21833ca91b4eea06726f8aba98e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7623,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 319,
"path": "/blog/views.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\n\nfrom .models import Post,Tag,Category\nfrom config.models import SideBar\nfrom comment.models import Comment\nfrom django.http import Http404\nfrom django.core.paginator import Paginator,EmptyPage\n\nfrom django.views.generic import ListView,DetailView\nfrom django.conf import settings\nfrom comment.models import Comment\nfrom comment.forms import CommentForm\nfrom comment.views import CommentShowMinix\n\nfrom django.db.models import F\n\n####################class view\nclass CommonMixin(object):\n def get_category_context(self):\n # context = super(CommonMixin, self).get_context_data()\n categories = Category.objects.filter(status=1)\n nav_cates = []\n cates = []\n for cate in categories:\n if cate.is_nav:\n nav_cates.append(cate)\n else:\n cates.append(cate)\n\n print(nav_cates)\n print(\"555555555555555555555555\")\n return {\n 'nav_cates': nav_cates,\n 'cates': cates,\n }\n\n def get_side_bars(self):\n # side_bars = SideBar.objects.filter(status=1)\n return SideBar.objects.filter(status=1)\n\n\n def get_context_data(self,**kwargs):\n\n # side_bars = SideBar.objects.filter(status=1)\n recently_posts = Post.objects.filter(status=1)[:10]\n # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n recently_comments = Comment.objects.filter(status=1)[:10]\n\n kwargs.update({\n # 'side_bars': side_bars,\n 'recently_posts': recently_posts,\n 'recently_comments': recently_comments,\n })\n # context.update(extra_context)\n # return context\n return super(CommonMixin,self).get_context_data(**kwargs)\n\n\n\n\nclass BasePostsView(CommonMixin,ListView):\n model = Post\n template_name = settings.THEME + '/blog/list.html'\n context_object_name = 'posts'\n paginate_by = 3\n allow_empty = True\n\nclass IndexView(BasePostsView):\n def get_queryset(self):\n query = self.request.GET.get('query')\n print(query)\n qs = super(IndexView, self).get_queryset()\n if query:\n qs = qs.filter(title__icontains=query) #select * from blog_post where title ilike \"%query%\"\n return qs\n\n def get_context_data(self, **kwargs):\n query = self.request.GET.get('query')\n return super(IndexView,self).get_context_data(query=query)\n\nclass CategoryView(BasePostsView):\n def get_queryset(self):\n qs = super(CategoryView,self).get_queryset()\n cate_id = self.kwargs.get('category_id')\n qs = qs.filter(category_id = cate_id)\n return qs\n\n\nclass AuthorView(BasePostsView):\n def get_queryset(self):\n author_id = self.kwargs.get('author_id')\n qs = super(AuthorView,self).get_queryset()\n if author_id:\n qs = qs.filter(owner_id=author_id)\n return qs\n\n\n\nclass TagView(BasePostsView):\n def get_queryset(self):\n tag_id = self.kwargs.get('tag_id')\n try:\n tag = Tag.objects.get(id=tag_id)\n except Tag.DoesNotExist:\n return []\n\n posts = tag.posts.all()\n return posts\n\nclass PostView(CommonMixin,DetailView,CommentShowMinix):\n model = Post\n template_name = settings.THEME + '/blog/detail.html'\n context_object_name = 'post'\n\n def get_comments(self):\n target = self.request.path\n comments = Comment.objects.filter(target=target)\n\n return comments\n def get(self,request,*args,**kwargs):\n\n response = super(PostView,self).get(request,*args,**kwargs)\n self.pv_uv()\n return response\n\n def pv_uv(self):\n # self.object.pv +=1\n # self.object.uv +=1\n # self.object.save()\n\n self.object.increase_pv()\n self.object.increase_uv()\n\n\n 
def get_context_data(self,**kwargs):\n\n kwargs.update({\n 'comment_from':CommentForm(),\n 'comment_list':self.get_comments(),\n })\n\n return super(PostView,self).get_context_data(**kwargs)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##################################func\ndef get_common_context():\n categories = Category.objects.filter(status=1)\n nav_cates = []\n cates = []\n for cate in categories:\n if cate.is_nav:\n nav_cates.append(cate)\n else:\n cates.append(cate)\n\n side_bars = SideBar.objects.filter(status=1)\n\n recently_posts = Post.objects.filter(status=1)[:10]\n # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n recently_comments = Comment.objects.filter(status=1)[:10]\n\n context = {\n 'nav_cates': nav_cates,\n 'cates': cates,\n 'side_bars': side_bars,\n 'recently_posts': recently_posts,\n 'recently_comments': recently_comments,\n }\n return context\n\n\ndef index(request):\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef post_list(request,category_id=None,tag_id=None):\n queryset = Post.objects.all()\n\n page = request.GET.get('page',1)\n page_size = 1\n try:\n page = int(page)\n except TypeError:\n page = 1\n if category_id :\n #分类页面\n queryset = queryset.filter(category_id=category_id)\n elif tag_id:\n #标签页面\n try:\n tag = Tag.objects.get(id=tag_id)\n except Tag.DoesNotExist:\n queryset = []\n else:\n queryset = tag.posts.all()\n\n else:\n queryset = Post.objects.all()\n paginator = Paginator(queryset, page_size)\n try:\n posts = paginator.page(page)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n # categories = Category.objects.filter(status=1)\n # nav_cates = []\n # cates = []\n # for cate in categories:\n # if cate.is_nav:\n # nav_cates.append(cate)\n # else:\n # cates.append(cate)\n #\n # side_bars = SideBar.objects.filter(status = 1)\n #\n # recently_posts = Post.objects.filter(status=1)[:10]\n # # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n # recently_comments = Comment.objects.filter(status=1)[:10]\n\n context = {\n 'posts': posts,\n # 'nav_cates':nav_cates,\n # 'cates':cates,\n # 'side_bars':side_bars,\n # 'recently_posts':recently_posts,\n # 'recently_comments':recently_comments,\n }\n common_context = get_common_context()\n context.update(common_context)\n\n return render(request,'blog/list.html',context=context)\n\n\ndef post_detail(request,pk=None):\n try:\n post = Post.objects.get(pk=pk)\n except Post.DoesNotExist:\n raise Http404(\"post does not exist\")\n\n categories = Category.objects.filter(status=1)\n nav_cates = []\n cates = []\n for cate in categories:\n if cate.is_nav:\n nav_cates.append(cate)\n else:\n cates.append(cate)\n\n side_bars = SideBar.objects.filter(status=1)\n\n recently_posts = Post.objects.filter(status=1)[:10]\n # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n recently_comments = Comment.objects.filter(status=1)[:10]\n\n context = {\n 'post': post,\n # 'nav_cates': nav_cates,\n # 'cates': cates,\n # 'side_bars': side_bars,\n # 'recently_posts': recently_posts,\n # 'recently_comments': recently_comments,\n }\n common_context = get_common_context()\n context.update(common_context)\n\n\n return render(request,'blog/detail.html',context=context)"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.5632961988449097,
"avg_line_length": 23.617647171020508,
"blob_id": "77de963ca8fd15255f0b6e3745cfeb6c2fa0df25",
"content_id": "2b1efb8755631055ce3368b4b1c0a700d156e593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2564,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 102,
"path": "/blog/admin.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\n# Register your models here.\n\nfrom .models import Post,Category,Tag\nfrom djtypeidea.custom_site import custom_site\nfrom .adminforms import PostAdminForm\nfrom djtypeidea.custom_admin import BaseOwnerAdmin\n\n\[email protected](Post, site=custom_site)\nclass PostAdmin(BaseOwnerAdmin):\n form = PostAdminForm\n\n list_display = [\n 'title','category','status_show','content','owner',\n 'create_time','operator','pv','uv'\n ]\n list_filter = ['title','owner']\n\n fields = (\n ('category', 'title'),\n 'content',\n 'status',\n 'tags',\n )\n search_fields = ['title','category__name']\n save_on_top = True\n\n\n # list_display_links = ['category','status']\n\n # show_full_result_count = False\n # date_hierarchy = 'create_time'\n # list_editable = ('title',)\n\n fields = (\n ('category','title'),\n 'desc',\n 'status',\n # 'is_markdown',\n ('content','is_markdown'),\n # 'html',\n 'tags',\n )\n # exclude = 'owner'\n # fieldsets = (# 跟fields互斥\n # ('基础配置',{\n # 'fields':(('category','title'),'content','status')\n # }),\n # ('高级配置',{\n # 'classes':('collapse','addon'),\n # 'fields':('tags',),\n # }),\n # )\n # filter_horizontal = ('tags',)\n\n\n def operator(self, obj):\n return format_html(\n '<a href=\"{}\">编辑</a>',\n # '/cus_admin/blog/post/%s/' %obj.id\n reverse('cus_admin:blog_post_change', args=(obj.id,))\n )\n operator.allow_tags = True\n operator.short_description = '操作'\n\n\n# class PostInlineAdmin(admin.TabularInline): # StackedInline 样式不同\n# fields = ('title', 'desc','owner')\n# extra = 3 # 控制额外多几个\n# model = Post\n\[email protected](Category, site=custom_site)\nclass CategoryAdmin(BaseOwnerAdmin):\n list_display = ('name', 'status', 'is_nav', 'created_time')\n fields = (\n 'name', 'status',\n 'is_nav',\n )\n\n\n # inlines = [\n # PostInlineAdmin,\n # ]\n\n\n\[email protected](Tag, site=custom_site)\nclass TagAdmin(BaseOwnerAdmin):\n list_display = ('name', 'status', 'create_time')\n fields = (\n 'name', 'status'\n )\n\n# admin.site.register(Post,PostAdmin)\n# admin.site.register(Category,CategoryAdmin)\n# admin.site.register(Tag,TagAdmin)\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.751655638217926,
"avg_line_length": 32.38888931274414,
"blob_id": "a7ccd73329452941107ff119d6451d781c05ec32",
"content_id": "f5a5a5ae2be0a302d5b11cb0db4d2d901a0c6a2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 18,
"path": "/comment/admin.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\n\nfrom .models import Comment\nfrom djtypeidea.custom_site import custom_site\nfrom djtypeidea.custom_admin import BaseOwnerAdmin\n\n# Register your models here.\n\[email protected](Comment, site=custom_site)\nclass CommentAdmin(BaseOwnerAdmin):\n list_display = ( 'content', 'nickname','email')\n # list_display = ('target', 'content', 'nickname','website','email','status','created_time')\n\nadmin.site.register(Comment,CommentAdmin)\n\n\n\n"
},
{
"alpha_fraction": 0.7040572762489319,
"alphanum_fraction": 0.70525062084198,
"avg_line_length": 30,
"blob_id": "917c41c3226a9cd4d032e90bd69c2d176baf16e9",
"content_id": "e1bca23cc875136bc7e372f8ad7b2f4e0ec45ab3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 27,
"path": "/config/admin.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\n\nfrom .models import Link,SideBar\nfrom djtypeidea.custom_site import custom_site\nfrom djtypeidea.custom_admin import BaseOwnerAdmin\n\n# Register your models here.\n\[email protected](Link, site=custom_site)\nclass LinkAdmin(BaseOwnerAdmin):\n list_display = ('title', 'href', 'status','weight','owner','created_time')\n # fields = (\n # 'name', 'status'\n # )\nadmin.site.register(Link,LinkAdmin)\n\[email protected](SideBar, site=custom_site)\nclass SideBarAdmin(BaseOwnerAdmin):\n list_display = ('title', 'display_type', 'status','content','owner','created_time')\n # fields = (\n # 'name', 'status'\n # )\nadmin.site.register(SideBar,SideBarAdmin)\n\n"
},
{
"alpha_fraction": 0.6912751793861389,
"alphanum_fraction": 0.6941514611244202,
"avg_line_length": 26.473684310913086,
"blob_id": "6ffdcd7c151ce590bd722f7a8b5e122b52e0e3c6",
"content_id": "bf5f4f874cea2adc63d6f88553d4465c07a419bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1043,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 38,
"path": "/config/views.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom .models import Link\nfrom django.views.generic import ListView\nfrom blog.views import CommonMixin\nfrom django.conf import settings\nfrom comment.forms import CommentForm\nfrom comment.models import Comment\n# Create your views here.\nfrom comment.views import CommentShowMinix\n\n\nclass LinkView(CommonMixin,ListView,CommentShowMinix):\n queryset = Link.objects.filter(status=1)\n model = Link\n template_name = settings.THEME + '/config/links.html'\n context_object_name = 'links'\n # paginate_by = 3\n # allow_empty = True\n\n\n def get_comments(self):\n target = self.request.path\n comments = Comment.objects.filter(target=target)\n\n return comments\n def get_context_data(self,**kwargs):\n kwargs.update({\n 'comment_from':CommentForm(),\n 'comment_list':self.get_comments(),\n })\n return super(ListView,self).get_context_data(**kwargs)\n\n\ndef links(request):\n pass"
},
{
"alpha_fraction": 0.595514714717865,
"alphanum_fraction": 0.6071332097053528,
"avg_line_length": 24.013513565063477,
"blob_id": "8cdd7cf11c2495cea7fe51d2ebfd9e2c93606f43",
"content_id": "a32301d25a6b347a2b024808397f96dd6da8404c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3717,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 148,
"path": "/blog/views-func.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\n\nfrom config.models import Post,Tag,Category\nfrom config.models import SideBar\nfrom comment.models import Comment\n\nfrom django.http import Http404\n\nfrom django.core.paginator import Paginator,EmptyPage\n\ndef get_common_context():\n categories = Category.objects.filter(status=1)\n nav_cates = []\n cates = []\n for cate in categories:\n if cate.is_nav:\n nav_cates.append(cate)\n else:\n cates.append(cate)\n\n side_bars = SideBar.objects.filter(status=1)\n\n recently_posts = Post.objects.filter(status=1)[:10]\n # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n recently_comments = Comment.objects.filter(status=1)[:10]\n\n context = {\n 'nav_cates': nav_cates,\n 'cates': cates,\n 'side_bars': side_bars,\n 'recently_posts': recently_posts,\n 'recently_comments': recently_comments,\n }\n return context\n\n\ndef index(request):\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef post_list(request,category_id=None,tag_id=None):\n queryset = Post.objects.all()\n\n page = request.GET.get('page',1)\n page_size = 1\n try:\n page = int(page)\n except TypeError:\n page = 1\n if category_id :\n #分类页面\n queryset = queryset.filter(category_id=category_id)\n elif tag_id:\n #标签页面\n try:\n tag = Tag.objects.get(id=tag_id)\n except Tag.DoesNotExist:\n queryset = []\n else:\n queryset = tag.posts.all()\n\n else:\n queryset = Post.objects.all()\n paginator = Paginator(queryset, page_size)\n try:\n posts = paginator.page(page)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n # categories = Category.objects.filter(status=1)\n # nav_cates = []\n # cates = []\n # for cate in categories:\n # if cate.is_nav:\n # nav_cates.append(cate)\n # else:\n # cates.append(cate)\n #\n # side_bars = SideBar.objects.filter(status = 1)\n #\n # recently_posts = Post.objects.filter(status=1)[:10]\n # # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n # recently_comments = Comment.objects.filter(status=1)[:10]\n\n context = {\n 'posts': posts,\n # 'nav_cates':nav_cates,\n # 'cates':cates,\n # 'side_bars':side_bars,\n # 'recently_posts':recently_posts,\n # 'recently_comments':recently_comments,\n }\n common_context = get_common_context()\n context.update(common_context)\n\n return render(request,'blog/list.html',context=context)\n\n\ndef post_detail(request,pk=None):\n try:\n post = Post.objects.get(pk=pk)\n except Post.DoesNotExist:\n raise Http404(\"post does not exist\")\n\n categories = Category.objects.filter(status=1)\n nav_cates = []\n cates = []\n for cate in categories:\n if cate.is_nav:\n nav_cates.append(cate)\n else:\n cates.append(cate)\n\n side_bars = SideBar.objects.filter(status=1)\n\n recently_posts = Post.objects.filter(status=1)[:10]\n # hot_posts = Post.objects.filter(status=1).order_by('views')[:10]\n recently_comments = Comment.objects.filter(status=1)[:10]\n\n context = {\n 'post': post,\n # 'nav_cates': nav_cates,\n # 'cates': cates,\n # 'side_bars': side_bars,\n # 'recently_posts': recently_posts,\n # 'recently_comments': recently_comments,\n }\n common_context = get_common_context()\n context.update(common_context)\n\n\n return render(request,'blog/detail.html',context=context)"
},
{
"alpha_fraction": 0.40229883790016174,
"alphanum_fraction": 0.40485313534736633,
"avg_line_length": 21.399999618530273,
"blob_id": "e064918a05774ae7fd50d7cbd92e52a1fc36e99c",
"content_id": "287133343523111f1d80370da24b4f23607965f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 787,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 35,
"path": "/templates/themes/default/config/links.html",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "{% extends \"../blog/base.html\" %}\n{% block content %}\n<ul class=\"list-group\">\n\n <li class=\"list-group-item\"></li>\n\n\n <table class=\"table\">\n <thead>\n <tr>\n <th scope=\"col\">Logo</th>\n <th scope=\"col\">网站</th>\n <th scope=\"col\">author</th>\n </tr>\n </thead>\n\n <tbody>\n {% for link in links %}\n <tr>\n <th><img height=\"25\" src=\"{{link.href}}/favicon.ico\"/></th>\n <td><a href=\"{{link.href}}\" target=\"_blank\" >{{link.title}}</td>\n <td> {{link.owner}}</td>\n\n </tr>\n {% endfor %}\n\n </tbody>\n\n </table>\n\n</ul>\n\n{% include 'themes/default/comment/block.html' %}\n\n{% endblock %}"
},
{
"alpha_fraction": 0.585616409778595,
"alphanum_fraction": 0.5931506752967834,
"avg_line_length": 29.39583396911621,
"blob_id": "a95fe64676d12ecf82143b5be1d0cf9f08e939f9",
"content_id": "eebeaec4340b0130324effc6093dddddd8a76b88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1460,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 48,
"path": "/blog/tests.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\n# Create your tests here.\nfrom django.contrib.auth.models import User\nfrom django.db import connection\nfrom django.test import TestCase\n\nfrom django.test.utils import override_settings\n\nfrom .models import Category\n\nclass TestCategory(TestCase):\n @override_settings(DEBUG=True)\n def setUp(self):\n user = User.objects.create_user(\"zhan\",'[email protected]','123456')\n Category.objects.bulk_create([\n Category(name='cate_bulk_%s' %i,owner=user)\n for i in range(10)\n ])\n\n\n # @override_settings(DEBUG=True)\n # def test_filter(self):\n # queryset = Category.objects.filter(status=1)\n #\n # print(type(queryset))\n # categories = queryset\n # print(categories)\n # for cate in categories:\n # print(cate.created_time)\n # print(cate.name)\n # print(connection.queries)\n # categories = categories.filter(status=1)\n # print(list(categories))\n # print('-----------------')\n # print(categories.query)\n # print('-----------------')\n # print(connection.queries)\n # print('-----------------')\n # print(categories)\n\n def test_values(self):\n categories = Category.objects.values('name')\n print(categories)\n categories = Category.objects.values_list('id','name')\n print(categories)\n\n"
},
{
"alpha_fraction": 0.6700565218925476,
"alphanum_fraction": 0.6847457885742188,
"avg_line_length": 31.740739822387695,
"blob_id": "dd4040332b87bc50e36b8cdc7199b650396c483e",
"content_id": "ac78b5f6a31566df57e79e54d1a6146e57d7e3d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 937,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 27,
"path": "/comment/models.py",
"repo_name": "Yzhanjiang/djtypeidea",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\n\nfrom django.db import models\n\nfrom blog.models import Post\n\nclass Comment(models.Model):\n STATUS_ITEMS = (\n (1,'正常'),\n (2,'删除'),\n )\n # post = models.ForeignKey(Post,verbose_name=\"文章\")\n target = models.CharField(max_length=200,null=True,verbose_name=\"评论目标\")\n content = models.CharField(max_length=2000,null=True,verbose_name=\"内容\")\n nickname = models.CharField(max_length=50,verbose_name=\"昵称\")\n website = models.URLField(verbose_name=\"网站\")\n email = models.EmailField(verbose_name=\"邮箱\")\n status = models.PositiveIntegerField(default=1,choices=STATUS_ITEMS,verbose_name='状态')\n created_time = models.DateTimeField(auto_now_add=True,verbose_name=\"创建时间\")\n\n class Meta :\n verbose_name = verbose_name_plural = \"评论\"\n\n"
}
] | 12 |
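blog/models.py above bumps the `pv`/`uv` counters with `F()` expressions instead of read-modify-write. Below is a minimal sketch of why that pattern matters; the helper function is hypothetical, while the model and field names follow the code above.

```python
from django.db.models import F

from blog.models import Post

def bump_pv(post_id):
    # Compiles to a single "UPDATE blog_post SET pv = pv + 1 WHERE id = ..."
    # executed inside the database, so two concurrent requests cannot
    # overwrite each other's increment the way `post.pv += 1; post.save()` can.
    Post.objects.filter(id=post_id).update(pv=F("pv") + 1)
```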
Floodster11/dynamics_api_query
|
https://github.com/Floodster11/dynamics_api_query
|
ca4c0cc41be0c9652b136ca36782708616248b12
|
d7dcfa2823be81873a68e625dd15124e57610ee3
|
e8af5d1c3104d8ccf53be0aed63bf9b0b4d4385b
|
refs/heads/master
| 2021-05-10T09:39:46.057929 | 2018-01-25T15:48:22 | 2018-01-25T15:48:22 | 118,932,958 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5749059319496155,
"alphanum_fraction": 0.6038946509361267,
"avg_line_length": 34.3125,
"blob_id": "2374c205d295f4c573ca0ce57a1d0aa7650b5537",
"content_id": "e9ba848c165e3a1d6b0a87b4f0544e32da3217bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4519,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 128,
"path": "/api_code - Single Project/dyn365auth.py",
"repo_name": "Floodster11/dynamics_api_query",
"src_encoding": "UTF-8",
"text": "import json\nimport adal\nimport requests\nfrom flask import Flask\nimport logging\nimport os\n\n\nclass Dynamics365Auth(object):\n LOGIN_ENDPOINT = \"https://login.microsoftonline.com\"\n RESOURCE = \"https://management.core.windows.net/\"\n\n def get_access_token_with_client_credentials(self, tenant_id, client_id, client_secret):\n context = adal.AuthenticationContext(self.LOGIN_ENDPOINT + '/' + tenant_id)\n\n token = context.acquire_token_with_client_credentials(resource=self.RESOURCE, client_id=client_id,\n client_secret=client_secret)\n\n return token\n\n def get_access_token_with_username_password(self, dyn365_url, tenant_id, username, password, client_id,\n client_secret):\n AUTH_ENDPOINT = \"https://login.microsoftonline.com/{tenant_id}/oauth2/token\".format(tenant_id=tenant_id)\n\n POST_TOKEN_REQUEST = {\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n \"resource\": dyn365_url,\n \"username\": username,\n \"password\": password,\n \"grant_type\": \"password\"\n }\n\n response = requests.post(AUTH_ENDPOINT, data=POST_TOKEN_REQUEST)\n\n token = response.json() # [\"access_token\"]\n\n return token\n\n def get_auth_params(self, json_file):\n error = None\n try:\n json_data = json.load(open(json_file))\n except Exception as e:\n json_data = None\n error = e\n\n return json_data, error\n\n\ndef main():\n # TENANT_ID = \"6871727a-5747-424a-b9d4-39a621930267\"\n # CLIENT_ID = \"012b7898-6c8b-41c0-bb58-11817fb6d6f7\"\n # CLIENT_SECRET = \"DFDZYSZKMJR62wp9shiWVUQaRlLEglXpRGX6ofdglus=\"\n # USER_NAME = \"[email protected]\"\n # PASSWORD = \"Fl00dst3r11\"\n # DYN365_URL = \"https://lixarqa.crm3.dynamics.com/\"\n\n app = Flask(\"Dynamics365Auth\")\n\n logger = logging.getLogger(\"Dynamics365Auth\")\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(\"dyn365auth.log\", mode=\"w\")\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.info(\"Starting Dynamics365Auth service...\")\n\n dynamics365Auth = Dynamics365Auth()\n\n logger.info(\"Getting Dynamics365Auth params...\")\n json_data, error = dynamics365Auth.get_auth_params(json_file=\"dyn365auth_params.json\")\n\n if json_data != None:\n TENANT_ID = json_data[\"tenant_id\"]\n CLIENT_ID = json_data[\"client_id\"]\n CLIENT_SECRET = json_data[\"client_secret\"]\n USER_NAME = json_data[\"user_name\"]\n PASSWORD = json_data[\"password\"]\n DYN365_URL = json_data[\"dyn365_url\"]\n\n logger.info(\"TENANT_ID = %s\", TENANT_ID)\n logger.info(\"CLIENT_ID = %s\", CLIENT_ID)\n logger.info(\"USER_NAME = %s\", USER_NAME)\n else:\n logger.info(\"Unexpected error: %s\", error)\n\n @app.route(\"/\")\n def index():\n return \"Dynamics365Auth RESTful service\"\n\n @app.route(\"/api/v2.0/token\", methods=[\"GET\"])\n def get_token2():\n access_token_info = None\n\n try:\n logger.info(\"The 'access_token_info' is requested.\")\n\n access_token_info = dynamics365Auth.get_access_token_with_username_password(\n tenant_id=TENANT_ID,\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n username=USER_NAME,\n password=PASSWORD,\n dyn365_url=DYN365_URL)\n access_token = access_token_info['access_token']\n token_type = access_token_info['token_type']\n # print(access_token)\n # print(token_type)\n # print(json.dumps(access_token_info))\n logger.info(\"The 'access_token_info' was received.\")\n except Exception as e:\n logger.info(\"Unexpected error: %s\", e)\n output = 
json.dumps(access_token_info)\n return output\n\n # run application\n app.run(host=\"0.0.0.0\", debug=True, port=5001)\n #---------- PERHAPS INSERT THE crm_request function from auth.py here. -----------\n teeest=\"yahoo\" + access_token\n return teeest\n\n# IF I RUN THIS AND hit http://localhost:5001/api/v2.0/token - it returns a valid token\n#Next, must do this via script. and substring out the access token\n# curl -X GET http://localhost:5001/api/v2.0/token works as well. Obvi same with Postman.\nif __name__ == \"__main__\":\n tester = main()"
},
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.5727550983428955,
"avg_line_length": 40.384281158447266,
"blob_id": "ee800e41bff991570e57a8d863d40cf9640c32a1",
"content_id": "5eeb107b997d829a78a5915e5e504b86c06ff33e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9477,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 229,
"path": "/api_code - Single Project/hit_api.py",
"repo_name": "Floodster11/dynamics_api_query",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\nimport requests\nimport json\n##############################################################################################################\ndyn365_auth_url = 'http://localhost:5001/api/v2.0/token'\ndyn365_base_url = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/'\naccess_token_key = \"access_token\"\nresponse = requests.get(url=dyn365_auth_url)\ntest_opportunityid = 'fa62735a-c3eb-e711-812f-480fcfeaf991'\n\n\ndef get_access_token():\n try:\n response_json = response.json()\n access_token = response_json[access_token_key]\n # print('Token: ' + access_token)\n return access_token\n except Exception as e:\n dyn365_access_token = None\n\n\ndef check_access_token_for_dyn365_fn(dyn365_access_token):\n if dyn365_access_token is not None:\n status = \"Valid\"\n else:\n status = \"Not-Valid\"\n return status\n\n\n# Get user input for desired OpportunityID\ndef get_user_input():\n opportunityid = input('Enter the Opportunity ID of desired Data:')\n return opportunityid\n\n\ndef process_web_api_fn(status, opportunityid, access_token):\n if status == \"Valid\":\n ########### CALL OPPORTUNITIES API ###########\n # https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/opportunities(d5bda00a-4ea2-e711-811c-480fcfea20c1)?$selec\n # t=name,salesstage,stepname --> returns that oppid with name field\n search = dyn365_base_url + 'opportunities(' + opportunityid + ')?$select=name,_customerid_value,' \\\n 'salesstage,stepname'\n\n header = {\n \"Authorization\": \"Bearer \" + access_token,\n 'OData-MaxVersion': '4.0',\n 'OData-Version': '4.0',\n 'Accept': 'application/json',\n 'Content-Type': 'application/json; charset=utf-8',\n 'Prefer': 'odata.maxpagesize=1',\n 'Prefer': 'odata.include-annotations=OData.Community.Display.V1.FormattedValue'\n }\n opp_response = requests.get(url=search, headers=header)\n opp_string = opp_response.text # Turns to a dict so we can't replace\n # print('Opp: ' + opp_string)\n\n ########### CALL PROJECT API ###########\n # https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/psa_projects?$select=psa_name,_psa_opportunity_value&$filter=_psa_opportunity_value%20eq%20d50424ed-cada-e711-8127-480fcfea20c1\n project_search_base = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/psa_projects/'\n project_query = 'psa_name,_psa_opportunity_value,psa_tobeinvoiced,psa_forecastcostamount&$filter=_psa_opportunity_value eq ' + test_opportunityid\n\n # Queries Projects based on OppId\n project_search = project_search_base + '?$select=' + project_query\n project_response = requests.get(url=project_search, headers=header)\n project_string = project_response.text\n # print('Project String: ' + project_string)\n\n ### GET JSON TO GRAB PROJECT ID ###\n # proj_hits = get_number_of_proj_hits(project_string)\n project_json = convert_to_json(project_string)\n\n ## ENABLE THIS WHEN COMPLETED TO TAKE CORRECT PROJ ID FOR INVOICE ##\n # psa_projectid = project_json['value'][0]['psa_projectid']\n\n\n # ########### CALL INVOICE API ###########\n test_psa_projectid = '0a991dbd-88ec-e711-812f-480fcfeaf991'\n psa_projectid = test_psa_projectid\n\n invoice_search_base = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/invoices'\n invoice_query = '_psa_project_value,datedelivered,totallineitemamount&$filter=_psa_project_value%20eq%20'+ psa_projectid\n invoice_search = invoice_search_base + '?$select=' + invoice_query\n invoice_response = requests.get(url=invoice_search, headers=header)\n invoice_string = invoice_response.text\n\n\n return opp_string, 
project_string, project_json, invoice_string\n else:\n print(\"Token Not Valid\")\n sys.exit(0)\n\n\ndef convert_to_json(input_string):\n output_json = json.loads(input_string)\n return output_json\n\ndef convert_dict_to_json(input_string_dict, proj_hits):\n output_json_dict = {}\n temp = {}\n for x in proj_hits:\n # temp[x] = json.dumps(input_string_dict)\n # output_json_dict[x] = json.loads(temp[x])\n output_json_dict = json.dumps(input_string_dict)\n output_json_dict = json.loads(output_json_dict)\n return output_json_dict\n\ndef get_vars_based_from_json(opp_json, project_json, invoice_json):\n ##### GET VARS FROM OPP JSON #####\n opportunityid = opp_json['opportunityid'] # OppId from Opps\n _customerid_value = opp_json['_customerid_value'] # _customerid from Opps\n salesstage = opp_json['salesstage'] # Sales stage from Opps\n stepname = opp_json['stepname'] # stepname (2-Qualified) from Opps\n # print('\\n' + opportunityid)\n\n opp_dict = {\n 'opportunityid' : opportunityid,\n '_customerid_value': _customerid_value,\n 'salesstage' : salesstage,\n 'stepname' : stepname\n }\n\n ##### GET VARS FROM PROJECT JSON #####\n psa_name = project_json['value'][0]['psa_name']\n psa_projectid = project_json['value'][0]['psa_projectid']\n\n ###------MAY NOT NEED THESE------###\n if (project_json['value'][0]['psa_tobeinvoiced']) is not None:\n psa_tobeinvoiced_dict = project_json['value'][0]['[email protected].' \\\n 'Display.V1.FormattedValue']\n else:\n psa_tobeinvoiced_dict = project_json['value'][0]['psa_tobeinvoiced']\n if (project_json['value'][0]['psa_forecastcostamount']) is not None:\n psa_forecastcostamount_dict = project_json['value'][0]['[email protected].' \\\n 'Display.V1.FormattedValue']\n else:\n psa_forecastcostamount_dict = project_json['value'][0]['psa_forecastcostamount']\n\n _psa_opportunity_value = project_json['value'][0]['_psa_opportunity_value']\n\n proj_dict = {\n 'psa_name' : psa_name,\n 'psa_projectid' : psa_projectid,\n '_psa_opportunity_value': _psa_opportunity_value,\n 'psa_tobeinvoiced' : psa_tobeinvoiced_dict,\n 'psa_forecastcostamount': psa_forecastcostamount_dict\n }\n # print(proj_dict)\n\n\n# ---------Good to here -----------\n ##### GET VARS FROM INVOICES JSON #####\n\n if (invoice_json['value'][0]['invoiceid']) is not None:\n invoiceid = invoice_json['value'][0]['invoiceid']\n _psa_project_value = invoice_json['value'][0]['_psa_project_value']\n datedelivered = invoice_json['value'][0]['datedelivered']\n totallineitemamount = invoice_json['value'][0]['[email protected]']\n\n if _psa_project_value is not None:\n project_name = invoice_json['value'][0]['[email protected]']\n project_id = invoice_json['value'][0]['_psa_project_value']\n else:\n project_name = None\n project_id = _psa_project_value\n\n if datedelivered is not None:\n date = invoice_json['value'][0]['[email protected]']\n else:\n date = datedelivered\n\n\n inv_dict = {\n 'invoiceid' : invoiceid,\n 'project_id' : project_id,\n 'project_name' : project_name,\n 'date' : date,\n 'totallineitemamount': totallineitemamount\n }\n # print(inv_dict)\n return opp_dict, proj_dict, inv_dict\n\ndef get_number_of_proj_hits(project_string):\n proj_hits = project_string.count('@odata.etag')\n\n if proj_hits > 1:\n more_than_one_project_flag = 1\n else:\n more_than_one_project_flag = 0\n return more_than_one_project_flag\n\n\ndef get_number_of_inv_hits(invoice_string):\n inv_hits = invoice_string.count('@odata.etag')\n inv_hits = range(0, inv_hits)\n return inv_hits\n\n\ndef main():\n # opportunityid = 
get_user_input()\n opportunityid = test_opportunityid\n access_token = get_access_token()\n status = check_access_token_for_dyn365_fn(access_token)\n opp_string, project_string, project_json, invoice_string = process_web_api_fn(status, opportunityid, access_token)\n opp_json = convert_to_json(opp_string)\n invoice_json = convert_to_json(invoice_string)\n opp_dict, proj_dict, inv_dict = get_vars_based_from_json(opp_json, project_json, invoice_json)\n\n\n\n ### TEST TO SEE DICTS WORKING ###\n print(opp_dict)\n print(proj_dict)\n print(inv_dict)\n\n # print(opp_string)\n # print(project_string)\n # print(invoice_string)\n\n # print(proj_hits)\n # print(inv_hits)\n\n # f = open('troubleshoot.txt', 'w')\n # f.write(opp_string + '\\n' + project_string + '\\n' + invoice_string)\n # f.close()\n\n\nmain()\n"
},
{
"alpha_fraction": 0.5638523697853088,
"alphanum_fraction": 0.5779380798339844,
"avg_line_length": 40.49647903442383,
"blob_id": "9d9e4cabe0c1d0791d709ccbbfff760d8b9e4bb7",
"content_id": "a0867b54d0a69950b55d4d3496e9c3cfadb1365e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11785,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 284,
"path": "/api_code Multi-Project/hit_api.py",
"repo_name": "Floodster11/dynamics_api_query",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\nimport requests\nimport json\n# from dyn365auth import Dynamics365Auth\n# from connectors.dyn365conn import Dynamics365RestConnector\n# from processors.dyn365map import Dynamics365Mapper\n\n# dynamics365auth = Dynamics365Auth()\n##############################################################################################################\ndyn365_auth_url = 'http://localhost:5001/api/v2.0/token'\ndyn365_base_url = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/'\n# dynamics365Mapper = Dynamics365Mapper()\n\n# def get_created_by_fn(execution_date):\n# return DAG_NAME + \"-\" + get_created_date_fn(execution_date)\n#\n#\n# def get_created_date_fn(execution_date):\n# return execution_date.strftime(\"%Y-%m-%dT%H:%M:%S\")\n\naccess_token_key = \"access_token\"\nresponse = requests.get(url=dyn365_auth_url)\ntest_opportunityid = 'fa62735a-c3eb-e711-812f-480fcfeaf991'\n\n\ndef get_access_token():\n try:\n response_json = response.json()\n access_token = response_json[access_token_key]\n # print('Token: ' + access_token)\n return access_token\n except Exception as e:\n dyn365_access_token = None\n\n\ndef check_access_token_for_dyn365_fn(dyn365_access_token):\n if dyn365_access_token is not None:\n status = \"Valid\"\n else:\n status = \"Not-Valid\"\n return status\n\n\n# Get user input for desired OpportunityID\ndef get_user_input():\n opportunityid = input('Enter the Opportunity ID of desired Data:')\n return opportunityid\n\n\ndef process_web_api_fn(status, opportunityid, access_token):\n if status == \"Valid\":\n ########### CALL OPPORTUNITIES API ###########\n # https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/opportunities(d5bda00a-4ea2-e711-811c-480fcfea20c1)?$selec\n # t=name,salesstage,stepname --> returns that oppid with name field\n search = dyn365_base_url + 'opportunities(' + opportunityid + ')?$select=name,_customerid_value,' \\\n 'salesstage,stepname'\n\n header = {\n \"Authorization\": \"Bearer \" + access_token,\n 'OData-MaxVersion': '4.0',\n 'OData-Version': '4.0',\n 'Accept': 'application/json',\n 'Content-Type': 'application/json; charset=utf-8',\n 'Prefer': 'odata.maxpagesize=1',\n 'Prefer': 'odata.include-annotations=OData.Community.Display.V1.FormattedValue'\n }\n opp_response = requests.get(url=search, headers=header)\n opp_string = opp_response.text # Turns to a dict so we can't replace\n # print('Opp: ' + opp_string)\n\n ########### CALL PROJECT API ###########\n # https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/psa_projects?$select=psa_name,_psa_opportunity_value&$filter=_psa_opportunity_value%20eq%20d50424ed-cada-e711-8127-480fcfea20c1\n project_search_base = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/psa_projects/'\n project_query = 'psa_name,_psa_opportunity_value,psa_tobeinvoiced,psa_forecastcostamount&$filter=_psa_opportunity_value eq ' + test_opportunityid\n\n # Queries Projects based on OppId\n project_search = project_search_base + '?$select=' + project_query\n project_response = requests.get(url=project_search, headers=header)\n project_string = project_response.text\n get_proj_id = json.loads(project_string)\n # proj_id = get_proj_id['value'][0]['psa_projectid']\n # print('projectID: ' + proj_id)\n print('Project String: ' + project_string)\n\n ### GET JSON TO GRAB PROJECT IDs ###\n proj_hits = get_number_of_proj_hits(project_string)\n psa_projectid_dict = {}\n project_json = convert_to_json(project_string)\n for x in proj_hits:\n psa_projectid_dict[x] = project_json['value'][x]['psa_projectid']\n # 
print('Dict: ')\n # print(psa_projectid_dict)\n\n # TODO TRY CONVERTING TO JSON HERE (AND AFTER OPP & INVOICE IF SUCCESSFUL) AND CONVERT TO DICT -->\n # TODO THEN WE CAN TRY TO CALL ALL INVOICES BASED ON ALL PROJECTS, NOT JUST ONE\n\n\n # ########### CALL INVOICE API ###########\n\n invoice_search_base = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/invoices'\n invoice_query_dict = {}\n # invoice_response_dict = {}\n invoice_string_dict = {}\n for x in proj_hits:\n invoice_query_dict[x] = '_psa_project_value,datedelivered,totallineitemamount&$filter=_psa_project_value%20eq%20'+ psa_projectid_dict[x]\n invoice_search = invoice_search_base + '?$select=' + invoice_query_dict[x]\n invoice_response = requests.get(url=invoice_search, headers=header)\n invoice_string_dict[x] = invoice_response.text\n print(invoice_string_dict)\n\n # Queries Projects based on ProjID\n\n\n # print('Invoice: ------ ' + invoice_string)\n\n\n return opp_string, project_string, project_json, invoice_string_dict\n else:\n print(\"Token Not Valid\")\n sys.exit(0)\n\n\n# TODO Try to convert this to individual converts to be called in string def\n# def convert_to_json(opp_string, project_string, invoice_string):\n# opp_json = json.loads(opp_string)\n# project_json = json.loads(project_string)\n# invoice_json = json.loads(invoice_string)\n# # print(opp_string)\n# return opp_json, project_json, invoice_json\n\ndef convert_to_json(input_string):\n output_json = json.loads(input_string)\n return output_json\n\ndef convert_dict_to_json(input_string_dict, proj_hits):\n output_json_dict = {}\n temp = {}\n for x in proj_hits:\n # temp[x] = json.dumps(input_string_dict)\n # output_json_dict[x] = json.loads(temp[x])\n output_json_dict = json.dumps(input_string_dict)\n output_json_dict = json.loads(output_json_dict)\n return output_json_dict\n\ndef get_vars_based_from_json(opp_json, project_json, invoice_json_dict, proj_hits):\n ##### GET VARS FROM OPP JSON #####\n opportunityid = opp_json['opportunityid'] # OppId from Opps\n _customerid_value = opp_json['_customerid_value'] # _customerid from Opps\n salesstage = opp_json['salesstage'] # Sales stage from Opps\n stepname = opp_json['stepname'] # stepname (2-Qualified) from Opps\n # print('\\n' + opportunityid)\n\n opp_dict = {\n 'opportunityid' : opportunityid,\n '_customerid_value': _customerid_value,\n 'salesstage' : salesstage,\n 'stepname' : stepname\n }\n\n ##### GET VARS FROM PROJECT JSON #####\n psa_name_dict = {}\n psa_projectid_dict = {}\n psa_tobeinvoiced_dict = {}\n psa_forecastcostamount_dict = {}\n for x in proj_hits:\n psa_name_dict[x] = project_json['value'][x]['psa_name']\n psa_projectid_dict[x] = project_json['value'][x]['psa_projectid']\n\n ###------MAY NOT NEED THESE------###\n if (project_json['value'][x]['psa_tobeinvoiced']) is not None:\n psa_tobeinvoiced_dict[x] = project_json['value'][x]['[email protected].' \\\n 'Display.V1.FormattedValue']\n else:\n psa_tobeinvoiced_dict[x] = project_json['value'][x]['psa_tobeinvoiced']\n if (project_json['value'][0]['psa_forecastcostamount']) is not None:\n psa_forecastcostamount_dict[x] = project_json['value'][x]['[email protected].' 
\\\n 'Display.V1.FormattedValue']\n else:\n psa_forecastcostamount_dict[x] = project_json['value'][x]['psa_forecastcostamount']\n # print(psa_projectid + '\\n' + psa_name + '\\n' + _psa_opportunity_value)\n # print(psa_tobeinvoiced + '\\n' + psa_forecastcostamount)\n\n # print(psa_name_dict)\n _psa_opportunity_value = project_json['value'][0]['_psa_opportunity_value']\n\n proj_dict = {\n 'psa_name' : psa_name_dict,\n 'psa_projectid' : psa_projectid_dict,\n '_psa_opportunity_value': _psa_opportunity_value,\n 'psa_tobeinvoiced' : psa_tobeinvoiced_dict,\n 'psa_forecastcostamount': psa_forecastcostamount_dict\n }\n print(proj_dict)\n\n\n# TODO - HAVE TO CHANGE THESE TO DICTS POTENTIALLY AS DONE ABOVE WITH PROJECTS\n ##### GET VARS FROM INVOICES JSON #####\n invoiceid_dict = {}\n # m = invoice_json_dict[1]\n # print(m)\n\n\n for y in proj_hits:\n invoiceid_dict[x] = invoice_json_dict[x]['value'][0]['invoiceid']\n print('TEESSTT_________F_______')\n print(invoiceid_dict)\n\n\n\n _psa_project_value = invoice_json['value'][0]['_psa_project_value']\n if _psa_project_value is not None:\n project_name = invoice_json['value'][0]['[email protected]']\n project_id = invoice_json['value'][0]['_psa_project_value']\n else:\n project_name = _psa_project_value\n\n datedelivered = invoice_json['value'][0]['datedelivered']\n if datedelivered is not None:\n date = invoice_json['value'][0]['[email protected]']\n else:\n date = datedelivered\n\n\n totallineitemamount = invoice_json['value'][0]['[email protected]']\n\n # print('invid: ' + invoiceid + ' Proj ID: ' + project_id + ' Proj Name: ' + project_name + ' date: ' + date + ' line Amount: ' + totallineitemamount)\n\n\n inv_dict = {\n 'invoiceid' : invoiceid,\n 'project_id' : project_id,\n 'project_name' : project_name,\n 'date' : date,\n 'totallineitemamount': totallineitemamount\n }\n\n return opp_dict, proj_dict, inv_dict\n\ndef get_number_of_proj_hits(project_string):\n proj_hits = project_string.count('@odata.etag')\n proj_hits = range(0,proj_hits)\n return proj_hits\n\n\ndef get_number_of_inv_hits(invoice_string):\n inv_hits = invoice_string.count('@odata.etag')\n inv_hits = range(0, inv_hits)\n return inv_hits\n\n\ndef main():\n # opportunityid = get_user_input()\n opportunityid = test_opportunityid\n access_token = get_access_token()\n status = check_access_token_for_dyn365_fn(access_token)\n opp_string, project_string, project_json, invoice_string_dict = process_web_api_fn(status, opportunityid, access_token)\n opp_json = convert_to_json(opp_string)\n proj_hits = get_number_of_proj_hits(project_string)\n # invoice_json_dict = convert_dict_to_json(invoice_string_dict, proj_hits)\n print('invoice dict')\n # print(invoice_json_dict)\n opp_dict, proj_dict, inv_dict = get_vars_based_from_json(opp_json, project_json, invoice_string_dict, proj_hits)\n\n ### TEST TO SEE DICTS WORKING ###\n # print(opp_dict)\n # print(proj_dict)\n # print(inv_dict)\n\n # print(opp_string)\n # print(project_string)\n # print(invoice_string)\n\n # print(proj_hits)\n # print(inv_hits)\n\n\n # f = open('troubleshoot.txt', 'w')\n # f.write(opp_string + '\\n' + project_string + '\\n' + invoice_string)\n # f.close()\n\n\nmain()\n"
},
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 23,
"blob_id": "9d710a34d8141550ec3a2f5f7a0d8e5ea8c05376",
"content_id": "94c30ea1589f6c4a186766dc150e374b3c250bf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Floodster11/dynamics_api_query",
"src_encoding": "UTF-8",
"text": "# api_query\nPython Script to Query Dynamics API\n"
},
{
"alpha_fraction": 0.6340160369873047,
"alphanum_fraction": 0.677203893661499,
"avg_line_length": 31.100000381469727,
"blob_id": "d57317d19ec04c2945b6588b0f8980e6d4701828",
"content_id": "0ee6e65ee8ce7c9b57bf88ae9c1f4c5903098572",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2246,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 70,
"path": "/api_code Multi-Project/auth.py",
"repo_name": "Floodster11/dynamics_api_query",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n# GRABS CORRECT AUTH TOKEN BUT IT NEEDS TO USE USER/PASSWORD AUTHENTICATION INSTEAD OF CLIENT\n\nimport adal\nimport requests\nimport json\nfrom flask import Flask\n\ndyn365_auth_url = 'http://dyn365auth:5001/api/v2.0/token'\ndyn365_base_url = 'https://lixarqa.api.crm3.dynamics.com/api/data/v8.2/'\nTENANT_ID = \"6871727a-5747-424a-b9d4-39a621930267\"\nCLIENT_ID = \"012b7898-6c8b-41c0-bb58-11817fb6d6f7\"\nCLIENT_SECRET = \"DFDZYSZKMJR62wp9shiWVUQaRlLEglXpRGX6ofdglus=\"\nLOGIN_ENDPOINT = 'https://login.microsoftonline.com'\nRESOURCE = \"https://management.core.windows.net/\"\nDYN365_URL = \"https://lixarqa.crm3.dynamics.com/\"\n\n\ndef get_access_token_with_client_credentials():\n context = adal.AuthenticationContext(LOGIN_ENDPOINT + '/' + TENANT_ID)\n token = context.acquire_token_with_client_credentials(RESOURCE, client_id=CLIENT_ID, client_secret=CLIENT_SECRET)\n print(token)\n return token\n\n\ndef extract_access_token(token):\n access_token = ''\n token = json.dumps(token)\n try:\n test = json.loads(token)\n access_token = test['accessToken']\n print(access_token)\n except(KeyError):\n # handle any missing key errors\n print('Could not get access token')\n return access_token\n\n\ndef check_access_token_for_dyn365_fn(token):\n if token != None:\n next_operator_name = \"dyn365_access_token_ok\"\n else:\n next_operator_name = \"dyn365_access_token_not_ok\"\n print(next_operator_name)\n return next_operator_name\n\ndef crm_request(accessToken):\n query = 'psa_projects'\n if(accessToken != ''):\n crmrequestheaders = {\n 'Authorization': 'Bearer ' + accessToken,\n 'OData-MaxVersion': '4.0',\n 'OData-Version': '4.0',\n 'Accept': 'application/json',\n 'Content-Type': 'application/json; charset=utf-8',\n 'Prefer': 'odata.maxpagesize=500',\n 'Prefer': 'odata.include-annotations=OData.Community.Display.V1.FormattedValue'\n }\n crmresponse = requests.get(dyn365_base_url + query, headers=crmrequestheaders)\n print(crmresponse)\n return crmresponse\n\n\n\n\ntoken = get_access_token_with_client_credentials()\ngood = check_access_token_for_dyn365_fn(token)\ntoken1 = extract_access_token(token)\nres = crm_request(token1)"
}
] | 5 |
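The hit_api.py scripts above build Dynamics 365 Web API URLs by string concatenation, including hand-escaped `%20` spaces in the `$filter` clauses. Below is a sketch of the same OData call pattern that lets `requests` handle the URL encoding; the base URL, token, and IDs are placeholders, and only headers the scripts themselves already send are used.

```python
import requests

BASE = "https://example.api.crm3.dynamics.com/api/data/v8.2/"  # placeholder org URL

def odata_get(entity_set, select, filter_expr=None, access_token="<ACCESS_TOKEN>"):
    headers = {
        "Authorization": "Bearer " + access_token,
        "OData-MaxVersion": "4.0",
        "OData-Version": "4.0",
        "Accept": "application/json",
    }
    params = {"$select": select}
    if filter_expr:
        params["$filter"] = filter_expr  # requests URL-encodes the spaces for us
    resp = requests.get(BASE + entity_set, headers=headers, params=params)
    resp.raise_for_status()
    return resp.json()["value"]

# Usage sketch -- projects linked to one opportunity (the GUID is a placeholder):
# rows = odata_get("psa_projects", "psa_name,psa_projectid",
#                  "_psa_opportunity_value eq 00000000-0000-0000-0000-000000000000")
```

Passing the filter through `params` also avoids the duplicate-`Prefer`-key pitfall seen in the scripts' header dicts, since each header and query parameter is stated exactly once.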
DaWe1992/ML_Python_Tutorials
|
https://github.com/DaWe1992/ML_Python_Tutorials
|
ec5648a25fe99ffaed34e56e9aef4752dc8c3ebc
|
4276867530ed2e5b123f5750e31ab5d4a5cc9fac
|
60de2fd580ce88f050dfdd6447976441add419b4
|
refs/heads/master
| 2019-07-13T18:42:25.675919 | 2018-04-06T13:53:36 | 2018-04-06T13:53:36 | 124,253,785 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6025390625,
"alphanum_fraction": 0.625,
"avg_line_length": 25.28205108642578,
"blob_id": "058c5fb1a1bd91698958cbae0250d1f526098475",
"content_id": "6aca40e4249be85fe36a5206dacd192a17590e94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1024,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 39,
"path": "/tf/tf_demo.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 14 17:53:28 2018\n\n@author: Daniel Wehner\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom sklearn.datasets import fetch_california_housing\n\n# CONSTRUCTION PHASE\nx = tf.Variable(3, name=\"x\")\ny = tf.Variable(4, name=\"y\")\nf = x*x*y + y + 2\n\ninit = tf.global_variables_initializer() # prepare an init node\n\n# EXECUTION PHASE\nwith tf.Session() as sess:\n init.run() # initialize the variables\n result = f.eval()\n print(result)\n \n# =============================================================================\n# load housing data\nhousing = fetch_california_housing()\nm, n = housing.data.shape\nhousing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]\n\nX = tf.constant(housing_data_plus_bias, dtype=tf.float32, name=\"X\")\ny = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\nXT = tf.transpose(X)\ntheta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)\n\nwith tf.Session() as sess:\n theta_value = theta.eval()\n print(theta_value)"
},
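
The normal-equation solve at the end of tf_demo.py can be cross-checked outside the graph. A minimal sketch, assuming the same fetch_california_housing data, that fits the identical model with scikit-learn (LinearRegression keeps the bias in `intercept_`, so it is stacked back on for comparison):

```python
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression

housing = fetch_california_housing()

lin_reg = LinearRegression()
lin_reg.fit(housing.data, housing.target)

# stack the separately-fitted bias with the weights
theta_sklearn = np.r_[lin_reg.intercept_, lin_reg.coef_]
print(theta_sklearn)  # should closely match theta_value from the TF session
```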
{
"alpha_fraction": 0.6715250611305237,
"alphanum_fraction": 0.7067188024520874,
"avg_line_length": 27.139999389648438,
"blob_id": "9d53d85ac54e1badbaa6d5ea50e457638093fddd",
"content_id": "d10b1ee0f022497ed060b1bec91b49c7d4ae8ea6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2813,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 100,
"path": "/sklearn/Classification_MNIST.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 7 14:34:16 2018\n\n@author: D062271\n\"\"\"\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom sklearn.preprocessing import StandardScaler\n\ndef plot_number(digit):\n \"\"\"\n Reshapes the image array and plots the number\n \"\"\"\n digit_image = digit.reshape(28, 28)\n plt.imshow(digit_image, cmap=matplotlib.cm.binary, interpolation=\"nearest\")\n plt.axis(\"off\")\n plt.show()\n \ndef split_data(X, y):\n \"\"\"\n Split the training data\n \"\"\"\n return X[:60000], X[60000:], y[:60000], y[60000:]\n\ndef plot_roc_curve(fpr, tpr, label=None):\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], \"k--\")\n plt.axis([0, 1, 0, 1])\n plt.xlabel(\"FPR\")\n plt.ylabel(\"TPR\")\n\n# fetch the mnist data set\nmnist = fetch_mldata(\"MNIST original\")\nX, y = mnist[\"data\"], mnist[\"target\"]\n\n# plot a random number\nplot_number(X[36000])\n\n# split the data and shuffle the training data\nX_train, X_test, y_train, y_test = split_data(X, y)\nshuffle_index = np.random.permutation(60000)\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]\n\n# transform the training labels (we just want to detect 5s)\ny_train_5 = (y_train == 5)\ny_test_5 = (y_train == 5)\n\n# train an SGDClassifier\nsgd_clf = SGDClassifier(random_state=42)\nsgd_clf.fit(X_train, y_train_5)\n\nprint(sgd_clf.predict([X[36000]]))\nprint(cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\"))\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)\n\n# plot confusion matrix\nprint(confusion_matrix(y_train_5, y_train_pred))\n\ny_scores = cross_val_predict(sgd_clf, X_train, y_train, cv=3, method=\"decision_function\")\n\n# plot roc curve\nfpr, tpr, thresholds = roc_curve(y_train_5, y_scores[:,5])\nplot_roc_curve(fpr, tpr)\nplt.show()\n\n# multiclass classification\nsgd_clf.fit(X_train, y_train)\nprint(sgd_clf.predict([X[36000]]))\n\novo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))\novo_clf.fit(X_train, y_train)\nprint(ovo_clf.predict([X[36000]]))\n\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\n\ny_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nprint(conf_mx)\n\nplt.matshow(conf_mx, cmap=plt.cm.gray)\nplt.show()\n\nrow_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx / row_sums\n\nnp.fill_diagonal(norm_conf_mx, 0)\nplt.matshow(norm_conf_mx, cmap=plt.cm.gray)\nplt.show()"
},
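
The confusion matrices printed in Classification_MNIST.py can be reduced to headline numbers directly. A small helper sketch, assuming scikit-learn's binary 2x2 layout (rows are truth, columns are prediction); the matrix values below are illustrative, not real script output:

```python
import numpy as np


def precision_recall_from_cm(cm):
    # cm[1, 1] = true positives, cm[0, 1] = false positives, cm[1, 0] = false negatives
    tp, fp, fn = cm[1, 1], cm[0, 1], cm[1, 0]
    return tp / (tp + fp), tp / (tp + fn)


cm = np.array([[53057, 1522],
               [1325, 4096]])  # illustrative numbers only
precision, recall = precision_recall_from_cm(cm)
print("precision {:.3f}  recall {:.3f}".format(precision, recall))
```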
{
"alpha_fraction": 0.6021798253059387,
"alphanum_fraction": 0.6621253490447998,
"avg_line_length": 17.399999618530273,
"blob_id": "6be6db6fd441b0f9ab18cb9701486688f6f39917",
"content_id": "89335985fc16c677f9ab86dae15af2d3da22bc36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 20,
"path": "/misc/plotly_demo.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 6 07:13:56 2018\n\n@author: D062271\n\"\"\"\n\nimport numpy as np\n\nimport plotly\nfrom plotly.graph_objs import Scatter, Layout\n\nN = 1000\nrandom_x = np.random.randn(N)\nrandom_y = np.random.randn(N)\n\nplotly.offline.plot({\n \"data\": [Scatter(x=random_x, y=random_y, mode=\"markers\")],\n \"layout\": Layout(title=\"hello world\")\n})"
},
{
"alpha_fraction": 0.5064734816551208,
"alphanum_fraction": 0.5345252752304077,
"avg_line_length": 26.26890754699707,
"blob_id": "cc6dd04f7535d81c13b69837548586c0e674c545",
"content_id": "31e3a01a907b3ad3adecac755f6595adc8b93b28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3244,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 119,
"path": "/tf_serving/tf_mlp2.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCREATE A BASIC MULTI-LAYER-PERCEPTRON\nTO PERFORM ADDITION.\n\n@author: Daniel Wehner (D062271)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport logging\nimport numpy as np\nimport tensorflow as tf\n\ntf.app.flags.DEFINE_integer(\"model_version\", 1, \"version number of the model.\")\ntf.app.flags.DEFINE_string(\"work_dir\", \"/tmp\", \"Working directory.\")\nFLAGS = tf.app.flags.FLAGS\n\ndef main():\n \"\"\"\n Main function.\n\n :return:\n \"\"\"\n\n # DATA PREPARATION\n # ================================================\n data_train = np.array(\n [[1, 3], [2, 5],\n [3, 1], [3, 3],\n [4, 2], [7, 1],\n [8, 1], [2, 2],\n [5, 1], [1, 7],\n [0, 1], [0, 5],\n [0, 7], [0, 8],\n [1, 1], [1, 2],\n [0, 0], [1, 8]]\n )\n\n labels_train = np.array(\n [4, 7, 4, 6, 6, 8, 9, 4, 6, 8, 1, 5, 7, 8, 2, 3, 0, 9]\n )\n\n # specify that all features have real-value data\n feature_columns = [tf.feature_column.numeric_column(\"x\", shape=[2])]\n\n # CREATE CLASSIFIER\n # ================================================\n # build 3 layer dnn with 10, 20, 10 units respectively.\n classifier = tf.estimator.DNNClassifier(\n feature_columns=feature_columns,\n hidden_units=[10, 20, 10],\n n_classes=10,\n model_dir=\"/tmp/addition_model\"\n )\n\n # TRAINING PHASE\n # ================================================\n # define the training inputs\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data_train},\n y=labels_train,\n num_epochs=None,\n shuffle=True\n )\n\n # train model\n classifier.train(input_fn=train_input_fn, steps=10000)\n\n # PREDICT NEW SAMPLES (LOCAL)\n # ================================================\n # classify two new flower samples.\n # new_samples = np.array(\n # [[0, 6]],\n # dtype=np.float32\n # )\n #\n # predict_input_fn = tf.estimator.inputs.numpy_input_fn(\n # x={\"x\": new_samples},\n # num_epochs=1,\n # shuffle=False\n # )\n #\n # predictions = list(classifier.predict(input_fn=predict_input_fn))\n # predicted_classes = [p[\"classes\"] for p in predictions]\n #\n # print(\"Predictions: {}\".format(predicted_classes))\n\n # BUILD AND SAVE MODEL\n # ================================================\n export_path_base = sys.argv[-1]\n export_path = os.path.join(\n tf.compat.as_bytes(export_path_base),\n tf.compat.as_bytes(str(FLAGS.model_version))\n )\n\n feature_spec = {\"x\": tf.FixedLenFeature([2], tf.float32)}\n\n def serving_input_receiver_fn():\n serialized_tf_example = tf.placeholder(\n dtype=tf.string,\n shape=[None],\n name=\"input_tensors\"\n )\n\n receiver_tensors = {\"inputs\": serialized_tf_example}\n features = tf.parse_example(serialized_tf_example, feature_spec)\n return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)\n\n # export saved model\n classifier.export_savedmodel(export_path, serving_input_receiver_fn)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n main()"
},
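
The prediction block commented out in tf_mlp2.py doubles as a smoke test. Reactivated as a sketch below, it assumes the `classifier` estimator (and its /tmp/addition_model checkpoints) from the script's main() is in scope:

```python
import numpy as np
import tensorflow as tf

# classify one new sample: 0 + 6 should land in class 6
new_samples = np.array([[0, 6]], dtype=np.float32)

predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": new_samples},
    num_epochs=1,
    shuffle=False
)

predictions = list(classifier.predict(input_fn=predict_input_fn))
predicted_classes = [p["classes"] for p in predictions]
print("Predictions: {}".format(predicted_classes))
```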
{
"alpha_fraction": 0.6881144046783447,
"alphanum_fraction": 0.7104557752609253,
"avg_line_length": 29.2702693939209,
"blob_id": "e7eb8224ee39c1cb712f7f4323b116d321180c56",
"content_id": "cb0b8247bab8b8b6f68f17e4cf5e87da7e78362c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 37,
"path": "/tf_serving/client.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "\"\"\"\nBASIC CLIENT FOR TENSORFLOW MODEL SERVER\n\n@author: Daniel Wehner (D062271)\n\"\"\"\n\n# imports\nfrom grpc.beta import implementations\nfrom tensorflow_serving.apis import prediction_service_pb2\nfrom tensorflow_serving.apis import classification_pb2\n\n# define host and port\nhost_port = \"localhost:9000\"\nhost, port = host_port.split(\":\")\n\ndef predict(host, port, instance):\n \"\"\"\n Contacts the server and makes a prediction.\n\n :param host: ip address of host\n :param port: port (9000 by default)\n :param instance: instance to predict class label for\n :return: prediction\n \"\"\"\n channel = implementations.insecure_channel(host, int(port))\n stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)\n\n request = classification_pb2.ClassificationRequest()\n request.model_spec.name = \"addition\"\n example = request.input.example_list.examples.add()\n example.features.feature[\"x\"].float_list.value.extend(instance)\n\n return stub.Classify(request, 10.0) # 10 secs timeout\n\n\n# predict class label and print it to console\nprint(predict(host, port, [0, 6]))"
},
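
Besides the gRPC stub used in client.py, newer TensorFlow Serving builds also expose a REST surface. A hedged sketch of the same classify call over HTTP: it assumes Serving was started with `--rest_api_port=8501` and the model is still named `addition`, and REST availability depends on the Serving version:

```python
import requests

# TF Serving's REST classify endpoint takes a list of feature examples
body = {"examples": [{"x": [0, 6]}]}
resp = requests.post(
    "http://localhost:8501/v1/models/addition:classify", json=body)
print(resp.status_code, resp.json())
```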
{
"alpha_fraction": 0.6819799542427063,
"alphanum_fraction": 0.6997735500335693,
"avg_line_length": 34.54022979736328,
"blob_id": "8035c82f434896096dd425d74a5788742a3f5add",
"content_id": "94e63c170a5b562fc5526573f9234c7861bbcd1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3091,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 87,
"path": "/sklearn/Hands_On_Tutorial.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 16:53:13 2017\n\n@author: D062271\n\"\"\"\n\nimport os\nimport tarfile\nimport hashlib\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom six.moves import urllib\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = \"datasets/housing\"\nHOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + \"/housing.tgz\"\n\n# download and extract the data\ndef fetch_housing_data(housing_url = HOUSING_URL, housing_path = HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path = housing_path)\n housing_tgz.close()\n \n# load the housing data\ndef load_housing_data(housing_path = HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n\n# create a test set\ndef split_train_test(data, test_ratio):\n np.random.seed(42)\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]\n\ndef test_set_check(identifier, test_ratio, hash):\n \"\"\"\n Calculate the hash value for an identifier\n \"\"\"\n return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio\n\ndef split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):\n \"\"\"\n Split the data by the identifier\n \"\"\"\n ids = data[id_column]\n in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash))\n return data.loc[~in_test_set], data.loc[in_test_set]\n\n# load data and plot\nhousing = load_housing_data()\nhousing.hist(bins = 50, figsize = (20, 15))\nplt.show()\n\n# add income category (1 to 5) to the training data\nhousing[\"income_cat\"] = np.ceil(housing[\"median_income\"] / 1.5)\nhousing[\"income_cat\"].where(housing[\"income_cat\"] < 5, 5.0, inplace=True)\n\nhousing[\"rooms_per_household\"] = housing[\"total_rooms\"] / housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"] / housing[\"total_rooms\"]\nhousing[\"population_per_household\"] = housing[\"population\"] / housing[\"households\"]\n\n# perform a stratified split\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n \n# remove attribute income_cat\nfor set_ in (strat_train_set, strat_test_set):\n set_.drop(\"income_cat\", axis=1, inplace=True)\n\nhousing = strat_train_set.copy()\nhousing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.4,\n s=housing[\"population\"]/100, label=\"population\", figsize=(10,7),\n c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"), colorbar=True)\nplt.legend()"
},
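
The hash-based split helpers in Hands_On_Tutorial.py are defined but never invoked. A usage sketch, relying on the script's own `housing` frame and `split_train_test_by_id`, with the row index as a stable identifier via reset_index (the standard workaround when the data carries no natural id column):

```python
housing_with_id = housing.reset_index()  # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
print(len(train_set), "train +", len(test_set), "test")
```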
{
"alpha_fraction": 0.5077813863754272,
"alphanum_fraction": 0.5754614472389221,
"avg_line_length": 25.56730842590332,
"blob_id": "f92a729a4960c2eb81794fcc2302e9d6b2a28327",
"content_id": "43e229d2db088e9b05e8d6a9727e9bcef03c6fc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2763,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 104,
"path": "/tf_serving/tf_mlp.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "\n\"\"\"\nCREATE A BASIC MULTI-LAYER-PERCEPTRON\nTO PERFORM ADDITION.\n\n@author: Daniel Wehner (D062271)\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\n# general parameters\nlearning_rate = 0.01\nmini_batch_size = 100\ntraining_epochs = 100000\ndisplay_step = 500\n\n# network parameters\nn_hidden_1 = 10 # 1st hidden layer of neurons\nn_hidden_2 = 5 # 2nd hidden layer of neurons\nn_input = 2 # number of features\n\ndata_train = np.array(\n [[1, 3], [2, 5],\n [3, 1], [3, 3],\n [4, 2], [7, 1],\n [8, 1], [2, 2],\n [5, 1], [1, 7]])\n\nlabel_train = np.array(\n [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0]])\n\n# Tensorflow Graph input\nX = tf.placeholder(tf.float64, shape=[None, n_input], name=\"x-data\")\ny = tf.placeholder(tf.float64, shape=[None, 10], name=\"y-labels\")\n\ndef create_mlp(X, weights):\n \"\"\"\n Creates the mlp.\n\n :param x:\n :param weights:\n :return:\n \"\"\"\n # first hidden layer with sigmoid activation\n layer1 = tf.matmul(X, weights[\"h1\"])\n layer1 = tf.nn.sigmoid(layer1)\n\n # second hidden layer with sigmoid activation\n layer2 = tf.matmul(layer1, weights[\"h2\"])\n layer2 = tf.nn.sigmoid(layer2)\n\n # output layer with sigmoid activation\n layer_out = tf.matmul(layer2, weights[\"out\"])\n layer_out = tf.nn.sigmoid(layer_out)\n return layer_out\n\n# layer weights\nweights = {\n \"h1\": tf.Variable(tf.random_normal([n_input, n_hidden_1], dtype=np.float64)),\n \"h2\": tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], dtype=np.float64)),\n \"out\": tf.Variable(tf.random_normal([n_hidden_2, 10], dtype=np.float64))\n}\n\n# construct the model\npred = create_mlp(X, weights)\n\n# define loss and optimizer\ncost = tf.nn.l2_loss(pred - y, name=\"squared_error_cost\")\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n# initialize variables\ninit = tf.initialize_all_variables()\n\n# launch!\nwith tf.Session() as sess:\n sess.run(init)\n\n print(\"Starting training...\")\n\n for epoch in range(training_epochs):\n _, c = sess.run([optimizer, cost], feed_dict={\n X: data_train,\n y: label_train\n })\n\n # compute average cost\n avg_cost = c / data_train.shape[0]\n\n # display logs per epoch\n if epoch % display_step == 0:\n print(\"Epoch:\", '%05d' % (epoch), \"Training error=\", \"{:.9f}\".format(avg_cost))\n\n print(\"Finished training...\")\n\n print(\"Predicting 1 + 5...\")\n print(sess.run())"
},
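
Hand-writing a one-hot matrix like the one in tf_mlp.py is error-prone. A sketch of deriving it from the inputs instead, where np.eye supplies the one-hot rows:

```python
import numpy as np

data_train = np.array(
    [[1, 3], [2, 5], [3, 1], [3, 3], [4, 2],
     [7, 1], [8, 1], [2, 2], [5, 1], [1, 7]])

sums = data_train.sum(axis=1)    # [4 7 4 6 6 8 9 4 6 8]
label_train = np.eye(10)[sums]   # shape (10, 10), row i hot at index sums[i]
print(label_train)
```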
{
"alpha_fraction": 0.6678261160850525,
"alphanum_fraction": 0.6950724720954895,
"avg_line_length": 30.962963104248047,
"blob_id": "c136894a2855496cde88bcfe38a0a70018f6f958",
"content_id": "1e7e4820fd46fff0ab84df6ed53d9acd06a3e58e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1725,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 54,
"path": "/sklearn/precision_recall_curves.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 4 07:12:45 2018\n\n@author: Daniel Wehner\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.model_selection import train_test_split\n\n# create data set\nX, y = make_classification(n_samples=1250, n_features=3, n_redundant=0,\n n_informative=3, n_clusters_per_class=4,\n random_state=50)\nplt.scatter(X[:, 0], X[:, 1], marker='o', c=y, s=25, edgecolor='k')\nplt.show()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n# train support vector machine\nsvc = SVC(gamma=0.05).fit(X_train, y_train)\n\n# calculate precision and recall curve for svm\nprecision, recall, thresholds = precision_recall_curve(\n y_test, svc.decision_function(X_test))\n\nclose_zero = np.argmin(np.abs(thresholds))\nplt.plot(precision[close_zero], recall[close_zero], \"o\", markersize=10,\n fillstyle=\"none\", label=\"svm threshold 0\", mew=2)\n\nplt.plot(precision, recall, label=\"svm\")\n\n# train random forest classifier\nrf = RandomForestClassifier(n_estimators=500, random_state=0, max_features=2)\nrf.fit(X_train, y_train)\n\nprecision_rf, recall_rf, thresholds_rf = precision_recall_curve(\n y_test, rf.predict_proba(X_test)[:, 1])\n\nclose_default_rf = np.argmin(np.abs(thresholds_rf - 0.5))\nplt.plot(precision_rf[close_default_rf], recall_rf[close_default_rf],\n \"^\", markersize=10, fillstyle=\"none\", label=\"rf threshold 0.5\", mew=2)\n\nplt.plot(precision_rf, recall_rf, label=\"rf\")\n\nplt.xlabel(\"Recall\")\nplt.ylabel(\"Precision\")"
},
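
A single-number summary of the two curves above makes model comparison easier. A sketch using scikit-learn's average_precision_score on the same `svc` and `rf` models from the script:

```python
from sklearn.metrics import average_precision_score

ap_svc = average_precision_score(y_test, svc.decision_function(X_test))
ap_rf = average_precision_score(y_test, rf.predict_proba(X_test)[:, 1])
print("average precision  svm: {:.3f}  rf: {:.3f}".format(ap_svc, ap_rf))
```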
{
"alpha_fraction": 0.599250316619873,
"alphanum_fraction": 0.623104453086853,
"avg_line_length": 27.086124420166016,
"blob_id": "6bb575ec2589995a9d2031ab0240da87ec53f84f",
"content_id": "b1903c8f7f93dc326fc3fbf872db37d4c2c774b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5869,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 209,
"path": "/tf/tf_nn_demo.py",
"repo_name": "DaWe1992/ML_Python_Tutorials",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Training of a neural network to\n# classify traffic signs.\n# 06.10.2017\n#\n# @author Daniel Wehner\n# @see https://www.datacamp.com/community/tutorials/tensorflow-tutorial\n\n# import modules\nimport os\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage import transform\nfrom skimage.color import rgb2gray\n\n#\n# Load the data from the directory specified\n# \n# @param data_dir:string\n# @return images:list, labels:list\ndef load_data(data_dir):\n \n # get all subdirectories of folder specified\n dirs = [d for d in os.listdir(data_dir)\n if os.path.isdir(os.path.join(data_dir, d))]\n \n # initialize lists for labels and images\n labels = []\n images = []\n \n # read the \n for d in dirs:\n label_dir = os.path.join(data_dir, d)\n file_names = [os.path.join(label_dir, f)\n for f in os.listdir(label_dir)\n if f.endswith('.ppm')]\n for f in file_names:\n images.append(data.imread(f))\n labels.append(int(d))\n \n return images, labels\n\n#\n# Display images\n# \n# @param images:list\ndef display_imgs(images):\n traffic_signs = [300, 2250, 3650, 4000]\n \n for i in range(len(traffic_signs)):\n plt.subplot(1, 4, i + 1)\n plt.axis('off')\n plt.imshow(images[traffic_signs[i]])\n plt.subplots_adjust(wspace = 0.5)\n # display the image\n plt.show()\n print(\"shape: {0}, min: {1}, max: {2}\".format(\n images[traffic_signs[i]].shape, \n images[traffic_signs[i]].min(), \n images[traffic_signs[i]].max()))\n \n#\n# Displays one image per class label\n#\n# @param images:list\n# @param labels:list\ndef display_img_per_class(images, labels):\n unique_labels = set(labels)\n \n # initialize the figure\n plt.figure(figsize = (15, 15))\n \n i = 1\n for label in unique_labels:\n # pick first image with according label\n image = images[labels.index(label)]\n plt.subplot(8, 8, i)\n plt.axis('off')\n plt.title(\"Label {0} ({1})\".format(label, labels.count(label)))\n i += 1\n # show the image\n plt.imshow(image)\n \n plt.show()\n \n#\n# Preprocess the images\n#\n# @param images:list\n# @return images28:list (list of resized images)\ndef preprocess_imgs(images):\n # resize images\n images28 = [transform.resize(image, (28, 28)) for image in images]\n # grayscale\n images28 = np.array(images28)\n images28 = rgb2gray(images28)\n return images28\n\n#\n# Train a neural net.\n#\n# @param images28:list\n# @param labels:list\ndef train_ann(images28, labels):\n x = tf.placeholder(dtype = tf.float32, shape = [None, 28, 28])\n y = tf.placeholder(dtype = tf.int32, shape = [None])\n \n # flatten the input data\n images_flat = tf.contrib.layers.flatten(x)\n # fully connected layer\n logits = tf.contrib.layers.fully_connected(images_flat, 62, tf.nn.relu)\n # define a loss function\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels = y, logits = logits))\n # define an optimizer\n train_op = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(loss)\n # convert logits to label indexes\n correct_pred = tf.arg_max(logits, 1)\n # define an accuracy metric\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n print(\"images_flat: \", images_flat)\n print(\"logits: \", logits)\n print(\"loss: \", loss)\n print(\"predicted_labels: \", correct_pred)\n \n tf.set_random_seed(1234)\n sess = tf.Session()\n \n sess.run(tf.global_variables_initializer())\n \n # training of the neural net\n for i in range(201):\n print(\"EPOCH\", i)\n _, accuracy_val = sess.run([train_op, 
accuracy],\n feed_dict = {x: images28, y: labels})\n if i % 10 == 0:\n print(\"Loss: \", loss)\n print(\"DONE WITH EPOCH\")\n \n \n # pick 10 random images\n sample_indexes = random.sample(range(len(images28)), 10)\n sample_images = [images28[i] for i in sample_indexes]\n sample_labels = [labels[i] for i in sample_indexes]\n \n predicted = sess.run([correct_pred], feed_dict = {x: sample_images})[0]\n \n print(sample_labels)\n print(predicted)\n \n #fig = plt.figure(figsize = (10, 10))\n for i in range(len(sample_images)):\n truth = sample_labels[i]\n prediction = predicted[i]\n plt.subplot(5, 2, i + 1)\n plt.axis(\"off\")\n color = \"green\" if truth == prediction else \"red\"\n plt.text(40, 10, \"Truth: {0}\\nPrediction: {1}\".format(truth, prediction), \n fontsize=12, color=color)\n plt.imshow(sample_images[i], cmap=\"gray\")\n\n plt.show()\n \n test_data_dir = os.path.join(ROOT_PATH, 'Testing')\n test_images, test_labels = load_data(test_data_dir)\n # preprocess test images\n test_images28 = preprocess_imgs(test_images)\n # predict labels\n predicted = sess.run([correct_pred], feed_dict = {x: test_images28})[0]\n # Calculate correct matches \n match_count = sum([int(y == y_) for y, y_ in zip(test_labels, predicted)])\n # Calculate the accuracy\n accuracy = match_count / len(test_labels)\n \n # Print the accuracy\n print(\"Accuracy: {:.3f}\".format(accuracy))\n \n sess.close()\n\n# root path to training and test data\nROOT_PATH = r'C:\\Users\\D062271\\Documents\\Data\\Belgium_Traffic_Signs'\ntrain_data_dir = os.path.join(ROOT_PATH, 'Training')\n\n\nimages, labels = load_data(train_data_dir)\nimages28 = preprocess_imgs(images)\n\n#images = np.array(images)\n#labels = np.array(labels)\n\n#print(images.ndim)\n#print(images.size)\n#print(images[0])\n\n# count number of labels\n#print(len(set(labels)))\n\n#plt.hist(labels, 62)\n#plt.show()\n\n#display_imgs(images)\ndisplay_img_per_class(images28, labels)\ntrain_ann(images28, labels)"
}
] | 9 |
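
tf_nn_demo.py above retrains from scratch on every run. A minimal standalone sketch of persisting and restoring weights with tf.train.Saver, which would let the evaluation block rerun without retraining; the tiny graph and the /tmp checkpoint path are illustrative choices:

```python
import tensorflow as tf

# a trivially small graph standing in for the traffic-sign network
w = tf.Variable(tf.zeros([2, 2]), name="w")
init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    path = saver.save(sess, "/tmp/model.ckpt")  # writes the variable values
    print("Model saved to", path)

with tf.Session() as sess:
    saver.restore(sess, "/tmp/model.ckpt")      # reloads them into the same graph
    print("Restored w:", sess.run(w))
```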
allenpark/7k-of-science
|
https://github.com/allenpark/7k-of-science
|
66616f80525c4928cb2781450c787c03b24c1054
|
d55fbff7adc05f460891f1ee23f905296d5173a3
|
121ca3a5af91da1f0a9adcd1930d878cfd98bde7
|
refs/heads/master
| 2021-01-17T01:08:58.206015 | 2013-03-07T00:38:25 | 2013-03-07T00:38:25 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6705882549285889,
"alphanum_fraction": 0.6801470518112183,
"avg_line_length": 24.679244995117188,
"blob_id": "489180ebd8dbb43439cb09fd4e8b7f8b1e1c6552",
"content_id": "b86a379aef380ce9d0702d40f4cbf96de8aa9ee0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1360,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 53,
"path": "/display.php",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "<html>\n\t<?php\n\tinclude('globalsetup.php');\n \tinclude('picasaFunctions.php');\n \t\tsetupHeader();\n \t\t//Could directly pass to JSON...\n \t\t//Or...\n \t\t\n\t?>\n\t\n\n<script language=\"JavaScript1.2\">\n\nvar howOften = <?php printHowOftenAdvance() ?>; //number often in seconds to rotate\nvar current = 0; //start the counter at 0\nvar ns6 = document.getElementById&&!document.all; //detect netscape 6\n\n\n// place your images, text, etc in the array elements here\n\nvar items = new Array();\n\n<?php \n$items = getDisplayDisplayItems();\nprintLiveDisplayItemArray($items);\n ?>\n \nfunction rotater() {\n document.getElementById(\"placeholder\").innerHTML = items[current];\n current = (current==items.length-1) ? 0 : current + 1;\n setTimeout(\"rotater()\",howOftenAdvance);\n}\n\nfunction rotater() {\n if(document.layers) {\n document.placeholderlayer.document.write(items[current]);\n document.placeholderlayer.document.close();\n }\n if(ns6)document.getElementById(\"placeholderdiv\").innerHTML=items[current]\n if(document.all)\n placeholderdiv.innerHTML=items[current];\n\n current = (current==items.length-1) ? 0 : current + 1; //increment or reset\n setTimeout(\"rotater()\",howOftenAdvance);\n}\nwindow.onload=rotater;\n//-->\n</script>\n\n<body id=\"fullscreenbody\">\n\n<layer id=\"placeholderlayer\"></layer><div id=\"placeholderdiv\"></div>\n</body>"
},
{
"alpha_fraction": 0.6033350825309753,
"alphanum_fraction": 0.6204423904418945,
"avg_line_length": 24.217864990234375,
"blob_id": "540ab5f7b8bf213440c50788d468e974560a0206",
"content_id": "e78c5adf16f70b50e435c8984a8d80102f7a6d9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 11574,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 459,
"path": "/globalsetup.php",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "<?php\n\tinclude('config.php');\n\n\t$sqlusername = getSQLUsername();\n\t$sqlpassword = getSQLPassword();\n\t$sqldatabase = getSQLDatabase();\n\t$sqlserver = getSQLServer();\n\t$myname = $_SERVER['SSL_CLIENT_S_DN_CN'];\n\t$myemail = $_SERVER['SSL_CLIENT_S_DN_Email'];\n\t$myusername = substr($myemail, 0, strpos($myemail, \"@\"));\n\t$isResident = 0;\n\t$isAdmin = 0;\n\n\t//print(\"Started global<br/>\");\n\tcheckPrivs();\n\t//print(\"Privs checked<br/>\");\n\t\n\t$userList = getUsers();\n\t\n\t\t\t\n\tcheckForNewUser($userList);\n\t\n\t//showSimmonsUsers(getUsers());\n\nfunction setupHeader() {\n\tprint('<head>\n\t<title>Simmons Display</title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"index.css\">\n\t\n\t<script type=\"text/javascript\" src=\"Libraries/jquery.js\"></script>\n\t<script type=\"text/javascript\" src=\"Libraries/jquery.ui.js\"></script>\n\t<script type=\"text/javascript\" src=\"Libraries/jquery.jcoverflip.js\"></script>\n\t\n\t</head>');\n}\n\nfunction setupPanelHeader() {\n\tprint('<head>\n\t<title>Simmons Display</title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"panel.css\">\n\t\n\t<script type=\"text/javascript\" src=\"Libraries/jquery.js\"></script>\n\t<script type=\"text/javascript\" src=\"Libraries/jquery.ui.js\"></script>\n\t<script type=\"text/javascript\" src=\"Libraries/jquery.jcoverflip.js\"></script>\n\t\n\t</head>');\n}\n\n\nfunction setupDatabase() {\n\tglobal $sqlserver, $sqlusername, $sqlpassword, $sqldatabase;\n\n\t$con = mysql_connect($sqlserver, $sqlusername, $sqlpassword);\n\t\n\tif (!$con) {\n \t\tdie('Could not connect: ' . mysql_error());\n \t}\n \t@mysql_select_db($sqldatabase) or die( \"Unable to select database\");\n}\n\nfunction getUsers() {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM User ORDER BY id\");\n}\n\nfunction getUser($un) {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM User WHERE `username` = '$un'\");\n}\n\nfunction getDisplayItem($id) {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM DisplayItem WHERE `id` = '$id'\");\n}\n\nfunction checkPrivs() {\n\tglobal $myusername, $isAdmin, $isResident;\n\n\tcheckDBConnection();\n\t$stuff = mysql_query(\"SELECT `isAdmin` , `isResident` FROM `User` WHERE `username` = '$myusername'\");\n\n\twhile($row = mysql_fetch_array($stuff))\n\t {\n\t\t $isAdmin = $row['isAdmin'];\n\t\t $isResident = $row['isResident'];\t\t \n\t }\n\t\n\treturn;\n}\n\nfunction getDisplayItems() {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM DisplayItem ORDER BY timeCreated DESC\");\n}\n\nfunction getDisplayItemsOf($un) {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM DisplayItem WHERE ownerUsername='\" . $un . 
\"' ORDER BY timeCreated DESC\");\n}\n\nfunction getDashDisplayItems() {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM DisplayItem WHERE onDash='1' ORDER BY timeCreated DESC\");\n}\n\nfunction getDisplayDisplayItems() {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM DisplayItem WHERE onDisplay='1' ORDER BY timeCreated DESC\");\n}\n\nfunction getDashboardDisplayItems() {\n\tcheckDBConnection();\n\treturn mysql_query(\"SELECT * FROM DisplayItem WHERE onDash='1' ORDER BY timeCreated DESC\");\n}\n\n\n\nfunction checkDBConnection() {\n\tglobal $con;\n\tif($con == NULL) {\n\t\tsetupDatabase();\n\t}\n}\n\nfunction closeDatabase() {\n\tmysql_close($con);\n}\n\nfunction checkForNewUser($users) {\n\tglobal $myusername, $myname;\n\tcheckDBConnection();\n\t$users = mysql_query(\"SELECT * FROM User WHERE username='\" . $myusername . \"'\");\n\n\twhile($row = mysql_fetch_array($users))\n\t {\n\t\t //echo \"\" . $row['username'] . \"<br/>\";\n\t\t if($myusername == $row['username']) {\t\n\t\t \t//echo(\"Gotcha\");\t\n\t\t \treturn;\n\t\t } \n\t \n\t }\n \tprint('<script> window.alert(\"Welcome to the Simmons Display Database for the first time, ' . $myname .'!\"); </script>');\n \t\n \t\n \t//print('<script> window.alert(\"Is it alright if I just call you ' . substr($myname, 0, strpos($myname, \" \")) .'? Thanks bro.\"); </script>');\n \t\n \t$search = $myusername;\n\t$string = file_get_contents(\"residents.txt\");\n\t$string = explode(\"\\n\", $string); // \\n is the character for a line break\n\tif(in_array($search, $string)){\n\t\t$isResident = 1;\n\t\t\n\t\tprint('<script> window.alert(\"You are listed as a resident, congratulations\"); </script>');\n\t\t\n\t\t//print(\"You are so a resident\");\n\t} else {\n\t\t$isResident = 0;\n\t\tprint('<script> window.alert(\"Watchu doing? You are not listed as a resident...\"); </script>');\n\t\t\n\t\t//print(\"Watchu doing? You are not listed as a resident...\");\n\t}\n \t\n \t\n\tcreateNewUser($isResident);\n}\n\nfunction createNewUser($isRes) {\n\tglobal $myname, $myusername, $isAdmin, $isResident;\n\t\n\tcheckDBConnection();\n\t$users = mysql_query(\"INSERT INTO User(username, name, isAdmin, isResident) VALUES('$myusername', '$myname', 0, $isRes)\");\n\t\n}\n\nfunction createNewDisplayItem($file, $thumb) {\n\tglobal $myusername;\n\t\n\tcheckDBConnection();\n\t\n\t$myquery = \"INSERT INTO DisplayItem(ownerUsername, fileLocation, thumbLocation, onDisplay, onDash, duration, doesExist) VALUES('$myusername', '$file', '$thumb', 0, 0, 60, 1)\";\n\t\n\t$display = mysql_query($myquery);\n\t\n\tcloseAndReloadParent();\t\n}\n \nfunction getMultiCheckboxText($un, $category, $value) {\n\n\t$text = '<input type=\"checkbox\" name=\"' . $category . '[]\" ';\n\t$text = $text . 'value=\"' . $un . '\" ';\n\t\n\tif($value == 1) {$text = $text . 'checked=\"checked\"';} \n\t$text = $text . ' />';\n\t//$text = $text . \"My value is \" . $value;\n\treturn $text;\n}\n\nfunction getCheckboxText($category, $value) {\n\n\t$text = '<input type=\"checkbox\" name=\"' . $category . '\" ';\n\t$text = $text . 'value=\"' . '1' . '\" ';\n\t\n\tif($value == 1) {$text = $text . 'checked=\"checked\"';} \n\t$text = $text . ' />';\n\t//$text = $text . \"My value is \" . 
$value;\n\treturn $text;\n}\n\nfunction editUser($name, $username, $isAdmin, $isResident) {\n\tcheckDBConnection();\n\t\n\t$myquery = \"UPDATE User SET name='$name', isAdmin='$isAdmin', isResident='$isResident' WHERE username = '$username'\";\n\t\n\t$display = mysql_query($myquery);\n\t//print($display);\n}\n\nfunction editDisplayItem($editId, $editOnDisplay, $editOnDashboard) {\n\tcheckDBConnection();\n\t\n\t$myquery = \"UPDATE DisplayItem SET onDisplay='$editOnDisplay', onDash='$editOnDashboard' WHERE id = '$editId'\";\n\t\n\t$display = mysql_query($myquery);\n\n}\n\nfunction showDisplayItemTable($result) {\n\n\tprint(\"<head>\n\t<title>Display Items</title>\n\t</head>\");\n\techo (\"Displays\");\n\techo \"<table border='1'>\n\t<tr>\n\t<th>Id</th>\n\t<th>ownerUsername</th>\n\t<th>fileLocation</th>\n\t<th>thumbLocation</th>\n\t<th>OnDisplay</th>\n\t<th>Duration</th>\n\t<th>doesExist</th>\n\t<th>timeCreated</th>\n\t</tr>\";\n\t\n\twhile($row = mysql_fetch_array($result))\n\t {\n\t echo \"<tr>\";\n\t echo \"<td>\" . $row['id'] . \"</td>\";\n\t echo \"<td>\" . $row['ownerUsername'] . \"</td>\";\n\t echo \"<td>\" . $row['fileLocation'] . \"</td>\";\n\t echo \"<td>\" . $row['thumbLocation'] . \"</td>\";\n\t echo \"<td>\" . getTextOf($row['onDisplay']) . \"</td>\";\n\t echo \"<td>\" . $row['duration'] . \"</td>\";\n\t echo \"<td>\" . getTextOf($row['doesExist']) . \"</td>\";\n\t echo \"<td>\" . $row['timeCreated'] . \"</td>\";\n\t \n\t echo \"</tr>\";\n\t }\n\techo \"</table>\";\n\t\n\t//var_dump($user);\n }\n\nfunction getTextOf($binaryValue) {\n\tif($binaryValue) { \n\t\treturn \"Yes\"; \n\t} else {\t\n\t\treturn \"No\";\n\t} \n}\n\nfunction closeAndReloadParent() {\n\t//return;\n\tprint(\"<br/>Successfully inserted!\");\n\t\n\t\n\tprint(\"<script type=\\\"text/javascript\\\">\n\t\t opener.location.href = unescape(opener.location.pathname);\n\t\t //opener.location.reload(true);\n\t\t self.close();\n\t\t</script>\");\n\t//\t*/\n\t\n\t//window.location = location.href;\n\t\n\t/*\n\tprint(\"<script type=\\\"text/javascript\\\">\n\t\t opener.location.reload(true);\n\t\t self.close();\n\t\t</script>\");\n\t//\t*/\n\t\t\n}\n\nfunction showSliderUserPhotos($result) {\n\tglobal $myusername;\n\n\n\twhile($row = mysql_fetch_array($result))\n\t\t {\n\t\n\t\t$url = 'picLookup.php?picId=' . $row['id'];\n\t\t\n\t\techo '<li>';\n\n\t\t\n\t\t//Image\n\t\techo '<img src=\"' . $row['thumbLocation'] . '\" alt=\"Pic\" />\n\t\t';\n\t\techo '<span class=\"title\">';\n\t\techo '<a href=\"' . $url . '\" class=\"editLink\" \n\t\t';\n\t\t\n\t\t//Link javascript popup\n\t\techo 'onclick=\"javascript:void window.open(\\'' . $url . '\\',\\'1352593439021\\',\\'width=400,height=350,toolbar=0,menubar=0,location=0,status=1,scrollbars=0,resizable=1,left=0,top=0\\');return false;\">';\n\t\t\n\t\techo \"Edit \" . $row['title'] . \"</a>\n\t\t\n\t\t\";\n\t\t\n\t\techo \"\";\n\t\t//Insert title here\n\t\t\n\t\t\n\t\techo'</span></li>';\n\t\n\t }\n}\n\nfunction showUserPhotos($result) {\n\tglobal $myusername;\n\nwhile($row = mysql_fetch_array($result))\n\t {\n\t //echo \"<tr>\";\n\t //echo \"<td>\" . $row['id'] . \"</td>\";\n\t //echo \"<td>\" . $row['ownerId'] . \"</td>\";\n\t //echo \"<td>\" . $row['fileLocation'] . \"</td>\";\n\n\t$url = 'picLookup.php?picId=' . $row['id'];\n\t//print($url);\n\t\n\techo '<a href=\"' . $url . '\" onclick=\"javascript:void window.open(\\'' . $url . 
'\\',\\'1352593439021\\',\\'width=500,height=350,toolbar=0,menubar=0,location=0,status=1,scrollbars=0,resizable=1,left=0,top=0\\');return false;\">';\n\t echo '<img src=\"' . $row['thumbLocation'] . '\" alt=\"Pic\" draggable=\"true\" ondragstart=\"dragIt(event);\" >';\n\t\n\techo('</a>');\n\t\n\t//echo '<input type=\"image\" src=\"' . $row['thumbLocation'] . '\" name=\"image\">';\n\t \n\t //echo \"<td>\" . getTextOf($row['onDisplay']) . \"</td>\";\n\t //echo \"<td>\" . $row['duration'] . \"</td>\";\n\t //echo \"<td>\" . getTextOf($row['doesExist']) . \"</td>\";\n\t //echo \"<td>\" . $row['timeCreated'] . \"</td>\";\n\t \n\t //echo \"</tr>\";\n\t }\n\t\n\t\n\t//var_dump($user);\n\n\t\n }\n\n\nfunction getLink($text, $url) {\n\treturn '<a href=\"' . $url . '\">' . $text . '</a> ';\n}\n\nfunction getPopupLink($text, $url) {\n\treturn '<a class=\"popup\" href=\"' . $url . '\" onclick=\"javascript:void window.open(\\'' . $url . '\\',\\'1352593439021\\',\\'width=300,height=200,toolbar=0,menubar=0,location=0,status=1,scrollbars=0,resizable=1,left=0,top=0\\');return false;\">' . $text . '</a>';\n}\n\n\nfunction getBinaryImage($value) {\n\tif($value) {\n\t\treturn '<img src=\"img/yes_check.png\" alt=\"Yes\" height=\"10\" width=\"10\">';\n\t} else {\n\t\treturn '<img src=\"img/no_check.gif\" alt=\"No\" height=\"10\" width=\"10\">';\t\n\t}\n}\n\nfunction printLiveDisplayItemArray($result) {\n\t$counter = 0;\n\n\twhile($row = mysql_fetch_array($result)) {\n\n\t\t$width = 7280;\n\t\t$height = 1920;\n\n\t\t echo \"items[\" . $counter . ']=\"';\n\t\t echo \"<a href='' ><img alt='Please contact Simmons Tech'\";\n\t\t echo \" src='\" . $row['fileLocation'] . \"' height='\" . $height . \"' width='\" . $width . \"' border='0' />\";\n\t\t echo'</a>\";\n\t \n\t\t ';\n\n\t$counter++;\n\t }\n\n}\n\nfunction printLiveDisplaySection($result, $section) {\n$counter = 0;\n\n\twhile($row = mysql_fetch_array($result))\n\t {\n\t\t//Print the piece in the rotating image section\n\t\t\n\t\t$width = 7280;\n\t\t$height = 1920;\n\t\t\n\t\techo \"items[\" . $counter . ']=\"';\n\t\techo \"<img id='s\" . $section . \"' alt='Please contact Simmons Tech'\";\n\t\techo \" src='\" . $row['fileLocation'] . \"' height='\" . $height . \"' width='\" . $width . \"' border='0' opacity='0'/>\";\n\t\techo'\";\n\t\t\n\t\t';\n\t\t\n\t\t$counter++;\n\t }\n\n}\n\nfunction printHowOftenAdvance() {\n\techo \"30000\";\n\t//Every 30 seconds\n}\n\nfunction printHowOftenUpdate() {\n\techo \"600000\";\n\t//Every 10 minutes\n}\n\nfunction printControlButtons() {\n\t\tprint('<div id=\"controlButtons\">');\n\t\tprint(\"\t<a href=\\\"addPicPopup.php\\\" onclick=\\\"javascript:void window.open('addPicPopup.php','1352593439001','width=500,height=150,toolbar=0,menubar=0,location=0,status=1,scrollbars=0,resizable=1,left=0,top=0');return false;\\\">\");\n\t\tprint('<img src=\"http://aux.iconpedia.net/uploads/1331050018396872710.png\" alt=\"Add Image\" height=\"42\" width=\"42\"> ');\n\t\tprint(\"</a>\");\n}\n\nfunction printAdminStatus() {\n\tglobal $isAdmin, $isResident;\n\tcheckPrivs();\n\t\n\tif(isset($isAdmin) && $isAdmin) {\n\t\tprint(\"<h3>Welcome back, Admin!</h3>\n\t\t\");\n\t} else if(isset($isResident) && $isResident) {\n\t\tprint(\"<h3>Welcome back, Simmmons Resident!</h3>\n\t\t\");\n\t} else {\n\t\tprint(\"Welcome, Guest!\n\t\t\");\n\t}\n\t\n}\n\n?>"
},
{
"alpha_fraction": 0.7488889098167419,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 36.5,
"blob_id": "fdb818ed3a880cf4227707ae299f6ab7a804dd2f",
"content_id": "080c18f725d49258c9b98661acbd73fd20c7c22f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 12,
"path": "/README.md",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "7k-of-science\n=============\nMost Important Files:\n\ndisplayMaster = Main page, view and sort others\nmods = Pretty UI management for users and permissions\ns1 = individual panel, specify which by setting screen to a number 1-12 with $_GET\npicasaFunctions = manages Picasa interaction, only necessary when storing a photo\nglobalsetup = Common functions, ex. database access, outputting photos, etc\n\n\n***Contact Ada Taylor for the mySQL/Picasa config file***\n"
},
{
"alpha_fraction": 0.5667299032211304,
"alphanum_fraction": 0.5755850672721863,
"avg_line_length": 19.02531623840332,
"blob_id": "4407413fd9574bffe0be93e38ce929d8337ef6e8",
"content_id": "c4363fd701cd940b22c7d2d18f49aefa58d26f28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1581,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 79,
"path": "/showPicsClone.php",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "<html>\n\t<?php\n//\tphpinfo();\n//\texit();\n\t\n\tinclude('globalsetup.php');\n \tinclude('picasaFunctions.php');\n \t\tsetupHeader();\n\t?>\n\t\n\t<script>\n\t \n\t //alert(\"Preshifted\");\n\t \n\t //$(window).load(function() {\n\t $(document).ready(function() {\n\t \t$('#flip').jcoverflip();\n\t \t$('#flip2').jcoverflip();\n\t \t$('#flip3').jcoverflip();\n\t \n\t \t//alert(\"midshifted\");\n\t });\n\t \n\t //alert(\"Postshifted\");\n\t \n\t</script>\n\t\n\t<body>\n\t\n\t<h2>Simmons 7k Display Manager</h2>\n\t\n\t<?php\n\tprintAdminStatus();\n\t\t\n\tprintControlButtons();\n\t\n\t//Editing button options\n\t\tif($isAdmin) {\n\t\t\t//Add admin controls\n\t\t\t//print('<div id=\"place2\" ondrop=\"dropIt(event);\" ondragover=\"event.preventDefault();\">');\n\t\t\t//print(\"<b>Trash</b>\");\n\t\t\t//print(\"</div>\");\t\n\t\t}\n\t\t\n\t\tprint(\"</div>\n\t\t<br/><br/>\");\n\t\techo '<div id=\"wrapper1\">\n\t\t\t<div id=\"label1\" class=\"label\">Active Photos</div>\n \t\t<ul id=\"flip\" class=\"theslider\">';\n\t\tshowSliderUserPhotos(getDisplayDisplayItems());\n \techo '</ul>\n\t </div>';\n\t //showUserPhotos(getActiveDisplayItems());\n\t\n\t \t//showUserPhotos(getDisplayItemsOf($myusername));\n\t \t\n\t \techo '<div id=\"wrapper2\">\n\t \t\t<div id=\"label2\" class=\"label\">My Photos</div>\n \t\t<ul id=\"flip2\" class=\"theslider\">';\n\t\tshowSliderUserPhotos(getDisplayItemsOf($myusername));\n \techo '</ul>\n\t </div>';\n\t\t\n\t \t//showUserPhotos(getDisplayItems());\n\t \techo '<div id=\"wrapper3\">\n\t \t<div id=\"label3\" class=\"label\">All Photos</div>\n \t\t<ul id=\"flip3\" class=\"theslider\">';\n\t\tshowSliderUserPhotos(getDisplayItems());\n \techo '</ul>\n\t </div>';\n\t \n\t ?>\n\t\n\t<br/>\n\t\n\t\n\t\n\t</body>\n</html>"
},
{
"alpha_fraction": 0.4113873243331909,
"alphanum_fraction": 0.6535685658454895,
"avg_line_length": 30.174999237060547,
"blob_id": "76d097bc5620c63ab6b5009a475f6a1d575a010f",
"content_id": "ed81d238f8ca937db562499b84074576578bcac0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 40,
"path": "/generateCSS.py",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "def getWidthHeight(name, x, y, width, height, newWidth, newHeight):\n #\" 01.png\",0,67,500,893,720,1280\n ratioX = newWidth/width\n ratioY = newHeight/height\n\n offsetX = x*(ratioX)*-1\n offsetY = y*(ratioY)*-1\n\n #width: 7280px;\n #height: 1920px;\n \n finalHeight = (1920)*ratioX\n finalWidth = (7280)*ratioY\n\n print \"#s\" + int(name[1:3]) + \" {\"\n \n print \"\\tposition: fixed;\"\n\n print \"\\ttop: \" + str(offsetY) + \"px;\"\n print \"\\tleft: \" + str(offsetX) + \"px;\"\n\n print \"\\twidth: \" + str(finalWidth) + \"px;\"\n print \"\\theight: \" + str(finalHeight) + \"px;\"\n\n print \"}\"\n print \"\"\n\n\ngetWidthHeight(\" 01.png\",0,67,500,893,720,1280)\ngetWidthHeight(\" 02.png\",580,71,700,1245,720,1280)\ngetWidthHeight(\" 03.png\",1376,466,1511,850,1920,1080)\ngetWidthHeight(\" 04.png\",45,1420,893,500,1280,720)\ngetWidthHeight(\" 05.png\",1029,1420,893,500,1280,720)\ngetWidthHeight(\" 06.png\",2001,1420,893,500,1280,720)\ngetWidthHeight(\" 07.png\",2995,0,1080,1920,1080,1920)\ngetWidthHeight(\" 08.png\",4181,4,700,1245,720,1280)\ngetWidthHeight(\" 09.png\",4978,675,700,1245,720,1280)\ngetWidthHeight(\" 10.png\",5777,91,500,893,720,1280)\ngetWidthHeight(\" 11.png\",6375,93,893,500,1280,720)\ngetWidthHeight(\" 12.png\",5769,1067,1511,850,1920,1080)\n"
},
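
To sanity-check the arithmetic in generateCSS.py, the same computation for the first panel works out directly (a Python 3 sketch; values follow the " 01.png" call in the script):

```python
# panel " 01.png": x=0, y=67, native 500x893 scaled to 720x1280
ratio_x = 720 / 500       # 1.44
ratio_y = 1280 / 893      # ~1.4334

offset_x = -0 * ratio_x   # 0.0
offset_y = -67 * ratio_y  # ~-96.0

final_width = 7280 * ratio_x   # ~10483.2 px scaled canvas width
final_height = 1920 * ratio_y  # ~2752.2 px scaled canvas height

print(offset_x, offset_y, final_width, final_height)
```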
{
"alpha_fraction": 0.5794804096221924,
"alphanum_fraction": 0.5812417268753052,
"avg_line_length": 21.465347290039062,
"blob_id": "83f9b4bcbcae760395493f7db085a7902817d391",
"content_id": "fee5f9cc971d0e2dca8d13e2ee7cb1456a1daf0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2271,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 101,
"path": "/modadmin.php",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "<?php\n$nl = \"<br/>\";\n$myname = $_SERVER['SSL_CLIENT_S_DN_CN'];\n$myemail = $_SERVER['SSL_CLIENT_S_DN_Email'];\n$myusername = substr($myemail, 0, strpos($myemail, \"@\"));\n\nfunction writeChanges($changed) {\n $changed = \"test\";\n\t$myFile = \".ltaccess.mit\";\n $fh = fopen($myFile, 'w') or die(\"Can't open file\");\n\tfwrite($fh, $changed);\n fclose($fh);\n}\n\nfunction getCurrentAuthsString() {\n $filename = '.htaccess.mit';\n $string = \"Require user \";\n\n $fh = fopen($filename, \"r\");\n $original = fread($fh, filesize($filename));\n fclose($fh);\n\n $current = substr($original, strpos($original, \"\\n\"));\n $current = substr($current, 14);\n $current = substr($current, 0, strpos($current, \"\\n\"));\n return $current;\n}\n\nfunction getCurrentAuthsArray() {\n return explode(\" \", getCurrentAuthsString());\n}\n\n\necho(\"Welcome, \" . $myname . \"<br/>\");\n\n$auths = getCurrentAuthsArray();\n\nif(in_array($myusername, $auths)) {\n\techo(\"<br/>You have admin priviledges! <br/>\");\n} else {\n\techo(\"<br/>You are not an admin, sorry bro. <br/>\");\n\texit();\t\n}\n\n\nprint(\"Authenticated users are: <br/>\");\n\n#var_dump($current);\n#print(\"<br/>\");\n#var_dump($authusers);\n#exit();\nprint(\"<form action=\\\"modadmin.php\\\" method=\\\"post\\\">\");\n\nforeach($auths as $user) {\n print(\"<input type=\\\"checkbox\\\" checked=\\\"checked\\\" name=\\\"check$user\\\"> $user </input>\");\n print(\"<br/>\");\n # print(\"$user . \"</li>\");\n}\n#print(\"<input type=\\\"submit\\\" />\");\nprint(\"</form>\");\n\n\nprint(\"<br/>\");\nprint(\"Add an admin? Type name below and press submit\");\n\n?>\n\n<form action=\"modadmin.php\" method=\"post\">\nUsername: <input type=\"text\" name=\"newuname\" />\n<input type=\"submit\" />\n</form>\n\n<?php \n$auths = getCurrentAuthsArray();\n$newauths = \"\";\nforeach($auths as $user) {\n $checkid = \"check\" . $user;\n if(isset($_POST[$checkid])) {\n#\tprint $checkid;\t\n\t$newauths = \" \" + $checkid;\t\n\t}\n else {\n#\tprint \"Unchecked\";\n }\n writeChanges($newauths);\n\n}\n\nif(isset($_POST[\"newuname\"])) {\n\t$newuname = $_POST[\"newuname\"];\n\n\t#print(\"</br>Old List: \" . $current);\n\t$newcurrent = getCurrent() . \" \" . $newuname;\n\t#print(\"<br/>New List: \" . $newcurrent);\n\n\t$output = str_replace($current, $newcurrent, $original);\n\twriteChanges($output);\n\n}\n\n?>\n\n\n"
},
{
"alpha_fraction": 0.6084656119346619,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 10.875,
"blob_id": "bddb216819237c24abafe423dd300750651002a3",
"content_id": "2ab52bc64e6ed178f67cd79b9932694f483f071b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 189,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 16,
"path": "/sandbox.php",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "<script>\n\n\nfunction loopAdvance() {\n\tif (navigator.onLine) {\n\t alert('ONLINE!');\n\t} else {\n\t alert('Connection flaky');\n\t}\n\n\tsetTimeout(\"loopAdvance()\",5000);\n}\n\nloopAdvance();\n\n</script>"
},
{
"alpha_fraction": 0.5569842457771301,
"alphanum_fraction": 0.5938047766685486,
"avg_line_length": 21.233766555786133,
"blob_id": "0208e54d8aaccb0eded243609ee6aacb1e773be6",
"content_id": "292192c4d640d9166207a71b8449ac9283064190",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1711,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 77,
"path": "/DEVshowPics.php",
"repo_name": "allenpark/7k-of-science",
"src_encoding": "UTF-8",
"text": "<html>\n\t<?php\n\tinclude('globalsetup.php');\n \tinclude('picasaFunctions.php');\n \t\tsetupHeader();\n\t?>\n\t\n\t\n\t\n\t<script>\n\t $(function(){\n\t\t$('#flip').jcoverflip();\n\t \t$('#flip2').jcoverflip();\n\t \t$('#flip3').jcoverflip();\n\t });\n\t \n\t \n\t</script>\n\t\n\t<body>\n\t\n\t<h3>Pictures</h3>\n\t\n\t<?php\n\t\t\n\t\tprint('<div id=\"controlButtons\">');\n\t\tprint(\"\t<a href=\\\"addPicPopup.php\\\" onclick=\\\"javascript:void window.open('addPicPopup.php','1352593439001','width=500,height=350,toolbar=0,menubar=0,location=0,status=1,scrollbars=0,resizable=1,left=0,top=0');return false;\\\">\");\n\t\tprint('<img src=\"http://aux.iconpedia.net/uploads/1331050018396872710.png\" alt=\"Add Image\" height=\"42\" width=\"42\"> ');\n\t\t\n\t\t\n\t\tprint(\"</a>\n\t<br/><br/>\");\n\t\n\t//Editing button options\n\t\tif($isAdmin) {\n\t\t\t//Add admin controls\n\t\t\t//print('<div id=\"place2\" ondrop=\"dropIt(event);\" ondragover=\"event.preventDefault();\">');\n\t\t\t//print(\"<b>Trash</b>\");\n\t\t\t//print(\"</div>\");\t\n\t\t}\n\t\t\n\t\tprint(\"</div>\");\n\t\t\n\t\techo(\"<h3>Active Photos</h3>\");\n\t\t\n\t\techo '<div class=\"wrapper\">\n \t\t<ul id=\"flip\" class=\"theslider\">';\n\t\tshowSliderUserPhotos(getActiveDisplayItems());\n \techo '</ul>\n\t </div>';\n\t //showUserPhotos(getActiveDisplayItems());\n\t\n\t\techo(\"<h3>My Photos</h3>\");\n\t \t//showUserPhotos(getDisplayItemsOf($myusername));\n\t \t\n\t \techo '<div class=\"wrapper\">\n \t\t<ul id=\"flip2\" class=\"theslider\">';\n\t\tshowSliderUserPhotos(getDisplayItemsOf($myusername));\n \techo '</ul>\n\t </div>';\n\t\t\n\t\techo(\"<h3>All Photos</h3>\");\n\t \t//showUserPhotos(getDisplayItems());\n\t \techo '<div class=\"wrapper\">\n \t\t<ul id=\"flip3\" class=\"theslider\">';\n\t\tshowSliderUserPhotos(getDisplayItems());\n \techo '</ul>\n\t </div>';\n\t \n\t ?>\n\t\n\t<br/>\n\t\n\t\n\t\n\t</body>\n</html>"
}
] | 8 |
beka312/online-store
|
https://github.com/beka312/online-store
|
95d1a78813d0296bbb8e32127767870181a383d3
|
60b1789b1351c0878ef606c0bfd7faf30305a37d
|
a9ba0d8c73cabc5bddf838068a9e5e835b2133d2
|
refs/heads/master
| 2023-01-18T21:36:53.986154 | 2020-11-24T09:35:30 | 2020-11-24T09:35:30 | 315,582,981 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5488505959510803,
"alphanum_fraction": 0.6034482717514038,
"avg_line_length": 19.47058868408203,
"blob_id": "72700f126c481f4e56d854d483a406d7a4e4c1a8",
"content_id": "9c745504e87958e930c474ce5e42b153fee7344a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 348,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 17,
"path": "/mysite/migrations/0003_auto_20201124_1058.py",
"repo_name": "beka312/online-store",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.3 on 2020-11-24 04:58\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mysite', '0002_notebookproduct_smartphone'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='NotebookProduct',\n new_name='Notebook',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5243055820465088,
"alphanum_fraction": 0.5902777910232544,
"avg_line_length": 17,
"blob_id": "22f371bcc2c69a42e8944cc93c9bcdfc49d35192",
"content_id": "6919d465987b61d8cb2aa836b11e891bb110adcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/mysite/migrations/0005_delete_somemodel.py",
"repo_name": "beka312/online-store",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.3 on 2020-11-24 08:00\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mysite', '0004_somemodel'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='SomeModel',\n ),\n ]\n"
}
] | 2 |
Tim-Barton/gogs-migration
|
https://github.com/Tim-Barton/gogs-migration
|
723c1c8ca8c04b2da8c62b49120d75669f7d72d0
|
1bf3b297f3b0c88b04dec1e1a98bc1861106c76a
|
d64b4627803668e316d16e6a2ea66100da6e6029
|
refs/heads/master
| 2020-03-10T00:39:31.724744 | 2018-11-06T16:20:34 | 2018-11-06T16:20:34 | 129,087,294 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7309941649436951,
"alphanum_fraction": 0.7378167510032654,
"avg_line_length": 34.379310607910156,
"blob_id": "aa4dbdf1c6752ab3fdec1209a521c04ac68e05a5",
"content_id": "f245cb34ba257ca338afb10a6297dece1aa897f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1026,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 29,
"path": "/README.md",
"repo_name": "Tim-Barton/gogs-migration",
"src_encoding": "UTF-8",
"text": "# gogs-migration\nMigrating projects from Gitlab to Gogs - both repos and wikis (eventually)\n\nThis was developed to migrate from Gitlab 7.X to Gogs v0.11 via a mix of API references and experimentation.\nYour usage for different versions may vary\n\n## Prerequisites\n\n* Python 3.X ( I'm using 3.7 - your usage may vary)\n * The scripts expect /usr/bin/python to be this version as per the usage below\n* PyCurl installed\n* GitPython installed\n* SSH Keys set up for Gitlab & Gogs, such that a `git clone repo_url` will succeed (i.e. it's the default key )\n * Ensure that you have SSH cloned or otherwise interacted with the server to the key fingerprint has been saved\n\n## Can Do\nRead the project list from Gitlab \nCreate new project in Gogs under the specified owner\nMigrate the code\n\n## Cant do yet\nMigrate the issues\nMigrate the Wiki\n\n## Usage\n./main.py -?\nusage: Migrate projects (repos & Wikis) from Gitlab to Gogs\n [-h] [-glp GITLABPAT] [-gl GITLABURL] [-go GOGSURL] [-goo GOGSOWNER]\n [-gop GOGSPAT] [-tmp TEMPDIR]\n"
},
{
"alpha_fraction": 0.5927977561950684,
"alphanum_fraction": 0.5966758728027344,
"avg_line_length": 38.239131927490234,
"blob_id": "93a99f9240de9dab18bfec236e6c96716e113b47",
"content_id": "436e1a23d0c9cdc685b039d2b33ff7f73555402f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1805,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 46,
"path": "/gogsclient.py",
"repo_name": "Tim-Barton/gogs-migration",
"src_encoding": "UTF-8",
"text": "from curl import CurlClient\nimport json\n\nclass GogsClient:\n\n def __init__(self, args):\n self.url = args.gogsurl + \"/api/v1/\"\n self.pat = args.gogspat\n self.curlClient = CurlClient()\n self.curlClient.addStaticHeader( \"Authorization: token {}\".format(self.pat))\n \n def getAllProjects(self):\n status, projectsJson = self.curlClient.Get(self.url + \"user/repos\")\n return json.loads(projectsJson)\n \n def getAllProjectsForOwner(self, owner=None):\n if owner is not None:\n status, projectsJson = self.curlClient.Get(self.url + \"orgs/{}/repos\".format(owner))\n else:\n status, projectsJson = self.curlClient.Get(self.url + \"user/repos\")\n if status == 200:\n return json.loads(projectsJson)\n else:\n return []\n \n def ownerOrgExists(self, owner):\n status, orgsJson = self.curlClient.Get(self.url + \"user/orgs\")\n orgs = json.loads(orgsJson)\n return owner in [org[\"username\"] for org in orgs]\n \n def transformShortName(self, fullname):\n return fullname[fullname.find('/'):]\n \n def createProject(self, name, description, private, owner=None):\n projectInfo = { \"name\": name,\n \"description\" : description,\n \"private\" : private}\n projectInfoJson = json.dumps(projectInfo)\n if owner is not None:\n status, returnJson = self.curlClient.Post(self.url + \"org/{}/repos\".format(owner), projectInfoJson)\n else:\n status, returnJson = self.curlClient.Post(self.url + \"user/repos\", projectInfoJson)\n if status == 201:\n return True, json.loads(returnJson)\n else:\n return False, json.loads(returnJson)[\"message\"]\n"
},
{
"alpha_fraction": 0.5836206674575806,
"alphanum_fraction": 0.5931034684181213,
"avg_line_length": 27.268293380737305,
"blob_id": "286e1e2b864d06f83ff162f80466b9c3add415cd",
"content_id": "31f12084b560cf58cc7eb08cdcad809049e6de64",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 41,
"path": "/curl.py",
"repo_name": "Tim-Barton/gogs-migration",
"src_encoding": "UTF-8",
"text": "\nimport pycurl\nfrom io import BytesIO\nfrom urllib.parse import urlencode\n\n\nclass CurlClient:\n\n def __init__(self):\n self.statichttpheaders = []\n\n def addStaticHeader(self, header):\n self.statichttpheaders.append(header)\n\n def Get(self, url, headers=[]):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(c.URL, url)\n c.setopt(c.WRITEDATA, buffer)\n c.setopt(c.HTTPHEADER, self.statichttpheaders + headers)\n c.perform()\n status = c.getinfo(pycurl.HTTP_CODE)\n c.close()\n\n body = buffer.getvalue()\n return status, body.decode('iso-8859-1')\n\n def Post(self, url, data, headers=[]):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(c.URL, url)\n c.setopt(c.WRITEDATA, buffer)\n c.setopt(c.HTTPHEADER, self.statichttpheaders + headers + ['Content-Type: application/json'])\n c.setopt(c.POST, 1)\n c.setopt(c.POSTFIELDS, data)\n #c.setopt(c.VERBOSE, True)\n c.perform()\n status = c.getinfo(pycurl.HTTP_CODE)\n c.close()\n\n body = buffer.getvalue()\n return status, body.decode('iso-8859-1')\n"
},
{
"alpha_fraction": 0.7193155884742737,
"alphanum_fraction": 0.7203027606010437,
"avg_line_length": 40.6301383972168,
"blob_id": "9fa5e7a0ce77470a886c6821c676e47ab281d684",
"content_id": "00075ee16667c41ecc189293d164617c0c655861",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3039,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 73,
"path": "/main.py",
"repo_name": "Tim-Barton/gogs-migration",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python\n\nimport argparse\nimport sys\nfrom gitlabclient import GitlabClient\nfrom gogsclient import GogsClient\nimport os\nimport git\n\ndef gogsIsPrivate(public, visibility_level):\n if public:\n return False\n elif visibility_level == 10: #found from experimentation that my 'Internal' repos have this value\n return False\n else:\n return True\n\nparser = argparse.ArgumentParser(\"Migrate projects (repos & Wikis) from Gitlab to Gogs\")\nparser.add_argument('-glp', '--gitlabpt', help=\"The Private Token for the Gitlab user\")\nparser.add_argument('-gl', '--gitlaburl', help=\"The base url of the gitlab instance\")\nparser.add_argument('-go', '--gogsurl', help=\"The base url of the gogs instance\")\nparser.add_argument('-goo', '--gogsowner', help=\"The owner on gogs that all the projects to be migrated to\")\nparser.add_argument('-gop', '--gogspat', help=\"The Personal Access Token (PAT) for the Gogs user\")\nparser.add_argument('-tmp', \"--tempdir\", default=\"/tmp\", help=\"Directory to use for temporary storage of artifacts during migration\")\nargs = parser.parse_args()\n\ngitlab = GitlabClient(args)\ngogs = GogsClient(args)\n\nprint(\"Checking prerequisites\")\n\nif args.gogsowner is not None and not gogs.ownerOrgExists(args.gogsowner):\n print(\"You need to create organisation/user {} in order to proceed with migration\".format(args.gogsowner))\n sys.exit(1)\n\nprint(\"Calculating Migration context\")\n\ngitlabProjects = gitlab.getAllGroupProjects() #ignoring user's personal projects\ngogsProjects = gogs.getAllProjectsForOwner(args.gogsowner)\ngogsProjectNames = [gogs.transformShortName(project[\"full_name\"]) for project in gogsProjects ]\nmigrateProjects = [project for project in gitlabProjects if project[\"name\"] not in gogsProjectNames ]\n\n#for project in migrateProjects:\n #print(project[\"name\"], project[\"public\"], project[\"visibility_level\"])\n\n#print(migrateProjects)\n\nprint(\"Starting Migration\")\nfor project in migrateProjects:\n print(\"Migrating {} as {}\".format(project[\"name\"], project[\"path\"]))\n success, gogsProject = gogs.createProject(project[\"path\"], project[\"description\"], gogsIsPrivate(project[\"public\"], project[\"visibility_level\"]), args.gogsowner)\n if not success:\n print(\"Unable to create {}: {}\\n\".format(project[\"name\"],gogsProject))\n continue # skip any more processing if we can't create the project\n print(\"\\tMigrating git repo\")\n #migrate git repo\n tempdir = args.tempdir + \"/\" + project[\"name\"]\n if not os.path.exists(tempdir):\n os.mkdir(tempdir)\n repo = git.Repo.clone_from(project[\"ssh_url_to_repo\"], tempdir, bare=True)\n \n print(gogsProject[\"ssh_url\"])\n gogsRemote = repo.create_remote(\"Gogs\", gogsProject[\"ssh_url\"])\n gogsRemote.push(None, None, mirror=True)\n os.rmdir(tempdir)\n print(\"\\tMigrating issues - not yet implemented\")\n #migrate issues\n print(\"\\tMigrating wiki - not yet implemented\")\n #migrate wiki\n print(\"Migrated {} successfully\\n\".format(project[\"name\"]))\n pass\n \nprint(\"Migration Complete\")\n"
},
{
"alpha_fraction": 0.6632782816886902,
"alphanum_fraction": 0.6658195853233337,
"avg_line_length": 38.29999923706055,
"blob_id": "e382795248e0d714b68a81df05703e38814eddd4",
"content_id": "9e778e5a4e6e9774a82633e09d54a5c5d9396b36",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 787,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 20,
"path": "/gitlabclient.py",
"repo_name": "Tim-Barton/gogs-migration",
"src_encoding": "UTF-8",
"text": "\nfrom curl import CurlClient\nimport json\n\n\nclass GitlabClient:\n\n def __init__(self, args):\n self.url = args.gitlaburl\n self.pat = args.gitlabpat\n self.curlClient = CurlClient()\n self.curlClient.addStaticHeader(\"Private-Token: {}\".format(self.pat))\n\n def getAllGroupProjects(self):\n status, namespaceJson = self.curlClient.Get(self.url + \"/api/v3\" + \"/namespaces\")\n namespaces = json.loads(namespaceJson)\n groupNamespaces = [ namespace[\"id\"] for namespace in namespaces if namespace[\"kind\"] == \"group\"]\n\n status, projectsJson = self.curlClient.Get(self.url + \"/api/v3\" + \"/projects\")\n projects = json.loads(projectsJson)\n return [project for project in projects if project[\"namespace\"][\"id\"] in groupNamespaces]\n"
}
] | 5 |
ChenyunZhang/mini-project
|
https://github.com/ChenyunZhang/mini-project
|
492e1376366d0a5e5f284c514e30c6c68e137f4c
|
b044a345d4467d25855db3fa89376365b60440f3
|
99df497451546895efd864981109609b9cbce3cc
|
refs/heads/main
| 2023-04-07T02:11:31.613250 | 2021-04-19T04:13:03 | 2021-04-19T04:13:03 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5062560439109802,
"alphanum_fraction": 0.5110683441162109,
"avg_line_length": 20.66666603088379,
"blob_id": "65c75dd92d45eb0f38b3d63c26dcdb83a75f1bda",
"content_id": "206be18f92d7d82b5b5564d49bcc0a2b3ef992e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1039,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 48,
"path": "/09_data_structure_js/stack.js",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "const isPalindrone = (word) => {\n let letters = [], rword = \"\";\n for (let i in word) letters.push(word[i])\n for (let i in word) rword += letters.pop()\n if (rword === word) {\n return `${word} is palindrome.`\n } else {\n return `${word} is not palindrome.`\n }\n\n}\n// const word = \"freecodecamp\";\n// console.log(isPalindrone(word))\n\n/////////////////////////////////////////////////////////////////////\n\nclass Stack {\n constructor() {\n this.count = 0;\n this.storage = {};\n }\n\n push(value) {\n this.strorage[this.count] = value;\n this.count++\n }\n\n pop() {\n if (this.count === 0) return undefined\n this.count--\n const res = this.strorage[this.count]\n delete this.strorage[this.count]\n return res\n }\n size() {\n return this.count\n }\n\n peek() {\n return this.storage[this.count - 1]\n }\n}\n\nconst myStack = new Stack();\nconsole.log(myStack.storage)\nmyStack.push(1)\n// myStack.push(2)\n// console.log(myStack.peek())"
},
{
"alpha_fraction": 0.7966101765632629,
"alphanum_fraction": 0.8135592937469482,
"avg_line_length": 59,
"blob_id": "73f07362699a5702624321729315d46a91f9d679",
"content_id": "55dce5acd34f3198502b12860e9a8cf3dd78bc08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 59,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 1,
"path": "/04_quiz_app/Readme.md",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "Step1 run npx create-react-app my-app --template typescript"
},
{
"alpha_fraction": 0.6958763003349304,
"alphanum_fraction": 0.6958763003349304,
"avg_line_length": 28.615385055541992,
"blob_id": "7a3a5f93b9477713bd78498891032d5e46adbc6a",
"content_id": "1b1ddb3120251b16d676391c529b9d4875cd39aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 13,
"path": "/11_react_to_do_app/to-do/README.md",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "How I create the app\n- npx create-react-app <a href=\"https://create-react-app.dev/\" target=\"_blank\">to-do</a>\n- npm install react-icons --save\n\nFor src\n - I only need App.css, App.js, and index.js, delete the others.\n\n Create Component Folder (inside of src)\n - rfce shortcut for functional component\n - nfn arrow function\n - clg console.log\n - fin for in loop\n - fof for of loop\n \n"
},
{
"alpha_fraction": 0.5634058117866516,
"alphanum_fraction": 0.60326087474823,
"avg_line_length": 19.462963104248047,
"blob_id": "7aeec3fd74ca04ff79d22864e7ee0fa751660eca",
"content_id": "93c2bed0039f62a5d37ac7a69083e307d9ef0912",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1104,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 54,
"path": "/emmet.html",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "div.container\n<div class=\"container\"></div>\n\ndiv.imaclass.imaclass\n<div class=\"imaclass imaclass\"></div>\n\ndiv#myid\n<div id=\"myid\"></div>\n\ndiv.container#myid\n<div class=\"container\" id=\"myid\"></div>\n\na[_target=\"blank\"]\n<a href=\"\" _target=\"blank\"></a>\n\nh1+p\n<h1></h1>\n<p></p>\n\nh1#imaid+p\n<h1 id=\"imaid\"></h1>\n<p></p>\n\nh1>h2>h3\n<h1>\n <h2>\n <h3></h3>\n </h2>\n</h1>\n\nmain>(section>h1{my heading})+div\n<main>\n <section>\n <h1>my heading</h1>\n </section>\n <div></div>\n</main>\n\nul>li#li$.broseph${my list item $}*8\n<ul>\n <li id=\"li1\" class=\"broseph1\">my list item 1</li>\n <li id=\"li2\" class=\"broseph2\">my list item 2</li>\n <li id=\"li3\" class=\"broseph3\">my list item 3</li>\n <li id=\"li4\" class=\"broseph4\">my list item 4</li>\n <li id=\"li5\" class=\"broseph5\">my list item 5</li>\n <li id=\"li6\" class=\"broseph6\">my list item 6</li>\n <li id=\"li7\" class=\"broseph7\">my list item 7</li>\n <li id=\"li8\" class=\"broseph8\">my list item 8</li>\n</ul>\n<div>\n <a href=\"https://marketplace.visualstudio.com/items?itemName=xabikos.JavaScriptSnippets\">\n ES6 shortcut\u001d</a>\n</div>"
},
{
"alpha_fraction": 0.524316132068634,
"alphanum_fraction": 0.5379939079284668,
"avg_line_length": 18.352941513061523,
"blob_id": "36cef99a1690973852678d4a14e5dae948cc8629",
"content_id": "941dffa4eb1a5192e294067dba56e14a314691ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 658,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 34,
"path": "/06_Type_script/fundamentals.js",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "// strong typing\n// Object-oriented features\n// compile-time errors\n// great tooling\n// TypeScript ===> JS\n// let log = (message) =>{\n// console.log(message);\n// }\n// log(\"message\");\n// let doSomething = ()=>{\n// let i = 1\n// while(i<=5){\n// console.log(i)\n// i++\n// }\n// }\n// doSomething()\nvar a;\nvar b;\nvar c;\nvar d;\nvar e;\nvar f = [1, true, \"a\", false];\nvar ColorRed = 0;\nvar ColorGreen = 1;\nvar ColorBlue = 2;\nvar Color;\n(function (Color) {\n Color[Color[\"Red\"] = 0] = \"Red\";\n Color[Color[\"Green\"] = 1] = \"Green\";\n Color[Color[\"Blue\"] = 2] = \"Blue\";\n})(Color || (Color = {}));\n;\nvar backgroundColoe = Color.Green;\n"
},
{
"alpha_fraction": 0.6234384179115295,
"alphanum_fraction": 0.6258179545402527,
"avg_line_length": 34.02083206176758,
"blob_id": "10c2bdb5f880135fcce576ebc92a79d8165115c2",
"content_id": "2ada6b71dd1aee0ed114539916107d426940d8b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1681,
"license_type": "no_license",
"max_line_length": 304,
"num_lines": 48,
"path": "/10_pwa/app.js",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "const key = \"JC0T7bCpESCb2SGJBe6axdrSFW0X03H7\"\nconst main = document.querySelector(\"main\")\nconst section = [\"arts\", \"automobiles\", \"books\", \"business\", \"fashion\", \"food\", \"health\", \"home\", \"insider\", \"magazine\", \"movies\", \"nyregion\", \"obituaries\", \"opinion\", \"politics\", \"realestate\", \"science\", \"sports\", \"sundayreview\", \"technology\", \"theater\", \"t-magazine\", \"travel\", \"upshot\", \"us\", \"world\"]\nconst sectionSelector = document.querySelector(\"#sectionSelector\")\nconst defaultSection = \"home\"\n\nwindow.addEventListener(\"load\", e => {\n updateNews()\n updateSections()\n sectionSelector.value = defaultSection\n sectionSelector.addEventListener(\"change\", e => {\n updateNews(e.target.value)\n })\n // service worker here\n if (\"serviceWorker\" in navigator) {\n try {\n navigator.serviceWorker.register(\"sw.js\");\n console.log(\"SW register Successfully\")\n } catch (error) {\n console.log(\"SW register failed\");\n }\n }\n})\n\nwindow.addEventListener('online', () => updateNews(sectionSelector.value)); \n\nasync function updateNews(section = defaultSection) {\n const res = await fetch(`https://api.nytimes.com/svc/topstories/v2/${section}.json?api-key=${key}`)\n const json = await res.json()\n main.innerHTML = json.results.map(creatArticle).join('\\n')\n}\n\nasync function updateSections() {\n sectionSelector.innerHTML = section.map(sec => `<option value=${sec}>${sec}</option>`)\n}\n\n\nfunction creatArticle(article) {\n return `\n <div class=\"article\">\n <a href=\"${article.url}\">\n <h2></h2>\n <img src=\"${article.multimedia[0].url}\">\n <p>${article.abstract}</p>\n </a>\n </div>\n `\n}\n"
},
{
"alpha_fraction": 0.5680851340293884,
"alphanum_fraction": 0.5787234306335449,
"avg_line_length": 16.44444465637207,
"blob_id": "7bdb29619871badf237e18355ae0dc1ff268d659",
"content_id": "f2d65986875de63965f7069d3e82999e8155c770",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 27,
"path": "/09_data_structure_js/queue.js",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "class Queue {\n constructor(){\n this.collection = []\n }\n enque(val){\n this.collection.push(val)\n }\n deque(){\n return this.collection.shift()\n }\n front(){\n return this.collection[0]\n }\n size(){\n return this.collection.length\n }\n isEmpty(){\n return this.collection.length===0\n }\n}\n\nconst myQueue = new Queue\nmyQueue.enque(1)\nmyQueue.enque(2)\nmyQueue.enque(3)\nmyQueue.deque()\nconsole.log(myQueue)"
},
{
"alpha_fraction": 0.7424733638763428,
"alphanum_fraction": 0.7524316906929016,
"avg_line_length": 67.52381134033203,
"blob_id": "570d79859c0a811e793744229a2e7bb5062f87e2",
"content_id": "f73cb237fcd969f320b6bad26d055ca7b4daac13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4352,
"license_type": "no_license",
"max_line_length": 543,
"num_lines": 63,
"path": "/0_css_basics/flexbox/Note.md",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "\nFor each of the directions below, add these declarations to the existing ul or li elements.\n\nThe first grouping has to do with the flex-container, or the ul in this example.\n\n1. ul { display: flex; }\nThis gets everything on a single line. By default, the direction is in a row and in standard order.\n\n2. ul {display: flex; flex-direction: ***; }\n*** can equal\nrow,\nrow-reverse,\ncolumn,\ncolumn-reverse\n\nThis takes the elements and places them in a single row or a single column. Ordering is either in source order or the reverse of the source order. Flex-direction defines our main axis.\n\n3. ul { display: flex; flex-direction: as before, flex-wrap: ***;}\n*** can equal wrap, wrap-reverse, or nowrap\nflex-direction orders the individual items.\nflex-wrap orders the rows/columns created\n\n4. ul { display: flex; flex-flow: ***;}\nflex-flow is shorthand for flex-direction and flex-wrap\nIt takes two arguments, just like the individual properties.\nExample: row wrap, row-reverse wrap, column nowrap, column-reverse wrap-reverse, etc\nJust because the row/column is reversed does not mean the wrap has to be reversed\n\n5. ul { display: flex; flex-flow: row wrap; justify-content: ***; }\n*** can equal flex-start, flex-end, center, space-between, space-around\nJustify content determines the distribution of the flex-items within the flex-container on the main axis — in other words, how should space be allocated relative to the width of each item?\nIf flex-direction is row, then horizontal is the main axis. When flex-direction is column, then column is the main axis.\n\n6. ul { display: flex; flex-flow: row wrap; justify-content: center; height: 400px;}\nThis artificially gives our row some height. In the next step, we’ll try some cross axis alignment. Since we’re working with a row, the cross axis is the vertical axis.\n\n7. ul { display: flex; flex-flow: row wrap; justify-content: center; height: 400px; align-items: ***}\n*** can equal flex-start, flex-end, center, baseline, stretch\nThis aligns our items on the cross axis. Since we’re working with a row currently, this is aligning elements in vertical space. \n\n\n\nThe next set of properties are about the individual flex-items, or the li’s in this example.\n\n8. .flex2{ border: 2px dotted blue; order: ***; }\n*** can be an integer.\n1 will move the .flex2 boxes to the end of the list, while -1 will move them to the start of the list. 0 is neutral. The border styling is to help us differentiate between the li’s with the .flex2 class and those that don’t have it.\n\n9. .flex2{ border: 2px dotted blue; flex-basis: ***; }\nflex-basis is analogous to width, but not quite the same thing. Width is an absolute measurement — an element is that wide, all the time. We can measure width in relative units (say 25% instead of 250px), but in the end, the measurement itself never changes. For flex-basis, we try to achieve a given width with the space available. It could be smaller than this width, or it could be wider, depending on the extra space and how that’s supposed to be distributed. Distribution of extra space is controlled by flex-grow and flex-shrink (below).\n\n10. .flex2{ border: 2px dotted blue; flex-grow: ***; }\n*** can be 0 or a positive integer. (It won’t break with a negative integer, but it won’t do anything either.)\nFlex-grow, like flex-shrink (below), distributes extra space once each element is displayed on the page. In this example, our flex-items are center-aligned (see justify-content: center on the ul). 
By assigning a value to flex-grow, any extra space will be assigned in greater proportion to this element, making it larger relative to the other items. Note there is no unit with this measurement — it’s simply a proportion.\n\n11. .flex2{ border: 2px dotted blue; flex-shrink: ***; }\n*** can be 0 or a positive integer. (It won’t break with a negative integer, but it won’t do anything either.)\nFlex-shrink controls what happens to extra space as elements shrink. By assigning a value to flex-shrink, as elements get smaller on the page, this element will get smaller faster than the others. Note there is no unit with this measurement — it’s simply a proportion.\n\n12. .flex2{ border: 2px dotted blue; flex: G S B; }\nG = flex-grow\nS = flex-shrink\nB = flex-basis\nThis is the shorthand for combining flex-grow, flex-shrink, and flex-basis. "
},
{
"alpha_fraction": 0.6020151376724243,
"alphanum_fraction": 0.6347606778144836,
"avg_line_length": 34.90909194946289,
"blob_id": "eb710d95927f55618e0e03b2a6b49e152659f560",
"content_id": "ac060b7934839051fae20d35c1785e52413896f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 11,
"path": "/01_digital_clock/pomodoro.js",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "function showTime() {\n let date = new Date()\n let h = date.getHours()<10? `0${date.getHours()}`: date.getHours()\n let m = date.getMinutes()<10? `0${date.getMinutes()}`: date.getMinutes()\n let s = date.getSeconds()<10? `0${date.getSeconds()}`: date.getSeconds()\n\n const time = h +\":\"+m+\":\"+s;\n document.getElementById(\"time\").innerText= time\n setTimeout(showTime,1000)\n}\nshowTime()\n\n\n"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.7971014380455017,
"avg_line_length": 13,
"blob_id": "e0c6983ff9c18ec799be480f5327e75544b17f78",
"content_id": "fe9fe40627938ded802227d59701b38ba40e7e75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 5,
"path": "/0_css_basics/note.md",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "# Psudoselectors\nExample: h1.hover\nfirst-child\nnth-child()\nlast-child"
},
{
"alpha_fraction": 0.6140350699424744,
"alphanum_fraction": 0.6725146174430847,
"avg_line_length": 20.125,
"blob_id": "f62824985a2ac63d0861f33ce48c2fd4ce8e1d0b",
"content_id": "b4ba98026f913d0e3d82153bbd8aa9475e31a38d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 171,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 8,
"path": "/README.md",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "# 1.Web Clock\n\n\n# 2.Counter\n\n\n# 11.Todo App\n\n\n\n"
},
{
"alpha_fraction": 0.5414692163467407,
"alphanum_fraction": 0.5426540374755859,
"avg_line_length": 27.133333206176758,
"blob_id": "591ab24cce29b37e6af380d5f257b2d958afa72e",
"content_id": "74d5f0ba1f104dd4b405c38d861e0d6d578d9e49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 30,
"path": "/08_coursera_react_redux_course/confusion/src/App.js",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "import { Navbar, NavbarBrand, NavbarToggler, Collapse, NavItem, NavLink, Nav } from \"reactstrap\";\nimport React, { useState } from \"react\"\n\n\nfunction App() {\n const [collapsed, setCollapsed] = useState(true);\n const toggleNavbar = () => setCollapsed(!collapsed);\n return (\n <>\n <Navbar dark color=\"info\">\n <div className=\"container\">\n <NavbarBrand>CHENYUN ZHANG</NavbarBrand>\n <NavbarToggler onClick={toggleNavbar} className=\"mr-2\" />\n <Collapse isOpen={!collapsed} navbar>\n <Nav navbar>\n <NavItem>\n <NavLink href=\"\">Resume</NavLink>\n </NavItem>\n <NavItem>\n <NavLink href=\"\">Projects</NavLink>\n </NavItem>\n </Nav>\n </Collapse>\n </div>\n </Navbar>\n </>\n );\n}\n\nexport default App;\n"
},
{
"alpha_fraction": 0.5559440851211548,
"alphanum_fraction": 0.5716783404350281,
"avg_line_length": 15.764705657958984,
"blob_id": "f67772c0af3c1f3e24aedbadd707a7f5589c7550",
"content_id": "999332527a35035c4fbe03c1bbfa76e7dce7960c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 34,
"path": "/06_Type_script/fundamentals.ts",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "// strong typing\n// Object-oriented features\n// compile-time errors\n// great tooling\n\n// TypeScript ===> JS\n\n// let log = (message) =>{\n// console.log(message);\n// }\n// log(\"message\");\n\n// let doSomething = ()=>{\n// let i = 1\n// while(i<=5){\n// console.log(i)\n// i++\n// }\n// }\n// doSomething()\n\nlet a: number;\nlet b: boolean;\nlet c: string;\nlet d: any;\nlet e: number[] \nlet f: any[] = [1,true,\"a\",false]\n\nconst ColorRed = 0;\nconst ColorGreen = 1;\nconst ColorBlue = 2;\n\nenum Color {Red=0,Green=1,Blue=2};\nlet backgroundColoe = Color.Green\n\n\n"
},
{
"alpha_fraction": 0.5446428656578064,
"alphanum_fraction": 0.7321428656578064,
"avg_line_length": 27.25,
"blob_id": "f4ee71c021f591de68c284e9c1143857ef6f0923",
"content_id": "f756878d664c3a6d476375ab480beaca5571b549",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 4,
"path": "/run.py",
"repo_name": "ChenyunZhang/mini-project",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nplt.plot([40,48,61,75,100,99,98,39,30,10])\nplt.ylabel('some numbers')\nplt.show()"
}
] | 14 |
datho7561/Fourseal
|
https://github.com/datho7561/Fourseal
|
2985d05d655821d80d894c817cff4bfda42b28a1
|
2008517c72bfe9fe2cd678859ccd25314967eff2
|
6adf5dff7d0ca45f55cb7cb515486ca4778245e9
|
refs/heads/master
| 2020-04-02T16:21:23.182336 | 2019-01-21T00:55:41 | 2019-01-21T00:55:41 | 154,608,890 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47284218668937683,
"alphanum_fraction": 0.4852854013442993,
"avg_line_length": 34.40559387207031,
"blob_id": "d009e6637e39a7d78f7b5c136f15b80c345466db",
"content_id": "f4f40dd5211e2a18153427ed9b36483f37b48978",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5067,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 143,
"path": "/map.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 11 April, 2017\n\nfrom pygame import Surface\nfrom sprite import Sprite\nfrom constants import *\n\nimport math, random\n\ndef pixelSpread(num):\n \"\"\" A quartic function to control where the pixels are placed.\n Places most of the pixels near the edge. \"\"\"\n return num**4\n\nclass Map:\n\n \"\"\" Holds the data for an entire game map, and allows the data to be read and accessed easily \"\"\"\n\n global SPREAD_MIN, SPREAD_MAX\n\n SPREAD_MIN, SPREAD_MAX = 80, 150\n\n def __init__(self, data):\n \"\"\" Sets up the map given the data from the map file \"\"\"\n\n # Create variables to store the map information\n self.background = []\n self.foreground = []\n self.decoration = []\n self.objects = []\n\n # Split the data by lines\n\n cleanedData = data.split('\\n')\n\n # Process data into memory as ints\n # TODO: read more than the first 9 lines\n for i in range(2*BOXES_HIGH):\n\n # Clean up each line\n line = cleanedData[i].split(\",\")\n cleanedLine = []\n\n for element in line:\n cleanedLine.append(int(element.strip()))\n\n # TODO: read the other information as well\n if (i//BOXES_HIGH == 0):\n self.background.append(cleanedLine)\n elif (i//BOXES_HIGH == 1):\n self.foreground.append(cleanedLine)\n elif (i//BOXES_HIGH == 2):\n self.decoration.append(cleanedLine)\n else:\n self.objects.append(cleanedLine)\n\n\n\n def getBg(self, images):\n \"\"\" Returns the background, stitched together and 'nicely' blended \"\"\"\n\n bgImage = Surface(SIZE)\n\n for y in range(BOXES_HIGH):\n for x in range(BOXES_WIDE):\n\n # Find the texture for this Sprite\n textureNum = self.background[y][x]\n texture = images[textureNum].copy()\n\n # Edit the pixels based on the textures around\n\n # In each of the four cardinal directions\n for q in ((-1,0), (1,0), (0,-1), (0, 1)):\n\n # Makes sure the spot that s being checked actually exists\n if (y+q[0] >= 0 and x+q[1] >= 0 and\n y+q[0] < BOXES_HIGH and x+q[1] < BOXES_WIDE):\n\n blendingTextureNum = self.background[y + q[0]][x + q[1]]\n\n if blendingTextureNum <= 0 or blendingTextureNum > 9:\n print(blendingTextureNum)\n\n # If the textures are different\n if (textureNum != blendingTextureNum):\n\n numSpread = random.randrange(SPREAD_MIN, SPREAD_MAX)\n\n for i in range(numSpread):\n\n # Generate a number that is usually just less than 1\n gamma = pixelSpread(random.random())\n\n # Calculate the point\n if (q==(-1, 0)):\n x_comp = int(random.randrange(0, BOX_SIZE))\n y_comp = int(gamma*BOX_SIZE / 4)\n elif (q==(0, -1)):\n x_comp = int(gamma*BOX_SIZE / 4)\n y_comp = int(random.randrange(0, BOX_SIZE))\n elif (q==(1, 0)):\n x_comp = int(random.randrange(0, BOX_SIZE))\n y_comp = BOX_SIZE - 1 - int(gamma*BOX_SIZE / 4)\n else:\n x_comp = BOX_SIZE - 1 - int(gamma*BOX_SIZE / 4)\n y_comp = int(random.randrange(0, BOX_SIZE))\n\n # “We don't make mistakes,\n # just happy little accidents.” - Bob Ross\n texture.set_at((x_comp, y_comp), (30, 30, 30))\n\n blockSprite = Sprite(BOX_SIZE * x, BOX_SIZE * (BOXES_HIGH-y-1), texture)\n\n blockSprite.draw(bgImage)\n\n return bgImage\n\n\n def getFg(self, images):\n \"\"\" Returns a texture that represents the foreground, as well as a list\n of sprites that represent the objects that can be collided with. 
\"\"\"\n\n # Holds all the sprites that will be used for collision with the player\n fgSprites = []\n\n for y in range(BOXES_HIGH):\n for x in range(BOXES_WIDE):\n\n textureNum = self.foreground[y][x]\n\n if (textureNum != 0):\n\n texture = images[textureNum].copy()\n\n # If this block isn't water, randomly shift it\n toBeShifted = textureNum != 15\n\n collisionSprite = Sprite(BOX_SIZE * x, BOX_SIZE * (BOXES_HIGH-y-1),\n texture, toBeShifted)\n fgSprites.append(collisionSprite)\n\n return fgSprites\n"
},
{
"alpha_fraction": 0.5299080014228821,
"alphanum_fraction": 0.5475460290908813,
"avg_line_length": 23.60377311706543,
"blob_id": "6a5ecb5bbee7599af0833000447a33c449a8be18",
"content_id": "702b2168d12d9e1134d1d3487875b457e7c74da4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1304,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 53,
"path": "/MapGenerator.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "import random\nfrom constants import *\n\nfile = open(\"NewRandomMap\", 'w')\n\ntext = \"\"\n\n# FULL RANDOM BACKGROUND\n#for row in range (BOXES_HIGH):\n# for col in range(BOXES_WIDE):\n# text += str(random.randrange(1, 9))\n# if col!=BOXES_WIDE-1:\n# text+=\", \"\n# text += '\\n'\n\n# ONE TILE FOREGROUND (1 represents grass)\nfor row in range(BOXES_HIGH):\n for col in range(BOXES_WIDE):\n text += str(1) # Change number for different \n if col!=BOXES_WIDE-1:\n text+=\", \"\n text += '\\n'\n\n# FULL RANDOM FOREGROUND\n#for row in range (BOXES_HIGH):\n# for col in range(BOXES_WIDE):\n# text += str(random.randrange(9, 16) * random.randrange(0, 2))\n# if col!=BOXES_WIDE-1:\n# text+=\", \"\n# text+='\\n'\n\n# RANDOM OUTLINE FOREGROUND\n#for row in range (BOXES_HIGH):\n# for col in range(BOXES_WIDE):\n# if row == 0 or row == BOXES_HIGH-1 or col == 0 or col == BOXES_WIDE-1:\n# text += str(random.randrange(9, 16))\n# else:\n# text += str(0)\n# if col!=BOXES_WIDE-1:\n# text+=\", \"\n# text+='\\n'\n\n# BLANK FOREGROUND\nfor row in range (BOXES_HIGH):\n for col in range(BOXES_WIDE):\n text += str(0)\n if col!=BOXES_WIDE-1:\n text+=\", \"\n text+='\\n'\n\nfile.write(text)\n\nfile.close()\n"
},
{
"alpha_fraction": 0.4515151381492615,
"alphanum_fraction": 0.4848484992980957,
"avg_line_length": 25.399999618530273,
"blob_id": "84289091586f825c479ae7a8146c22060bfbaa9c",
"content_id": "9cd18cda9f045717ab2e047b545ba40605e6052b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 25,
"path": "/threemason.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 18 June, 2018\n\nfrom player import Player\nfrom constants import *\n\nclass Threemason(Player):\n\n def __init__(self, images, xpos, ypos):\n\n super().__init__(images, xpos, ypos,\n maxHealth = 100, # Values for character contants\n resistance = 15,\n damage = 50,\n range = 40,\n speed = 2.5,\n attackSpeed = 30,\n specialCooldown = 150)\n\n # OVERRIDE\n def special(self, direction, obstacles, entities):\n\n # TODO: implement special\n\n return direction\n"
},
{
"alpha_fraction": 0.5810810923576355,
"alphanum_fraction": 0.5968468189239502,
"avg_line_length": 24.823530197143555,
"blob_id": "adb1da600864e5c4bbdd7649fa1f2468e65a43cb",
"content_id": "2a58533e524edb0c0c794141c3af6be698161e01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/totem.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 24 September, 2018\n\nfrom entity import Entity\n\nclass Totem(Entity):\n\n def __init__(self, images, xpos, ypos, health):\n \"\"\" Make a totem with given location and health \"\"\"\n\n # Speed is zero so that nothing can move this totem\n super().__init__(images, xpos, ypos, speed=0)\n\n # OVERRIDE\n def attack(self):\n \"\"\" The totem should be unable to attack \"\"\"\n return False\n\n "
},
{
"alpha_fraction": 0.5332884788513184,
"alphanum_fraction": 0.5521183609962463,
"avg_line_length": 35.26829147338867,
"blob_id": "80335cbc2cbe918b9220d775ef47db7721de558b",
"content_id": "46c9afafb279c43d7d32b7db0f9257de7b19ef50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1487,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 41,
"path": "/foursealer.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 18 June, 2018\n\nfrom player import Player\nfrom constants import *\n\nclass Foursealer(Player):\n\n def __init__(self, images, xpos, ypos):\n\n super().__init__(images, xpos, ypos,\n maxHealth = 100, # Values for character contants\n resistance = 25,\n damage = 50,\n range = 60,\n speed = 2,\n attackSpeed = 45,\n specialCooldown = 150)\n\n # OVERRIDE\n def special(self, direction, obstacles, entities):\n\n # If within the first 10 frames of the burst\n if self.specialTimer > self.specialCooldown - 10:\n # Move in the direction that the player was facing\n # when the special was initiated. Move them 3 times what they\n # normally would move. Note that this prevents clipping\n # through walls because the player is moving in short steps\n for i in range(5):\n self.move(self.direction, obstacles)\n \n\n # Attack during the dash at four times the speed\n if self.attack(entities, obstacles):\n self.attackTimer = self.attackSpeed // 4\n \n # If the player is still moving forward, prevent them from changing directions\n return None\n\n # If the burst of movement is done, allow the player to move as normal\n return direction\n"
},
{
"alpha_fraction": 0.7118353247642517,
"alphanum_fraction": 0.7409948706626892,
"avg_line_length": 25.545454025268555,
"blob_id": "00321172da8c96a08355d62d9bee2517aa6251b1",
"content_id": "869290b495a67f26c33007e2a15b6f0569d2eec9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 583,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 22,
"path": "/constants.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 13 April, 2018\n\n# This module holds a lot of important game variables that must be accessed by multiple classes\n\nglobal BOX_SIZE, BOXES_WIDE, BOXES_HIGH, WIDTH, HEIGHT, SIZE, ENEMY_TIME, RECOIL\n\n# The width/height of a standard block\nBOX_SIZE = 32\n\n# Number of boxes horizontally and veritcally\nBOXES_WIDE, BOXES_HIGH = 32, 18\n\n# The size (width and height) of the window in pixels\nSIZE = WIDTH, HEIGHT = BOX_SIZE*BOXES_WIDE, BOX_SIZE*BOXES_HIGH\n\n# How fast the enemies spawn\nENEMY_TIME = 120\n\n# How much entities get knocked back\nRECOIL = 8\nRECOIL_SPEED = 8"
},
{
"alpha_fraction": 0.5822151303291321,
"alphanum_fraction": 0.5932305455207825,
"avg_line_length": 33.91608428955078,
"blob_id": "bb16ce1053df835961734a3083e697871d0d4f67",
"content_id": "953f79fb15bd02d10c4828b92c24c58833643224",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4993,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 143,
"path": "/pawn.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 3 October, 2018\n\nfrom constants import *\nfrom entity import Entity\nfrom sprite import Sprite\nfrom direction import Direction\nfrom direction import perpendicular, opposite, dirsAsArray\nfrom totem import Totem\nfrom foe import Foe\nimport random\n\n\nclass Pawn(Foe):\n\n def __init__(self, images, xpos = 0, ypos = 0, maxHealth = 80,\n resistance = 0, damage = 25, range = 5, speed = .5,\n attackSpeed = 45, direction = None):\n # Want same properties except make the default really slow\n super().__init__(images, xpos, ypos, maxHealth,\n resistance, damage, range, speed,\n attackSpeed, direction)\n\n # OVERRIDE\n def update(self, direction, obstacles, entities):\n \"\"\" Move this enemy according to its AI \"\"\"\n\n # Note that the passed movement direction is completely ignored.\n # Movement and actions are controlled by the AI\n direction = None\n\n # Find all the the directions that yield valid motion\n possibleDirections = []\n\n for d in dirsAsArray():\n if (self.motionIsValid(d, obstacles)):\n possibleDirections.append(d)\n\n # Find the totem in the list of entities\n totem = None\n\n for e in entities:\n if (isinstance(e, Totem)):\n totem = e\n\n # The goal is to get to the totem and attack it\n # This pawn has poor path finding. It just goes in which ever direction\n # gets it closest to the totem, which can get it stuck\n\n \n if (totem == None):\n\n # Totem is dead. GG. No need to move\n direction = None\n\n elif (self.distance(totem) < self.range):\n\n # If they can successfully attack the totem, do so\n self.attack(entities, obstacles)\n\n elif (possibleDirections != None):\n\n # If this entity can make a valid motion\n\n # Need to get to the totem\n\n # For each direction, make a dummy sprite and check if this\n # direction gets the pawn closer. Find the best direction\n # to go to get to the totem\n\n direction = possibleDirections[0]\n shortDist = 400000000000 # Really big number\n\n for dir in possibleDirections:\n xChange, yChange = self.getChangeFromDir(dir)\n dummySprite = Sprite(self.x + xChange, self.y + yChange)\n if dummySprite.distance(totem) < shortDist:\n shortDist = dummySprite.distance(totem)\n direction = dir\n \n super().update(direction, obstacles, entities)\n\n\n def motionIsValid(self, direction, obstacles):\n \"\"\" Checks to see if moving in this direction results in a change in\n position \"\"\"\n\n # If there is no direction, moving doesn't change location\n if direction == None:\n return False\n\n # Get the maximum change in location for the movement direction\n xChange, yChange = self.getChangeFromDir(direction)\n\n # Make a dummy sprite to check the collisions\n possible = Sprite(self.x, self.y)\n\n # Apply vertical movement. If this means it is now colliding,\n # snap to grid vertically.\n possible.y += yChange\n if possible.isColliding(obstacles):\n possible.y = int(possible.y/BOX_SIZE)*BOX_SIZE + round(possible.y/BOX_SIZE - int(possible.y/BOX_SIZE))*BOX_SIZE\n\n # Same except horizontally\n possible.x += xChange\n if possible.isColliding(obstacles):\n possible.x = int(possible.x/BOX_SIZE)*BOX_SIZE + round(possible.x/BOX_SIZE - int(possible.x/BOX_SIZE))*BOX_SIZE\n\n # If the motion would put the enemy in roughly the same place, this\n # doesn't count as a valid motion. 
Otherwise, it is\n if (int(possible.x) == int(self.x)\n and int(possible.y) == int(self.y)):\n return False\n return True\n \n def getChangeFromDir(self, direction):\n\n # Used to store the maximum movement in each direction\n yChange = 0\n xChange = 0\n\n if direction == Direction.UP:\n yChange = self.speed\n elif direction == Direction.UP_RIGHT:\n yChange = self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.RIGHT:\n xChange = self.speed\n elif direction == Direction.DOWN_RIGHT:\n yChange = -self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.DOWN:\n yChange = -self.speed\n elif direction == Direction.DOWN_LEFT:\n yChange = -self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n elif direction == Direction.LEFT:\n xChange = -self.speed\n elif direction == Direction.UP_LEFT:\n yChange = self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n \n return (xChange, yChange)\n"
},
{
"alpha_fraction": 0.5794681310653687,
"alphanum_fraction": 0.5893630385398865,
"avg_line_length": 34.53845977783203,
"blob_id": "bfddc0d70bc0bbd168d8018f5dd47e3697860611",
"content_id": "2428692bedfd678603ee897639693740ddb06af2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3234,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 91,
"path": "/enemy.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 18 June, 2018\n\nfrom constants import *\nfrom entity import Entity\nfrom sprite import Sprite\nfrom direction import Direction\nfrom direction import perpendicular, opposite\nimport random\n\nclass Enemy(Entity):\n\n # OVERRIDE\n def update(self, direction, obstacles, entities):\n \"\"\" Move this enemy according to its AI \"\"\"\n\n # Note that the passed movement direction is completely ignored\n direction = None\n\n if self.motionIsValid(self.direction, obstacles):\n direction = self.direction\n else:\n try:\n # Pick a direction that will not result in running\n directionsToGo = list(Direction)\n random.shuffle(directionsToGo)\n\n for dir in directionsToGo:\n if self.motionIsValid(dir, obstacles):\n direction = dir\n except:\n # If the enemy has yet to move, give it\n # a random initial motion\n direction = random.choice(list(Direction))\n\n super().update(direction, obstacles, entities)\n\n\n def motionIsValid(self, direction, obstacles):\n \"\"\" Checks to see if moving in this direction results in a change in\n position \"\"\"\n\n # If there is no direction, moving doesn't change location\n if direction == None:\n return False\n\n # Used to store the maximum movement in each direction\n yChange = 0\n xChange = 0\n\n if direction == Direction.UP:\n yChange = self.speed\n elif direction == Direction.UP_RIGHT:\n yChange = self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.RIGHT:\n xChange = self.speed\n elif direction == Direction.DOWN_RIGHT:\n yChange = -self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.DOWN:\n yChange = -self.speed\n elif direction == Direction.DOWN_LEFT:\n yChange = -self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n elif direction == Direction.LEFT:\n xChange = -self.speed\n elif direction == Direction.UP_LEFT:\n yChange = self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n\n # Make a dummy sprite to check the collisions\n possible = Sprite(self.x, self.y)\n\n # Apply vertical movement. If this means it is now colliding,\n # snap to grid vertically.\n possible.y += yChange\n if possible.isColliding(obstacles):\n possible.y = int(possible.y/BOX_SIZE)*BOX_SIZE + round(possible.y/BOX_SIZE - int(possible.y/BOX_SIZE))*BOX_SIZE\n\n # Same except horizontally\n possible.x += xChange\n if possible.isColliding(obstacles):\n possible.x = int(possible.x/BOX_SIZE)*BOX_SIZE + round(possible.x/BOX_SIZE - int(possible.x/BOX_SIZE))*BOX_SIZE\n\n # If the motion would put the enemy in roughly the same place, this\n # doesn't count as a valid motion. Otherwise, it is\n if (int(possible.x) == int(self.x)\n and int(possible.y) == int(self.y)):\n return False\n return True\n"
},
{
"alpha_fraction": 0.58708256483078,
"alphanum_fraction": 0.5982803702354431,
"avg_line_length": 34.72142791748047,
"blob_id": "980ecbec2fc5dfb90e5546b40238801b0cc27ab4",
"content_id": "71cd9b4917bdf83d864a7ba165d76bfc6e929c07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5001,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 140,
"path": "/bishop.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 3 October, 2018\n# Purpose: Like the Pawn, except it goes after the player, goes faster, and hits stronger\n\nfrom constants import *\nfrom entity import Entity\nfrom sprite import Sprite\nfrom direction import Direction\nfrom direction import perpendicular, opposite, dirsAsArray\nfrom foe import Foe\nfrom player import Player\nimport random\n\n\nclass Bishop(Foe):\n\n def __init__(self, images, xpos = 0, ypos = 0, maxHealth = 80,\n resistance = 0, damage = 30, range = 5, speed = 1.2,\n attackSpeed = 45, direction = None):\n # Want same properties except make the default really slow\n super().__init__(images, xpos, ypos, maxHealth,\n resistance, damage, range, speed,\n attackSpeed, direction)\n\n # OVERRIDE\n def update(self, direction, obstacles, entities):\n \"\"\" Move this enemy according to its AI. This is the same algorithm as the pawn,\n just targetting the player \"\"\"\n\n # Note that the passed movement direction is completely ignored.\n # Movement and actions are controlled by the AI\n direction = None\n\n # Find all the the directions that yield vlaid motion\n possibleDirections = []\n\n for d in dirsAsArray():\n if (self.motionIsValid(d, obstacles)):\n possibleDirections.append(d)\n\n # Find the player in the list of entities\n player = None\n\n for e in entities:\n if (isinstance(e, Player)):\n player = e\n\n if (player == None):\n\n # The player is not present or dead, so don't bother moving\n direction = None\n\n elif (self.distance(player) < self.range):\n\n # If they can successfully attack the player, do so\n self.attack(entities, obstacles)\n\n elif (possibleDirections != None):\n\n # If there is a direction this entity can go\n\n # Need to get to the player\n\n # For each direction, make a dummy sprite and check if this\n # direction gets the pawn closer. Find the best direction\n # to go to get to the player\n\n direction = possibleDirections[0]\n shortDist = 400000000000 # Really big number\n\n for dir in possibleDirections:\n xChange, yChange = self.getChangeFromDir(dir)\n dummySprite = Sprite(self.x + xChange, self.y + yChange)\n if dummySprite.distance(player) < shortDist:\n shortDist = dummySprite.distance(player)\n direction = dir\n \n super().update(direction, obstacles, entities)\n\n\n def motionIsValid(self, direction, obstacles):\n \"\"\" Checks to see if moving in this direction results in a change in\n position \"\"\"\n\n # If there is no direction, moving doesn't change location\n if direction == None:\n return False\n\n # Get the maximum change in location for the movement direction\n xChange, yChange = self.getChangeFromDir(direction)\n\n # Make a dummy sprite to check the collisions\n possible = Sprite(self.x, self.y)\n\n # Apply vertical movement. If this means it is now colliding,\n # snap to grid vertically.\n possible.y += yChange\n if possible.isColliding(obstacles):\n possible.y = int(possible.y/BOX_SIZE)*BOX_SIZE + round(possible.y/BOX_SIZE - int(possible.y/BOX_SIZE))*BOX_SIZE\n\n # Same except horizontally\n possible.x += xChange\n if possible.isColliding(obstacles):\n possible.x = int(possible.x/BOX_SIZE)*BOX_SIZE + round(possible.x/BOX_SIZE - int(possible.x/BOX_SIZE))*BOX_SIZE\n\n # If the motion would put the enemy in roughly the same place, this\n # doesn't count as a valid motion. 
Otherwise, it is\n if (int(possible.x) == int(self.x)\n and int(possible.y) == int(self.y)):\n return False\n return True\n \n def getChangeFromDir(self, direction):\n\n # Used to store the maximum movement in each direction\n yChange = 0\n xChange = 0\n\n if direction == Direction.UP:\n yChange = self.speed\n elif direction == Direction.UP_RIGHT:\n yChange = self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.RIGHT:\n xChange = self.speed\n elif direction == Direction.DOWN_RIGHT:\n yChange = -self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.DOWN:\n yChange = -self.speed\n elif direction == Direction.DOWN_LEFT:\n yChange = -self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n elif direction == Direction.LEFT:\n xChange = -self.speed\n elif direction == Direction.UP_LEFT:\n yChange = self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n \n return (xChange, yChange)\n"
},
{
"alpha_fraction": 0.5514737367630005,
"alphanum_fraction": 0.560871422290802,
"avg_line_length": 30.635135650634766,
"blob_id": "a3a51f7b27acb964ca23b38aa1698ca39b810366",
"content_id": "4470da8a422a2c5e1f254bd3e03d610db8a4b043",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2341,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 74,
"path": "/sprite.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 5 April, 2018\n\nimport random\nimport operator\n\nfrom pygame import Surface\nfrom constants import *\n\ndef sortSprites(sprites):\n \"\"\" Sorts a list of sprites so that they can be drawn from the back to the front. \"\"\"\n\n sprites.sort(key = operator.attrgetter('y'), reverse = True)\n\nclass Sprite:\n\n \"\"\" Represents an object that has a position on the screen and\n image that is associated with it \"\"\"\n\n def __init__(self, xpos = 0, ypos = 0, images = None, isShifted=False):\n \"\"\" Creates a new sprite. \"\"\"\n self.x = xpos\n self.y = ypos\n\n self.xShift = 0\n self.yShift = 0\n\n if isShifted:\n self.xShift, self.yShift = random.randrange(-3,4), random.randrange(-3,4)\n\n self.isShifted = isShifted\n\n self.imgs = images\n if images == None:\n self.imgs = []\n\n\n def draw(self, surface, textureNum=0):\n \"\"\" Draws this in place onto the given surface. Notice how this flips\n everything around so that the zero in the the y direction is\n in the bottom left of the screen. \"\"\"\n\n try:\n if self.imgs == []:\n raise IndexError\n else:\n surface.blit(self.imgs[textureNum], (int(self.x) + self.xShift,\n surface.get_size()[1] - int(self.y) - self.imgs[textureNum].get_size()[1] + self.yShift))\n\n except TypeError:\n surface.blit(self.imgs, (int(self.x) + self.xShift,\n surface.get_size()[1] - int(self.y) - self.imgs.get_size()[1] + self.yShift))\n\n\n def distance(self, other):\n \"\"\" Finds the distance between this Sprite and another one. \"\"\"\n return ((other.x-self.x)**2 + (other.y-self.y)**2)**(1/2)\n\n def isColliding(self, other):\n \"\"\" Checks if Sprites are colliding \"\"\"\n\n try:\n # If it is a list of sprites, go through them all, checking for collisions\n collision = False\n for s in other:\n if (abs(self.x - s.x) < BOX_SIZE and abs(self.y - s.y) < BOX_SIZE):\n collision = True\n break\n\n return collision\n\n except:\n # If it's just one sprite, just check it\n return (abs(self.x - other.x) < BOX_SIZE and abs(self.y - other.y) < BOX_SIZE)\n"
},
{
"alpha_fraction": 0.5207239389419556,
"alphanum_fraction": 0.5442293882369995,
"avg_line_length": 27.425390243530273,
"blob_id": "28e6ae7a26360c0e089df46574945cd5d9289751",
"content_id": "79af4deb2eed516b1541b0e9d3cb13f3ec45249c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12763,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 449,
"path": "/main.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 6 April, 2018\n# Note: Most of the code to interact with pygame such as the initialization\n# and parts of the gameloop, as well as the getResourcePath function\n# are copied from my FlapPY game: https://github.com/datho7561/FlapPY\n\n\nimport pygame, sys, os\n\nfrom pygame import Color\nfrom random import randint, random\n\nfrom sprite import Sprite, sortSprites\nfrom entity import Entity\n\nfrom player import Player\n\n# Totem\nfrom totem import Totem\n\n# Characters\nfrom foursealer import Foursealer\nfrom threemason import Threemason\nfrom dialic import Dialic\n\n# Baddies\nfrom pawn import Pawn\nfrom bishop import Bishop\nfrom enemy import Enemy\n\n# UI elements\nfrom menu import menu\nfrom damagebar import DamageBar\n\nfrom direction import Direction\n\nfrom map import Map\nfrom constants import *\n\ndef getResourcePath(name):\n \"\"\" Function to get a resource that's in the same folder as the script \"\"\"\n\n return os.path.join(os.path.realpath(__file__)[0:len(os.path.realpath(__file__))-len(os.path.basename(__file__))], name)\n\ndef readMapFile(name):\n \"\"\" Given the file name, reads the corresponding map file and loads it into memory. \"\"\"\n\n # Open the map file abd read it\n filepath = getResourcePath(os.path.join(\"maps\", name))\n file = open(filepath, 'r')\n data = file.read()\n\n return Map(data)\n\ndef loadImage(name):\n \"\"\" Loads the given image resource \"\"\"\n\n image = pygame.image.load(getResourcePath(os.path.join(\"assets\", name)))\n image.convert_alpha()\n\n return image\n\ndef borderCoords():\n \"\"\" Gives a randon coordinate pair along the borders \"\"\"\n\n # generate a random number to indicate side\n side = randint(1, 4)\n\n # intepret results\n if (side == 1):\n # TOP\n return (int(random() * WIDTH), 0)\n elif (side == 2):\n # RIGHT\n return (WIDTH, int(random() * HEIGHT))\n elif (side == 3):\n # BOTTOM\n return (int(random() * WIDTH), HEIGHT)\n else:\n # LEFT\n return (0, int(random() * HEIGHT))\n\n\n\n## INITIALIZE PYGAME ##\n\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.mixer.init()\npygame.init()\npygame.display.set_caption(\"Fourseal\")\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\n# Load images\n\ntextures = []\n\nfor i in range(16):\n textures.append(loadImage(str(i) + \".png\"))\n\nplayerSprites = []\n\ntotemSprite = loadImage(\"totem.png\")\npawnSprite = loadImage(os.path.join(\"pawn\", \"pawn_0.png\"))\nbishopSprite = loadImage(os.path.join(\"bishop\", \"bishop_0.png\"))\n\n# \"Player\" Sprites\n# TODO: replace with individual character's textures\nplayerSprite = loadImage(os.path.join(\"player\", \"player_0.png\"))\n\n# This font is used for game over\ntheFont = pygame.font.SysFont(\"Consolas\", 250)\n\n# TODO: add more players\n# Create the player(s)\n\n\n# TODO: read totem location and health from the map file\ntotem = Totem(totemSprite, WIDTH//2, HEIGHT//2, 200)\n\n# TODO: intelligent enemies that spawn periodically\nenemies = []\nenemyCooldown = ENEMY_TIME\n\n# TODO: centralize UI creation\n\n# TODO: figure out how many players/enemies/other things\n# there are and add health bars to all of them\nplayerHB = DamageBar(0, 0)\nplayerCDB = DamageBar(0, 20, 10, Color(200, 200, 255), Color(\"blue\"))\ntotemHB = DamageBar(WIDTH//2, 0)\n\n# TODO: automate this\n# : do for all players\n# : read from a JSON file in order to allow user customization\n# Initialize the keyboard key variables\n# W, D, A, S, Shift, Space\n# U, I, O, P, N, M\nP1KEYS = [False, False, False, False, False, 
False]\nP2KEYS = [False, False, False, False, False, False]\n\n# Load the default map with all the default textures\n\ntheMap = readMapFile(\"1.4clmap\")\n\nbackground = theMap.getBg(textures)\nfgSprites = theMap.getFg(textures)\n\n# Create the sprite list\n\nsprites = []\n\nsprites.append(totem)\nsprites += fgSprites\n\n# Create the entity list\nentities = []\n\nentities.append(totem)\n\n# Character select loop\nplayerType = menu(screen)\n\n# TODO: Finish character and game setup before starting the game\n\nplayer = None\nplayer2 = None\n\nif (playerType == \"T\"):\n player = Threemason(playerSprite, BOX_SIZE, BOX_SIZE)\nelif (playerType ==\"D\"):\n # If Dialic is selected, there are two players\n player = Foursealer(playerSprite, BOX_SIZE, BOX_SIZE)\n player2 = Foursealer(playerSprite, BOX_SIZE, BOX_SIZE)\nelif (playerType == \"F\"):\n player = Foursealer(playerSprite, BOX_SIZE, BOX_SIZE)\nelse:\n player = Foursealer(playerSprite, BOX_SIZE, BOX_SIZE)\n\nentities.append(player)\nsprites.append(player)\n\nif (not (player2 is None)):\n entities.append(player2)\n sprites.append(player2)\n \n\n\n# Main loop\n\ngameRunning = True\n\nwhile True:\n\n pygame.time.Clock().tick(75)\n\n if gameRunning:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # If the close button is pressed, exit the program\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n\n if event.key == 119: # P1 UP\n # If 'w' is pressed\n P1KEYS[0] = True\n elif event.key == 100: # P1 RIGHT\n # If 'd' is pressed\n P1KEYS[1] = True\n elif event.key == 115: # P1 DOWN\n # If 's' is pressed\n P1KEYS[2] = True\n elif event.key == 97: # P1 LEFT\n # If 'a' is pressed\n P1KEYS[3] = True\n elif event.key == 304: # P1 SPECIAL\n # If 'Shift' is pressed\n P1KEYS[4] = True\n elif event.key == 32: # P1 ATTACK\n # If 'Space' is pressed\n P1KEYS[5] = True\n elif event.key == 117: # P2 LEFT\n # If 'U' is pressed\n P2KEYS[0] = True\n elif event.key == 105: # P2 DOWN\n # If 'I' is pressed\n P2KEYS[1] = True\n elif event.key == 111: # P2 UP\n # If 'O' is pressed\n P2KEYS[2] = True\n elif event.key == 112: # P2 RIGHT\n # If 'P' is pressed\n P2KEYS[3] = True\n elif event.key == 110: # P2 ATTACK\n # If 'N' is pressed\n P2KEYS[4] = True\n elif event.key == 109: # P2 SPECIAL\n # If 'M' is pressed\n P2KEYS[5] = True\n\n elif event.type == pygame.KEYUP:\n\n if event.key == 119:\n # If 'w' is pressed\n P1KEYS[0] = False\n elif event.key == 100:\n # If 'd' is pressed\n P1KEYS[1] = False\n elif event.key == 115:\n # If 's' is pressed\n P1KEYS[2] = False\n elif event.key == 97:\n # If 'a' is pressed\n P1KEYS[3] = False\n elif event.key == 304:\n # If 'Shift' is pressed\n P1KEYS[4] = False\n elif event.key == 32:\n # If 'Space' is pressed\n P1KEYS[5] = False\n elif event.key == 117: # P2 LEFT\n # If 'U' is pressed\n P2KEYS[0] = False\n elif event.key == 105: # P2 DOWN\n # If 'I' is pressed\n P2KEYS[1] = False\n elif event.key == 111: # P2 UP\n # If 'O' is pressed\n P2KEYS[2] = False\n elif event.key == 112: # P2 RIGHT\n # If 'P' is pressed\n P2KEYS[3] = False\n elif event.key == 110: # P2 ATTACK\n # If 'N' is pressed\n P2KEYS[4] = False\n elif event.key == 109: # P2 SPECIAL\n # If 'M' is pressed\n P2KEYS[5] = False\n\n\n\n ## GAME LOGIC ##\n\n\n\n ### UPDATE THE PLAYERS ###\n\n\n\n ### PLAYER 1 ###\n\n player1Dir = None\n\n if P1KEYS[0] and P1KEYS[1]:\n player1Dir = Direction.UP_RIGHT\n elif P1KEYS[1] and P1KEYS[2]:\n player1Dir = Direction.DOWN_RIGHT\n elif P1KEYS[2] and P1KEYS[3]:\n player1Dir = Direction.DOWN_LEFT\n elif P1KEYS[3] and 
P1KEYS[0]:\n player1Dir = Direction.UP_LEFT\n elif P1KEYS[0]:\n player1Dir = Direction.UP\n elif P1KEYS[1]:\n player1Dir = Direction.RIGHT\n elif P1KEYS[2]:\n player1Dir = Direction.DOWN\n elif P1KEYS[3]:\n player1Dir = Direction.LEFT\n\n if P1KEYS[5]:\n # TODO: verify this is fixed\n ## # TODO: player shouldn't be able to attack totem\n player.attack(entities, fgSprites)\n\n player.update(player1Dir, fgSprites, entities, usingSpecial = P1KEYS[4])\n\n\n\n ### PLAYER 2 ###\n\n # Check if the player even exists\n if (not (player2 is None)):\n \n player2Dir = None\n\n if P2KEYS[2] and P2KEYS[3]:\n player2Dir = Direction.UP_RIGHT\n elif P2KEYS[1] and P2KEYS[3]:\n player2Dir = Direction.DOWN_RIGHT\n elif P2KEYS[0] and P2KEYS[1]:\n player2Dir = Direction.DOWN_LEFT\n elif P2KEYS[0] and P2KEYS[2]:\n player2Dir = Direction.UP_LEFT\n elif P2KEYS[2]:\n player2Dir = Direction.UP\n elif P2KEYS[3]:\n player2Dir = Direction.RIGHT\n elif P2KEYS[1]:\n player2Dir = Direction.DOWN\n elif P2KEYS[0]:\n player2Dir = Direction.LEFT\n\n if P2KEYS[4]:\n player2.attack(entities, fgSprites)\n\n player2.update(player2Dir, fgSprites, entities, usingSpecial = P2KEYS[5])\n\n\n\n\n \n\n # If it is time to add another foe, add one\n if (enemyCooldown == 0):\n\n # Choose the type of enemy\n typeNewEnemy = randint(0, 9)\n\n # Figure out where the foe goes\n newEnemyX, newEnemyY = borderCoords()\n\n # Make the enemy\n # TODO: make other types of enemies appear\n\n # Bishop, which follows player, is rarer than pawn\n if (typeNewEnemy == 9):\n newEnemy = Bishop(bishopSprite, newEnemyX, newEnemyY)\n else:\n newEnemy = Pawn(pawnSprite, newEnemyX, newEnemyY)\n\n # Add the new foe to the necessary lists\n enemies.append(newEnemy)\n sprites.append(newEnemy)\n entities.append(newEnemy)\n\n enemyCooldown = ENEMY_TIME\n\n else:\n # Otherwise just count down\n enemyCooldown -= 1\n\n for enemy in enemies:\n enemy.update(None, fgSprites, entities)\n\n # Remove dead entities from list\n allDeadRemoved = False\n\n while not allDeadRemoved:\n\n # Find the first dead entity in the list\n toRemove = None\n for e in entities:\n if e.dead:\n toRemove = e\n break\n \n if toRemove is None:\n # If no dead entities were found, flag for the loop to end\n allDeadRemoved = True\n else:\n # Otherwise remove the found entity and proceed with the loop\n entities.remove(toRemove)\n \n \n ## DRAW ##\n\n # TODO: draw everything\n\n # DRAW THE BACKGROUND #\n screen.blit(background, (0,0))\n\n # DRAW THE SPRITES #\n sortSprites(sprites)\n for s in sprites:\n s.draw(screen)\n\n # DRAW THE HUD #\n\n # TODO: Draw image representations of the players' faces\n # TODO: automate health bar drawing of everyone\n\n playerHB.update(player)\n playerCDB.update(player.attackTimer / player.attackSpeed)\n totemHB.update(totem)\n \n playerHB.draw(screen)\n playerCDB.draw(screen)\n totemHB.draw(screen)\n\n # Draw the GAME OVER\n if (player.dead or totem.dead):\n txtPlacement = (WIDTH // 2) - (theFont.size(\"FAILURE\")[0] // 2)\n screen.blit(theFont.render(\"FAILURE\", False, Color(\"red\")), (txtPlacement,HEIGHT//2-125))\n gameRunning = False\n\n\n # Update the double buffer\n pygame.display.flip()\n\n else:\n\n # If the game is complete, wait for exit\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # If the close button is pressed, exit the program\n sys.exit()\n"
},
{
"alpha_fraction": 0.5508698225021362,
"alphanum_fraction": 0.5840801000595093,
"avg_line_length": 29.11111068725586,
"blob_id": "4b126e066971fddda39e417c69d70005eac6338b",
"content_id": "363ff5ebdf8ecb9c55798f20b7ba2f1748fd4268",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1897,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 63,
"path": "/damagebar.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "from pygame import Surface\nfrom pygame import Color\nfrom pygame import Rect\nfrom entity import Entity\n\nclass DamageBar:\n\n LIFE_COLOUR = Color(102, 255, 51, 255)\n DEATH_COLOUR = Color(102, 0, 0, 255)\n\n def __init__(self,\n x,\n y, \n height=10, # How tall the box is (width is computed from this)\n colour1=Color(102, 255, 51, 255), # Life colour\n colour2=Color(102, 0, 0, 255)): # Death colour\n\n # The size of the health bar\n self.width = height * 8\n self.height = height\n\n # The colours to use for the health bar\n self.colour1 = colour1\n self.colour2 = colour2\n\n # Position of health bar on screen\n self.x = x\n self.y = y\n\n # The image representation of the health bar\n self.bar = Surface((self.width, self.height))\n\n # A fraction that represents how much life this thing has left\n self.fraction = 1.0\n\n def update(self, value):\n\n if (isinstance(value, Entity)):\n self.fraction = value.health / value.maxHealth\n else:\n self.fraction = value\n \n # Fraction should be at most 1\n if (self.fraction > 1):\n self.fraction = 1\n\n def draw(self, other):\n\n # TODO: make border less sloppy and more parameter based\n # currently it is 2 pixels thick all around\n\n # Outside border\n self.bar.fill(Color(\"black\"))\n\n # Fill the inside with the empty colour\n toFillPart = self.bar.subsurface(Rect(2, 2, self.width-4, self.height-4))\n toFillPart.fill(self.colour2)\n\n # Cover up part of it with the 'life' colour\n filledPart = toFillPart.subsurface(Rect(0, 0, int((self.width - 4) * self.fraction), self.height - 4))\n filledPart.fill(self.colour1)\n\n other.blit(self.bar, (self.x, self.y))\n"
},
{
"alpha_fraction": 0.7516778707504272,
"alphanum_fraction": 0.7557526230812073,
"avg_line_length": 58.61428451538086,
"blob_id": "5c4c9ca2c5c0f833d8c18f839c5510d94d43f7d3",
"content_id": "88d8e6a873aaf652e6dd11b674c0e391b2dad1ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4172,
"license_type": "no_license",
"max_line_length": 436,
"num_lines": 70,
"path": "/README.md",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Fourseal\n\n## ABOUT\n\nThis is a bird's eye view action game where you play as one of three sword wielding classes.\n\nEventually, I intend for it to be a multiplayer PVP game.\n\nRight now, I have set up a demo, where you must defend the totem/obelisk in the center of the map from an onslaught of chess pieces.\n\nIf you are interested in trying it out, please refer to the RUNNING IT section. Have fun!\n\n## CONTROLS\n\nUse WASD to move, SPACE to attack, and SHIFT to use your special move. Each class has a different special, as well as different attack strengths and movement speeds.\n\n## RUNNING IT\n\nThis game is written in python 3 with pygame, which means that to run it you will need these installed.\n1. Installing Python\n * If you already have python, run `python --version` or `python3 --version` in a terminal or command prompt to make sure that you have python 3 installed\n * Otherwise, [download and run the installer for python 3](https://www.python.org/downloads/)\n * Make sure that python is added to the path so that you can run it in terminal or command prompt\n2. Installing pygame\n * If you are on Mac/Linux, run `pip3 install pygame`\n * If you are on Windows, run `pip install pygame`\n3. Launching the game:\n * In a command prompt/terminal, make sure that you are in the same folder as the downloaded source code\n * For Mac/Linux, run `python3 main.py`\n * For Windows, run `python main.py`\n4. Character selection:\n * You will be prompted to enter a letter representing the class you pick before the game launches\n * Once you have done this, the game should have launched. Have fun!\n\n## FAQ\n__Q:__ Why am I a jubejube?\n\n__A:__ Many of the images in the game are placeholders. I have worked on the back end of animating the characters, but haven't put in the time to create all the images needed to animate the characters. I expect that this will take some time for me to get around to.\n\n## DEVELOPMENT\n\nThis game is currently incredibly rough around the edges:\n* It contains bugs and exploits\n * Because directional isn't implemented yet, you can stand on the obelisk and hold attack\n* It is very lacking in visual effects\n* The HUD leaves a lot to be desired\n* There are files of code which are written butuntested and not implemented in the main game\n\nI currently intend to be working on it in my spare time for fun. I will do my best to push or release stable versions of this game when it gets to that stage.\n\nIf you are interested in what I have planned, please see `Decisions.txt` file or the `TODO:` notes scattered throughout the src.\n\n## CURRENT BARRIERS FOR DEVELOPMENT\n\nThese are the issues I'm facing and why currently (20 Jan, 2019) I feel an inhibition to work on this at the moment.\n\n * The core concept isn't as fun as I expected it to be, at least with the features that are available now\n * I think fleshing out the special attacks, adding directional attacking, and adding better maps might help with this, but I'm not entirely sure\n * I don't have sufficient time or skill to complete the character pixel art\n * I need to redesign the main file in order to make it more maintainable\n * This means spending a decent amount of time planning how to do this\n * I will have to do a refactor, which will take a decent amount of coding\n * This is also necessary in order to make a better menu system and configurable matches (i.e. 
different maps)\n * I need to implement a good pathfinding algorithm for the enemies\n * I have read a bit about A* with path smoothing and Theta*, but need to read more in order to understand them and implement them\n * I need to make a custom key mapping system and figure out the default mappings for up to 4 players\n\n## MAP FILES\n\nI am working on creating a specific standard for the map file, so that it will be easy for anyone to create their own maps for the game. The file `MapFilePlanning.txt` has some information. As of right now, many of the features described in that file are not yet implemented. (Only the background and foreground are actually read into the game.) Also of note is the file `MapGenerator.py`, which I use to help me create maps to test on."
},
{
"alpha_fraction": 0.5516807436943054,
"alphanum_fraction": 0.5641445517539978,
"avg_line_length": 35.10454559326172,
"blob_id": "3503bc364d6e910b3e724c7841925b0829eb170b",
"content_id": "7acdebbbc2714efd2ec98eb918a658c500325f7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7943,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 220,
"path": "/entity.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 5 April, 2018\n\nfrom sprite import Sprite\nfrom direction import Direction\nfrom direction import opposite\nfrom constants import *\n\nclass Entity(Sprite):\n\n \"\"\" Describes a being that can exist in the game, whether it's a player,\n enemy, or non-player character \"\"\"\n\n def __init__(self, images, xpos = 0, ypos = 0, maxHealth = 100,\n resistance = 20, damage = 25, range = 5, speed = 3,\n attackSpeed = 45, direction = None):\n \"\"\" Creates a new entity. The default is a dummy at (0,0) \"\"\"\n\n # Sets this up as a sprite\n super().__init__(xpos, ypos, images)\n\n # Sets up additional variables\n self.maxHealth = maxHealth\n self.resistance = resistance\n self.health = maxHealth\n self.damage = damage\n self.range = range\n self.speed = speed\n self.direction = direction\n self.attackSpeed = attackSpeed\n\n # These variables don't need to be initalized to any other value\n self.killsSinceDeath = 0\n self.dead = False\n self.step = 0\n self.attackTimer = 0\n self.recoilTimer = 0\n self.reDir = None\n\n\n def setPos(self, newX, newY):\n \"\"\" Changes the position of the Entity, for use in setting up\n the game and teleporting \"\"\"\n\n self.x = newX\n self.y = newY\n\n\n def attack(self, entities, obstacles):\n \"\"\" Attacks all entities within range of this one. Kills them if\n they need to be dead. Returns True if the attack is successful. \"\"\"\n\n # TODO: make the entity only attack in front of themselves\n\n # The entity needs to be alive and cooled down to attack\n if not self.dead and self.attackTimer == 0:\n\n for e in entities:\n\n # If this entity isn't itself and it is within range\n if not self is e and self.distance(e) < self.range:\n\n # Check if damage is actually going to be done. Otherwise,\n # no recoil nor killing\n if not self.damage - e.resistance == 0:\n e.health -= self.damage - e.resistance\n\n # If the entity gets killed, \n if e.health <= 0:\n e.health = 0 # make sure health isn't negative\n self.killsSinceDeath += 1 # increment the killer's kills since death\n e.dead = True # set the other entity to dead\n e.killsSinceDeath = 0 # the other entity's kill count is zero\n else:\n e.recoilTimer = RECOIL # The entity must face recoil\n e.reDir = self.direction # Pass own direction as entities recoil\n\n self.attackTimer = self.attackSpeed\n\n return True\n\n return False\n\n\n def move(self, direction, obstacles):\n \"\"\" Move the entity in the given direction \"\"\"\n\n # If there is not direction, don't bother trying to move\n if direction == None:\n return\n\n # Find which direction the entity is moving and figure out what\n # sequence of movements that corresponds to.\n self.direction = direction\n\n yChange = 0\n xChange = 0\n\n if direction == Direction.UP:\n yChange = self.speed\n elif direction == Direction.UP_RIGHT:\n yChange = self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.RIGHT:\n xChange = self.speed\n elif direction == Direction.DOWN_RIGHT:\n yChange = -self.speed/2**(1/2)\n xChange = self.speed/2**(1/2)\n elif direction == Direction.DOWN:\n yChange = -self.speed\n elif direction == Direction.DOWN_LEFT:\n yChange = -self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n elif direction == Direction.LEFT:\n xChange = -self.speed\n elif direction == Direction.UP_LEFT:\n yChange = self.speed/2**(1/2)\n xChange = -self.speed/2**(1/2)\n\n # Apply vertical movement. 
If this means it is now colliding,\n # snap to grid vertically.\n self.y += yChange\n if self.isColliding(obstacles):\n self.y = int(self.y/BOX_SIZE)*BOX_SIZE + round(self.y/BOX_SIZE - int(self.y/BOX_SIZE))*BOX_SIZE\n\n # Same except horizontally\n self.x += xChange\n if self.isColliding(obstacles):\n self.x = int(self.x/BOX_SIZE)*BOX_SIZE + round(self.x/BOX_SIZE - int(self.x/BOX_SIZE))*BOX_SIZE\n\n # Take a step\n self.step += 1\n if self.step >= 23:\n self.step = 0\n\n\n def recoil(self, obstacles):\n \"\"\" Move the player backwards due to a previous attack \"\"\"\n\n yChange = 0\n xChange = 0\n\n # Figure out what change in coords is necessary for the recoil\n if self.reDir == Direction.UP:\n yChange = RECOIL_SPEED\n elif self.reDir == Direction.UP_RIGHT:\n yChange = RECOIL_SPEED/2**(1/2)\n xChange = RECOIL_SPEED/2**(1/2)\n elif self.reDir == Direction.RIGHT:\n xChange = RECOIL_SPEED\n elif self.reDir == Direction.DOWN_RIGHT:\n yChange = -RECOIL_SPEED/2**(1/2)\n xChange = RECOIL_SPEED/2**(1/2)\n elif self.reDir == Direction.DOWN:\n yChange = -RECOIL_SPEED\n elif self.reDir == Direction.DOWN_LEFT:\n yChange = -RECOIL_SPEED/2**(1/2)\n xChange = -RECOIL_SPEED/2**(1/2)\n elif self.reDir == Direction.LEFT:\n xChange = -RECOIL_SPEED\n elif self.reDir == Direction.UP_LEFT:\n yChange = RECOIL_SPEED/2**(1/2)\n xChange = -RECOIL_SPEED/2**(1/2)\n\n # Apply vertical movement. If this means it is now colliding,\n # snap to grid vertically.\n self.y += yChange\n if self.isColliding(obstacles):\n self.y = int(self.y/BOX_SIZE)*BOX_SIZE + round(self.y/BOX_SIZE - int(self.y/BOX_SIZE))*BOX_SIZE\n\n # Same except horizontally\n self.x += xChange\n if self.isColliding(obstacles):\n self.x = int(self.x/BOX_SIZE)*BOX_SIZE + round(self.x/BOX_SIZE - int(self.x/BOX_SIZE))*BOX_SIZE\n\n\n def update(self, direction, obstacles, entities):\n \"\"\" Update this entity: do everything that doesn't involve drawing.\n Should be performed every frame \"\"\"\n\n # TODO: handle attacking and prevent movement during attack\n\n if (self.recoilTimer > 0):\n self.recoilTimer -= 1 # Advance to the next frame of recoil\n self.recoil(obstacles) # Perform the recoil action\n elif (self.attackTimer == 0):\n self.move(direction, obstacles)\n else:\n self.move(direction, obstacles)\n self.attackTimer -= 1\n\n\n # OVERRIDE\n def draw(self, surface):\n \"\"\" Draws the entity to the surface \"\"\"\n\n if not self.dead:\n\n # TODO: handle the sprites for the entity to attack\n\n # Calculate direction modifier\n\n if self.direction == Direction.UP or self.direction == Direction.UP_LEFT or self.direction == Direction.UP_RIGHT:\n directionModifier = 0\n elif self.direction == Direction.RIGHT:\n directionModifier = 1\n elif self.direction == Direction.DOWN or self.direction == Direction.DOWN_LEFT or self.direction == Direction.DOWN_RIGHT:\n directionModifier = 2\n else:\n directionModifier = 3\n\n # Calculate step modifier\n\n stepModifier = self.step//6\n if stepModifier%2 == 1:\n stepModifier = 1\n\n # Select the appropriate texture\n texture = directionModifier * 3 + stepModifier\n super().draw(surface, textureNum = texture)\n"
},
{
"alpha_fraction": 0.621268630027771,
"alphanum_fraction": 0.6305969953536987,
"avg_line_length": 24.571428298950195,
"blob_id": "3522ef18f7983ffcbcd53454769b38b729d5db0e",
"content_id": "2dcc3dd3b059f2e9bb9d48cce85a138e188e2bfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 21,
"path": "/foe.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 3 Oct, 2018\n\nfrom entity import Entity\n\n# All entities that are inheritly evil are an instance of this class \nclass Foe(Entity):\n \n # OVERRIDE\n def attack(self, entities, obstacles):\n\n # Find all the non-foe entities when attacking,\n # and only allow the foe to attack them, not its own kin\n\n filteredEntities = []\n\n for e in entities:\n if (not isinstance(e, Foe)):\n filteredEntities.append(e)\n\n super().attack(filteredEntities, obstacles)"
},
{
"alpha_fraction": 0.600561797618866,
"alphanum_fraction": 0.6146067380905151,
"avg_line_length": 30.785715103149414,
"blob_id": "6334e3dc4810d4acb217636de8fe2d6e02bc5c15",
"content_id": "e1f95c60d4326ecb080cfb82a98484aadb8a4643",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1780,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 56,
"path": "/player.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 11 April, 2018\n\nfrom entity import Entity\nfrom totem import Totem\n\nclass Player(Entity):\n\n def __init__(self, images, xpos = 0, ypos = 0, maxHealth = 100,\n resistance = 20, damage = 25, range = 5, speed = 3,\n attackSpeed = 45, direction = 0, specialCooldown = 30,\n specialTimer = 0):\n\n super().__init__(images, xpos, ypos, maxHealth, resistance, damage,\n range, speed, attackSpeed, direction)\n\n self.specialCooldown = specialCooldown\n self.specialTimer = specialTimer\n\n\n def special(self, direction, obstacles, entities):\n \"\"\" Performs the special move, then returns the direction\n that the player should move after performing it \"\"\"\n\n return direction\n\n\n # OVERRIDE\n def attack(self, entities, obstacles):\n\n # A player should not be able to kill their own totem, because\n # defending the totem is the objective of the player when the\n # totem is present\n\n filteredEntities = []\n \n for e in entities:\n if (not isinstance(e, Totem)):\n filteredEntities.append(e)\n\n super().attack(filteredEntities, obstacles)\n\n # OVERRIDE\n def update(self, direction, obstacles, entities, usingSpecial):\n\n if self.specialTimer > 0:\n # If the special is in progress\n self.specialTimer -= 1\n direction = self.special(direction, obstacles, entities)\n\n elif usingSpecial:\n # If the special button is pressed and cooldown is finished\n self.specialTimer = self.specialCooldown\n direction = self.special(direction, obstacles, entities)\n\n super().update(direction, obstacles, entities)\n"
},
{
"alpha_fraction": 0.6509009003639221,
"alphanum_fraction": 0.6587837934494019,
"avg_line_length": 30.75,
"blob_id": "fa7be76feeafe73befee0ff7946b78510cf475de",
"content_id": "9097625e2f253437b7f13e882ba5f49650a77d87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 888,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 28,
"path": "/direction.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\nclass Direction(Enum):\n \"\"\" Used to describe which direction an entity is facing and travelling \"\"\"\n\n UP, UP_RIGHT, RIGHT, DOWN_RIGHT, DOWN, DOWN_LEFT, LEFT, UP_LEFT = list(range(8))\n\n\ndef perpendicular(direction):\n \"\"\" Returns a tuple of the two directions perpendicular to this one \"\"\"\n\n # FIXME: Wizardry that will breaks if I decide\n # to add more directions to the Enum\n return (Direction((direction.value + 2) % 8),\n Direction((direction.value + 2) % 8))\n\n\ndef opposite(direction):\n \"\"\" Returns the direction opposite this one \"\"\"\n # FIXME: More wizardry\n return Direction((direction.value + 4) % 8)\n\n\ndef dirsAsArray():\n return (Direction.UP, Direction.UP_RIGHT, Direction.RIGHT,\n Direction.DOWN_RIGHT, Direction.DOWN,\n Direction.DOWN_LEFT, Direction.LEFT,\n Direction.UP_LEFT)"
},
{
"alpha_fraction": 0.5630685091018677,
"alphanum_fraction": 0.5818678140640259,
"avg_line_length": 33.36458206176758,
"blob_id": "7cb32cdca5bda5da2cf96a2baab90b11fd1a2f69",
"content_id": "f72e71235140c73ea3dc78c1e553370730374bb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3298,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 96,
"path": "/menu.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 26 October, 2018\n\nimport pygame, sys\n\nfrom constants import *\nfrom pygame import Color\n\n\ndef menu(screen):\n \"\"\"\n Run the menu program.\n :returns: The character 'T', 'D', or 'F' in order to indicate which class was selected.\n :param screen: The screen of the pygame instance\n \"\"\"\n\n ### INTIALIZE FONTS AND COLOURS ###\n\n # Get Consolas at size 48\n font = pygame.font.SysFont(\"Consolas\", 48)\n\n # Get Consolas at a quarter the window height\n titleFont = pygame.font.SysFont(\"Consolas\", HEIGHT // 3)\n\n # Colour scheme generated with https://coolors.co/ \n backDarkC = Color(\"#594236\")\n titleC = Color(\"#48acf0\")\n subtitleC = Color(\"#ff9b21\")\n selectedC = Color(\"#6f584b\")\n\n # Define the margin from the top and corner in pixels\n offset = 5\n\n # Character select loop\n characterSelected = False\n\n while not characterSelected:\n\n pygame.time.Clock().tick(75)\n\n # Get the mouse posittion and calculate the quarter of the screen\n # which it is on\n\n locX = pygame.mouse.get_pos()[0]\n selectedBox = int(4 * locX / WIDTH)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # If the close button is pressed, exit the program\n sys.exit()\n\n # If the screen is clicked, determine if and which character was selected\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if (selectedBox == 1):\n playerType = \"T\"\n characterSelected = True\n elif (selectedBox == 2):\n playerType = \"D\"\n characterSelected = True\n elif (selectedBox == 3):\n playerType = \"F\"\n characterSelected = True\n\n # TODO: draw UI so that the user knows what they are picking\n\n # Fill background with dark\n screen.fill(backDarkC)\n\n # Highlight the quarter that is being hovered over\n if (selectedBox > 0):\n # Get a subsurface representing the quarter\n surfaceSelected = screen.subsurface(pygame.Rect(selectedBox * WIDTH // 4, 0, WIDTH // 4, HEIGHT))\n surfaceSelected.fill(selectedC)\n\n # Draw box around the title\n # TODO:\n\n # Draw the title of the game\n screen.blit(titleFont.render(\"4\", False, titleC),\n pygame.Rect(offset + 15, offset,0,0)) # This is shifted over a bit to look better\n screen.blit(titleFont.render(\"C\", False, titleC),\n pygame.Rect(offset, offset + titleFont.get_height(),0,0))\n screen.blit(titleFont.render(\"L\", False, titleC),\n pygame.Rect(offset + 5, offset + 2 * titleFont.get_height(),0,0))\n\n # Draw the text that says each classes name\n screen.blit(font.render(\"Threemason\", False, subtitleC),\n pygame.Rect(1 * WIDTH // 4 + offset,offset,0,0))\n screen.blit(font.render(\"Dialic\", False, subtitleC),\n pygame.Rect(2 * WIDTH // 4 + offset,offset,0,0))\n screen.blit(font.render(\"Foursealer\", False, subtitleC),\n pygame.Rect(3 * WIDTH // 4 + offset,offset,0,0))\n\n pygame.display.flip()\n\n return playerType"
},
{
"alpha_fraction": 0.5546780228614807,
"alphanum_fraction": 0.570473849773407,
"avg_line_length": 31.27450942993164,
"blob_id": "aa6bea9b9b0d6dbbc1d22dd31fa6bf477d52f3e2",
"content_id": "6c524814a166e13cfcbdb376602a7639c4d83495",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1646,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 51,
"path": "/dialic.py",
"repo_name": "datho7561/Fourseal",
"src_encoding": "UTF-8",
"text": "# Author: David Thompson\n# Date: 23 August, 2018\n\nfrom player import Player\nfrom constants import *\n\nclass Dialic(Player):\n\n def __init__(self, images, xpos, ypos):\n\n super().__init__(images, xpos, ypos,\n maxHealth = 100, # Values for character contants\n resistance = 15,\n damage = 0, # This value gets changed in this character\n range = 35,\n speed = 0, # This value gets changed in this character\n attackSpeed = 0, # This value gets changed in this character\n specialCooldown = 10) # Almost instantaneous reactivation\n\n self.strengthMoveSpeed = 2\n self.strengthAttackSpeed = 25\n self.strengthDamage = 35\n\n self.speedMoveSpeed = 6\n self.speedAttackSpeed = 3\n self.speedDamage = 0\n\n self.speed = self.speedMoveSpeed\n self.attackSpeed = self.speedAttackSpeed\n self.damage = self.speedDamage\n\n self.isStrong = False\n\n # OVERRIDE\n def special(self, direction, obstacles, entities):\n\n # TODO: implement special\n\n # Switch between speed and strength modes\n self.isStrong = not self.isStrong\n\n if (self.isStrong):\n self.speed = self.strengthMoveSpeed\n self.attackSpeed = self.strengthAttackSpeed\n self.damage = self.strengthDamage\n else:\n self.speed = self.speedMoveSpeed\n self.attackSpeed = self.speedAttackSpeed\n self.damage = self.speedDamage\n\n return direction\n"
}
] | 19 |
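Editor's note on the Fourseal record above: main.py resolves input with long if/elif chains over hard-coded key codes, and its own TODOs ask for a key-mapping system read from a JSON file. The following is a minimal, hypothetical sketch of that table-driven approach; the names (`DEFAULT_BINDINGS`, `apply_event`, `resolve`) are invented here, and the key codes are the same integers main.py compares against (w/d/s/a/Shift/Space and o/p/i/u/m/n).

```python
# Hypothetical sketch, not part of the repo: a configurable key mapping that
# could replace main.py's per-key if/elif chains.
import json

DEFAULT_BINDINGS = {
    "p1": {"119": "UP", "100": "RIGHT", "115": "DOWN", "97": "LEFT",
           "304": "SPECIAL", "32": "ATTACK"},   # w, d, s, a, Shift, Space
    "p2": {"111": "UP", "112": "RIGHT", "105": "DOWN", "117": "LEFT",
           "109": "SPECIAL", "110": "ATTACK"},  # o, p, i, u, m, n
}

def load_bindings(path=None):
    """Load key bindings from a JSON file, falling back to the defaults."""
    if path is None:
        return DEFAULT_BINDINGS
    with open(path) as fh:
        return json.load(fh)

def apply_event(state, bindings, key, pressed):
    """Update each player's {action: bool} dict from one key up/down event."""
    for player, table in bindings.items():
        action = table.get(str(key))
        if action is not None:
            state.setdefault(player, {})[action] = pressed

def resolve(actions):
    """Collapse held direction keys into one of eight compass names."""
    dy = actions.get("UP", False) - actions.get("DOWN", False)
    dx = actions.get("RIGHT", False) - actions.get("LEFT", False)
    names = {(0, 1): "UP", (1, 1): "UP_RIGHT", (1, 0): "RIGHT",
             (1, -1): "DOWN_RIGHT", (0, -1): "DOWN", (-1, -1): "DOWN_LEFT",
             (-1, 0): "LEFT", (-1, 1): "UP_LEFT"}
    return names.get((dx, dy))  # None when no direction is held

# Example: simulate holding 'w' (119) and 'd' (100) for player 1.
state = {}
bindings = load_bindings()
apply_event(state, bindings, 119, True)
apply_event(state, bindings, 100, True)
print(resolve(state["p1"]))  # -> "UP_RIGHT"
```

With this shape, adding a player or rebinding a key is a data change rather than another sixty-line elif chain.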
s-razavi/Insight_Pipeline_Failure | https://github.com/s-razavi/Insight_Pipeline_Failure | 5d16e5ec16be0cdfdd2797e04cf21c98487d4f5a | ff0cb5005cddc8b63b696ab2acf12d4a71719728 | c2da842b81f711d4be0148e940093913e750a744 | refs/heads/master | 2020-12-15T03:21:27.363887 | 2020-04-06T13:56:13 | 2020-04-06T13:56:13 | 234978435 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6062900424003601,
"alphanum_fraction": 0.6163851618766785,
"avg_line_length": 40.13600158691406,
"blob_id": "18150fc8ac475480cf6dd7dca9701ba808bcde9d",
"content_id": "23545e06b805dd5cdc7459d260ce8483123d7317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5151,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 125,
"path": "/web app/app.py",
"repo_name": "s-razavi/Insight_Pipeline_Failure",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\nimport pandas as pd\nimport re\nfrom nltk.tokenize import word_tokenize\n\napp = Flask(__name__)\n\ndef outputs(some_input):\n import numpy as np\n import pickle\n from sklearn.model_selection import train_test_split\n from tensorflow.keras.optimizers import SGD\n from keras.models import model_from_json\n \n vector = pickle.load(open('vector.pkl','rb'))\n encoded_y = pickle.load(open('encoded_y.pkl','rb'))\n mileage_df = pickle.load(open('mileage.pkl','rb'))\n words = pickle.load(open('used_words.pkl','rb'))\n x, x_test_final, y, y_test_final = train_test_split(vector, encoded_y, test_size=0.1, random_state=44)\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=44)\n TFIDF = pickle.load(open('TFIDF.pkl','rb'))\n label_encoder_y = pickle.load(open('label_encoder_y.pkl','rb'))\n model_rf = pickle.load(open('model_rf.pkl','rb'))\n \n json_file = open('model_nn.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model_nn = model_from_json(loaded_model_json)\n # load weights into new model\n model_nn.load_weights(\"model_nn.h5\")\n #print(\"Loaded model from disk\")\n opt = SGD(lr=0.1, momentum=0.9, decay=0.001)\n model_nn.compile(optimizer = opt, loss='sparse_categorical_crossentropy' , metrics=['accuracy'])\n \n json_file = open('model_ensemble.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model_ensemble = model_from_json(loaded_model_json)\n # load weights into new model\n model_ensemble.load_weights(\"model_ensemble.h5\")\n #print(\"Loaded model_ensemble from disk\")\n model_ensemble.compile(optimizer = opt, loss='sparse_categorical_crossentropy' , metrics=['accuracy'])\n \n model_rf_input = model_rf.predict(TFIDF.transform([some_input]))\n model_nn_input = model_nn.predict(TFIDF.transform([some_input]))\n \n concated = []\n concated.append(np.concatenate((model_nn_input, model_nn_input), axis=1))\n concated = np.asarray(concated)\n concated = np.reshape(concated, (concated.shape[0], concated.shape[2]))\n \n #print(label_encoder_y.inverse_transform(np.argsort(model_ensemble.predict(concated), axis=1)[:,-1:][0])[0])\n k = 11\n model_rf_input = model_rf.predict_proba(TFIDF.transform([some_input]))\n model_nn_input = model_nn.predict(TFIDF.transform([some_input]))\n concated = []\n concated.append(np.concatenate((model_nn_input, model_rf_input), axis=1))\n concated = np.asarray(concated)\n concated = np.reshape(concated, (concated.shape[0], concated.shape[2]))\n \n worst_k = np.argsort(model_ensemble.predict(concated), axis=1)[:,-k:][0]\n #print('Companies to avoid:')\n worst = []\n for j in range(3):\n worst.append(label_encoder_y.inverse_transform([worst_k[k-1-j]])[0])\n #worst = np.asarray(worst)\n #worst = worst.reshape(3,1)\n \n best_k = np.argsort(model_ensemble.predict(concated), axis=1)[:,0:k][0]\n #print('\\n','Companies for the project:')\n company = []\n mileage = []\n for j in range(k):\n comp = label_encoder_y.inverse_transform([best_k[j]])[0]\n if comp in mileage_df.name.values:\n company.append(comp)\n mileage.append(int(mileage_df.loc[mileage_df['name'] == comp, 'miles'].iloc[0]))\n df_best = pd.DataFrame({'Companies with least faiulres':company, 'Mileage': mileage})\n \n cleaned = re.sub('\\W+', ' ', some_input)\n tokens = word_tokenize(cleaned)\n match = len([token for token in tokens if token in words])/len(tokens)\n \n return(df_best, worst, match)\n \ndef outputs_(some_input):\n return (pd.DataFrame([1,2], [2,3]), 
'luck')\n\[email protected]('/',methods=[\"GET\",\"POST\"]) #we are now using these methods to get user input\ndef home_page():\n return render_template('index_.html')\n\[email protected]('/output')\ndef recommendation_output():\n# \n # Pull input\n some_input =str(request.args.get('user_input')) \n df_best, worst, match = outputs(some_input) \n\n # Case if empty\n if len(some_input)<35:\n return render_template(\"index_.html\", \n my_input = some_input,\n my_form_result=\"Empty\")\n elif match < 0.2:\n return render_template(\"index_.html\", \n my_input = some_input,\n my_form_result=\"no_match\")\n \n else:\n \n some_output=\"yeay!\"\n some_number= 'Recommendation:'\n some_image=\"giphy.gif\"\n return render_template(\"index_.html\",tables=[df_best.to_html(classes='data')], titles=df_best.columns.values,\n #my_input=label_encoder_y.inverse_transform(model_NLP.predict(TFIDF.transform([some_input])))[0],#some_input,\n my_input = worst,\n my_output=some_output,\n my_number=some_number,\n my_img_name=some_image,\n my_form_result=\"NotEmpty\")\n\n# start the server with the 'run()' method\nif __name__ == \"__main__\":\n app.run(threaded=False)\n \n "
},
{
"alpha_fraction": 0.8137621283531189,
"alphanum_fraction": 0.8204936385154724,
"avg_line_length": 101.84615325927734,
"blob_id": "251f2df7ca0994fa37fee694850b3c5c0c20b57d",
"content_id": "bffab0eeb198605b696afc0d6b1309d2f095e516",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1337,
"license_type": "no_license",
"max_line_length": 446,
"num_lines": 13,
"path": "/readme.md",
"repo_name": "s-razavi/Insight_Pipeline_Failure",
"src_encoding": "UTF-8",
"text": "# Reducing the risk associated with investing in pipeline projects\nThis is a 3-week research project performed for Insight program in Jan 2020 in Toronto. The objective is to avoid high-risk companies for any upcoming pipeline project considering the project specifications. \n\n## Summary\nThis project helps minimizing the risk associated with investing in pipeline industries. It uses feature engineering and NLP for extracting features from text and pipeline specifications, and it suggests companies that have minimum risk considering the output.\n\n## Approach\nData was collected from US department of transportation for pipeline failures in US over past 20 years. For each incident the regulator collected pipeline specifications in details, together with a technical note that explains the observations and details of the incident. One_Hot_Encoding was performed on features and TFIDF on text, and their result was merged and fed to an ensemble stacked model formed from random forest and neural network. \n\n\n## Result\nProviding the details of an incident, the model predicts with 68% of accuracy that which pipeline company caused the failure. This training is used to avoid high risk companies for upcoming projects, also to recommend minimum risk companies considering the project specifications.\n\n"
}
] | 2 |
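Editor's note on the Insight_Pipeline_Failure record above: its README describes a stacked ensemble in which a random forest's and a neural network's per-class probabilities are concatenated and fed to a meta-model, and app.py does exactly that concatenation with np.concatenate. Below is a minimal, self-contained sketch of the same idea using only scikit-learn; MLPClassifier stands in for the repo's pickled Keras network, and the data is synthetic, so this is an illustration of the technique rather than the repo's actual pipeline.

```python
# Minimal stacked-ensemble sketch: base-model probabilities concatenated
# into features for a meta-model, mirroring app.py's approach.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

# Synthetic stand-in for the TF-IDF + one-hot feature matrix.
X, y = make_classification(n_samples=600, n_features=40, n_informative=10,
                           n_classes=3, random_state=44)
X_base, X_meta, y_base, y_meta = train_test_split(X, y, test_size=0.5,
                                                  random_state=44)

rf = RandomForestClassifier(n_estimators=100, random_state=44).fit(X_base, y_base)
nn = MLPClassifier(hidden_layer_sizes=(32,), max_iter=500,
                   random_state=44).fit(X_base, y_base)

# Concatenate per-class probabilities from both base models, as app.py does
# before calling model_ensemble.predict.
stacked = np.concatenate([nn.predict_proba(X_meta),
                          rf.predict_proba(X_meta)], axis=1)
meta = LogisticRegression(max_iter=1000).fit(stacked, y_meta)
print("meta-model accuracy:", meta.score(stacked, y_meta))
```

Training the meta-model on a split the base models never saw (as above) avoids the leakage that would come from stacking on the base models' own training predictions.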
zaferdurkut/tdk_django_api | https://github.com/zaferdurkut/tdk_django_api | 70d28712ce243b829f2de6200b4da639087dd07b | 71db374a297ef16e08bf1957e83142377f8b1583 | a8fad0948f65c2522ed71a3f5a2d3c7d6de1b942 | refs/heads/master | 2020-09-12T02:19:24.866839 | 2019-11-18T07:03:55 | 2019-11-18T07:03:55 | 222268176 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5743589997291565,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 20.77777862548828,
"blob_id": "8de176cc0c12bdb56c7d6039bfd7a72e945a612e",
"content_id": "548462946fcda96b57c0e747e32f1b14c91b068c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "zaferdurkut/tdk_django_api",
"src_encoding": "UTF-8",
"text": "python-dotenv==0.10.1\nDjango==2.2.7\ndjangorestframework==3.10.3\nipython==7.9.0\ndjango-jet==1.0.8\ndjango-filter==2.2.0\ndjango-health-check==3.11.1\ndjango-rest-swagger==2.2.0\npsycopg2-binary==2.8.4"
},
{
"alpha_fraction": 0.7245283126831055,
"alphanum_fraction": 0.7245283126831055,
"avg_line_length": 24.285715103149414,
"blob_id": "e32ba65433e3493fb233c03b70a6f7c888e0c27a",
"content_id": "236ef31c609aa5860fffd4752b1505ddd06966b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 21,
"path": "/tdkapi/urls.py",
"repo_name": "zaferdurkut/tdk_django_api",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.conf.urls import url, include\nfrom django.urls import path\nfrom dotenv import load_dotenv\nimport os\nfrom rest_framework_swagger.views import get_swagger_view\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nload_dotenv(dotenv_path=BASE_DIR)\n\n\nschema_view = get_swagger_view(title='TDK Django Rest API')\n\n\nurlpatterns = [\n path(r'', include('api.urls')),\n # path(str(os.getenv('DJANGO_ADMIN_URL')), admin.site.urls),\n url(r'^api-docs$', schema_view),\n\n]"
},
{
"alpha_fraction": 0.720893144607544,
"alphanum_fraction": 0.7304624915122986,
"avg_line_length": 27.827587127685547,
"blob_id": "c9f4fba42907e2786b08e3f5b5c10dcb03136345",
"content_id": "c590618079ff31f2ea6a23213ac390315d7bcbae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2648,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 87,
"path": "/README.md",
"repo_name": "zaferdurkut/tdk_django_api",
"src_encoding": "UTF-8",
"text": "# TDK Django Api\nBu projede Django Rest ile [TDK](https://sozluk.gov.tr/) API'si kullanılarak kelime için anlam ve atasözlerini döndüren bir web servisi çalışması yapılmıştır. Proje ile girilen kelime için anlam ve atasözlerine json formatında ulaşabilirsiniz.\n\n Projede;\n- Django Rest Framework\n- Django Swagger\n- Django Health Check \\\nkullanılmıştır.\n\n## Servisin Kurulumu\n\nKurulum için docker kurulu olmalıdır. Docker kullanılmadan kurulum için requirements.txt içerisinde lib'ler python 3 için kurulmalıdır. Docker ile kurulum için sırası ile;\n\n```\ngit clone https://github.com/zaferdurkut/tdk_django_api.git\n```\n\n\nProjeye gidilir\n```\ncd tdk_django_api\n```\nÇalışıcak servis için .env dosyasının düzelenmesi gerekmektedir.\n```\ncp default.env .env\n```\nenv dosyası düzenlendikten sonra servisi başlatmak için\n```\ndocker-compose up --build -d\n```\nServis ayağa kalktından sonra servise giriş için aşağıdaki komut çalıştırılır.\n```\ndocker exec -it tdkapi_app_1 bash\n```\n\n## Servisin Çalıştırılması\nEğer python sürümü 3 değilse (alias python='/usr/bin/python3') komutunu çalıştırınız.\n**Servisi çalıştırmak** için (eğer farklı bir porttan çıkılacak ise docker **üzerinden ilgili port dışarıya açılmalıdır.)\n```\npython manage.py runserver 0.0.0.0:8080\n```\n## Servisin Kullanımı\nServis endpointlerini görmek için [http://localhost:8080/api-docs](http://localhost:8080/api-docs) adresini kullanbilirsiniz. \n- **postman_collections** dizininde bulunan collections'lar ile servisin end pointlereini test edebilirsiniz.\n\n\n### get_word - GET \nBu method ile aşağıda bulunan body ile gönderildiğinde verdiğiniz parametrelere uygun olarak ilgili kelime için anlam ve atasözlerini opsiyonel olarak gösterir.\n\n- request body \n```\n\n{\n\t\"word\" : \"toz\",\n\t\"anlam\" : \"True\",\n\t\"atasozu\" :\"True\"\n}\n```\n\n- response \n```\n\n{\n \"anlam\": [\n \"Çok küçük ve hafif parçacıklara bölünmüş toprak\",\n \"Çok küçük parçacıklara bölünmüş olan herhangi bir madde\",\n \"Bu durumda olan\"\n ],\n \"atasozu\": [\n \"toz almak\",\n \"tozdan dumandan ferman okunmamak\",\n \"toz etmek\",\n \"toz kondurmamak\",\n \"toz koparmak\",\n \"toz olmak\",\n \"tozu dumana katmak\",\n \"tozunu almak (veya atmak veya silkelemek veya silkmek)\"\n ]\n}\n```\n#### anlam is True\nResponse'da ilgili kelimenin anlamını döndürür\n#### atasozu is True\nResponse'da üretilen kelime için atasözlerini döndürür.\n\n### health_check - GET\nhttp://localhost:8080/health_check/ adresinden servis durumunu görüntüleyebilirsiniz.\n"
},
{
"alpha_fraction": 0.5880101919174194,
"alphanum_fraction": 0.5914115905761719,
"avg_line_length": 29.960525512695312,
"blob_id": "b3d5fb4c5a74ffa606cc457c269ba7e7cfbcda6a",
"content_id": "d1c71b98dbe502a5633d22f0e72b9efb28b40977",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2354,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 76,
"path": "/api/views.py",
"repo_name": "zaferdurkut/tdk_django_api",
"src_encoding": "UTF-8",
"text": "# encoding=utf8\nfrom rest_framework.response import Response\nfrom django.http import FileResponse\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework import permissions\nfrom dotenv import load_dotenv\nimport json\nimport ast\nimport os\nimport requests\nfrom django.http import HttpResponse\n\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nload_dotenv(dotenv_path=BASE_DIR)\n\n\n\n@api_view(['GET'])\n@permission_classes((permissions.AllowAny,))\ndef get_word(request):\n \"\"\"\n get word from TDK\n \"\"\"\n\n try:\n data = json.loads( request.body.decode('utf-8') )\n except KeyError:\n Response(\"Malformed data!\")\n\n try:\n # ast.literal_eval(data[\"meaning\"])\n # ast.literal_eval(data[\"adage\"])\n \n base_url = os.getenv('TDK_API_URL')\n params = {\"ara\":data[\"word\"]}\n response = requests.get(base_url, params=params)\n response.encoding = \"utf-8\"\n response_data = response.text\n response_data = response.text[1:-1]\n response_data = json.loads(response_data)\n\n result = {}\n if 'atasozu' in response_data:\n try:\n if ast.literal_eval(data[\"atasozu\"]) is True :\n atasozu_list = []\n for atasozu in response_data[\"atasozu\"]:\n atasozu_list.append(atasozu[\"madde\"])\n result[\"atasozu\"] = atasozu_list\n except ValueError:\n pass\n\n if 'anlamlarListe' in response_data:\n try:\n if ast.literal_eval(data[\"anlam\"]) is True:\n anlam_list = []\n for anlam in response_data[\"anlamlarListe\"]:\n anlam_list.append(anlam[\"anlam\"])\n print(anlam)\n result[\"anlam\"] = anlam_list\n except ValueError:\n pass\n \n return Response(result)\n # todo: JSONDecodeError hatasi düzeltilecek (coklu json calisması)\n except json.decoder.JSONDecodeError as e:\n return HttpResponse(\"JSONDecodeError\" , status=500)\n\n except Exception as e:\n raise e\n return Response(\"Try Again!\")"
}
] | 4 |
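Editor's note on the tdk_django_api record above: the README documents the get_word method's request body and JSON response, and views.py reads that body from request.body on a GET. Below is a hedged client sketch; the /get_word path is an assumption, since the actual route lives in api/urls.py, which is not included in this record, and the localhost URL assumes the docker-compose setup from the installation steps.

```python
# Hypothetical client call against the service's documented get_word method,
# using the request body shown in the README.
import requests

payload = {"word": "toz", "anlam": "True", "atasozu": "True"}

# Assumed route: the real path is defined in api/urls.py (not shown here).
resp = requests.get("http://localhost:8080/get_word", json=payload, timeout=10)
resp.raise_for_status()
data = resp.json()

print(data.get("anlam"))    # word meanings, per the README's example response
print(data.get("atasozu"))  # proverbs/idioms for the word
```

Note that sending a JSON body with a GET request is unusual for HTTP clients but matches how the view parses request.body.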
weizhousjtu/trio_sequence_analysis | https://github.com/weizhousjtu/trio_sequence_analysis | fcac3ea5a0e2f92841f0b7ef57131af64989c0a8 | 0320780d82853d8244c3a3a4a7eab9410070b793 | 5e1bc02e0a2c538de50da85e316e623eb4527a58 | refs/heads/master | 2020-12-14T22:16:40.179814 | 2019-02-19T21:04:26 | 2019-02-19T21:04:26 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6154299378395081,
"alphanum_fraction": 0.6295641660690308,
"avg_line_length": 30.462963104248047,
"blob_id": "b97d284cab81c585f3ebe541979db653344fdb21",
"content_id": "b4e9edbfb337c6eba9abc0907acc179df1976906",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1698,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 54,
"path": "/dnm_calling/processTDT.py",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "\"\"\"\n:File: ProcessTDT.py\n:Author: Jack A. Kosmicki\n:Last updated: 2014-09-08\n\nread output file from TDT.py\n\nUsage:\n ProcessTDT.py doTDTstat <inputFile> <outputFile_Name> [options]\n\nOptions:\n --genes=G_FILE file with names of the genes you want to look at\n --norm flag to compute a normalized variance and chi-squared stat\n default is false\n --M=Val Missingness threshold [default: 0.99]\n -h, --help Print this message and exit.\n -v, --version Print the version and exit.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom collections import OrderedDict\nfrom scipy import stats\nfrom docopt import docopt\n\ndef read_calcTDT(inputFile, outputFile_Name, missThresh):\n\n df = pd.read_csv(inputFile, sep='\\t', header=0)\n # remove all rows that begin with CHROM\n mask = df.applymap(lambda x: x in ['CHROM', 'REF'])\n df = df[-mask.any(axis=1)]\n df['AN'] = df['AN'].apply(int)\n df['transmitted'] = df['transmitted'].apply(int)\n df['untransmitted'] = df['untransmitted'].apply(int)\n\n max = df['AN'].max(axis=0)\n\n df = df.ix[df.index[df['AN'] > max * missThresh]] # check for missingness\n df = df.ix[df.index[df['transmitted'] + df['untransmitted'] != 0]]\n\n\n df['TDT_SCORE'] = ((df['transmitted'] - df['untransmitted'])**2) / (df['transmitted'] + df['untransmitted'])\n df['TDT_PVAL'] = 1- stats.chi2.cdf(df['TDT_SCORE'], 1)\n\n df.to_csv(outputFile_Name, sep='\\t', index=False)\n\nif __name__ == \"__main__\":\n args = docopt(__doc__, version='0.1')\n\n missThresh = float(args['--M']) # default is 0.99\n\n if args['doTDTstat']:\n read_calcTDT(args['<inputFile>'], args['<outputFile_Name>'], missThresh)"
},
{
"alpha_fraction": 0.5540605187416077,
"alphanum_fraction": 0.5860163569450378,
"avg_line_length": 24.072288513183594,
"blob_id": "02ce3b4cfcc53842ee34cbabf250bbf0c072b3e9",
"content_id": "0999598dd31e4b55fb25a7bfbdbc0faee0874f9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4162,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 166,
"path": "/gene_enrichment/single_gene_enrichment.R",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "## single_gene_enrichment.R\n\n## Author: Daniel P. Howrigan ([email protected])\n## Last Modified: February 2019\n\n## ----- DESCRIPTION:\n\n## Run a poisson test on each gene\n\n## account for X chromosome genes (males + 2*females)\n\n## tests:\n# - all DNM\n# - ptv\n# - ptv+mis\n# - mis\n# - syn\n\n## poisson.test against mutation expectation\n\n## poisson.test([DNM count],[Inherited chromosome count],[mutation expectation])\n\n# example:\n# > poisson.test(2,(2772*2),(8.287213e-05*2))\n\n# Exact Poisson test\n\n# data: 2 time base: (2772 * 2)\n# number of events = 2, time base = 5544, p-value = 0.2344\n# alternative hypothesis: true event rate is not equal to 0.0001657443\n# 95 percent confidence interval:\n# 4.368854e-05 1.303154e-03\n# sample estimates:\n# event rate \n# 0.0003607504 \n\n\n\n## ----- SCRIPT SET UP:\n\n## PART 1: get chromosome counts\n## PART 2: read in relevant files\n## PART 3: Run poisson test in each gene\n## PART 4: combine results and write to file\n\n\n## ----- NOTES:\n\n## restricting to genes passing coverage QC \n## Using gencode_pct75_gene17925_haploid.tsv, where the expectation is per-inherited chromosome\n\n\n\n## PART 1: get chromosome counts\n\n## inherited autosomal chromosomes (numer of trios * 2)\nauto_nchrobs <- 2772*2\n## inherited sex chromosomes (number of trios + number of female probands)\nx_nchrobs <- 2772+1118\n\n\n\n## PART 2: read in relevant files\n\n## get DNM list\ndnm <- read.table('../files/combined_cohorts_DNM_list.tsv',h=T,stringsAsFactors=F)\nscz <- subset(dnm,dnm$DISEASE=='SCZ')\n\nscz_lof <- subset(scz,scz$annotation_used=='ptv')\nscz_mis <- subset(scz,scz$annotation_used=='missense')\nscz_lofmis <- subset(scz,scz$annotation_used=='ptv' | scz$annotation_used=='missense')\nscz_syn <- subset(scz,scz$annotation_used=='synonymous')\n\n## read in per-gene adjusted mutation rates\ngene_list <- read.table('../files/gencode_pct75_gene17925_haploid.tsv',h=T,sep='\\t',stringsAsFactors=F)\n\n## get percentiles\ngene_list <- gene_list[order(gene_list$bp),]\ngene_list$bp_prop <- seq(1,nrow(gene_list),1) / nrow(gene_list)\n\n\n\n\n## PART 3: Run poisson test in each gene\n\n## ======= output variables\n\n\nmut_type <- c('all','lof','lofmis','mis','syn')\n\ngene <- NA\nmut <- NA\ncount <- NA\nexp_rate <- NA\nobs_rate <- NA\npval <- NA\n\n\nX <- 1\n## LooP through genes\nfor (a in 1:nrow(gene_list)) {\n\n # subset to gene\n gg <- gene_list[a,]\n\n ## LooP through mut type\n for (b in 1:length(mut_type)) {\n\n \t## collect gene level info\n\t gene[X] <- gg$gene\n\t \t mut[X] <- mut_type[b]\n\t\t \t if (mut[X]=='all') { dd <- scz ; gm <- as.numeric(gg$p_all) }\n\t\t\t if (mut[X]=='lof') { dd <- scz_lof ; gm <- as.numeric(gg$p_lof) }\n\t\t\t \tif (mut[X]=='lofmis') { dd <- scz_lofmis ; gm <- as.numeric(gg$p_lof) + as.numeric(gg$p_mis) }\n\t\t\t\t if (mut[X]=='mis') { dd <- scz_mis ; gm <- as.numeric(gg$p_mis) }\n\t\t\t\t if (mut[X]=='syn') { dd <- scz_syn ; gm <- as.numeric(gg$p_syn) }\n\n\t\t\t\t \t ## subset dd to gene\n\t\t\t\t\t count[X] <- sum(dd$gene_symbol_used %in% gg$gene)\n\n\t\t\t\t\t \t ## autosomal test\n\t\t\t\t\t\t \tif (gg$chr != 'X') {\n\t\t\t\t\t\t\t \t mod <- poisson.test(count[X],auto_nchrobs,gm,alternative='greater')\n\t\t\t\t\t\t\t\t \t exp_rate[X] <- mod$null.value\n\t\t\t\t\t\t\t\t\t \t \t obs_rate[X] <- mod$estimate\n\t\t\t\t\t\t\t\t\t\t\t \t \tpval[X] <- mod$p.value\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t} ## END if\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ## Xchr test\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t if (gg$chr == 'X') 
{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\tmod <- poisson.test(count[X],x_nchrobs,gm,alternative='greater')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exp_rate[X] <- mod$null.value\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t obs_rate[X] <- mod$estimate\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t pval[X] <- mod$p.value\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t } ## END if\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t print(X)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t X <- X+1\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t} ## END of b LooP\n\n} ## END of a LooP\n\n\n\n\n## PART 4: combine results and write to file\n\nres <- cbind.data.frame(gene,\nmut,\ncount,\nexp_rate,\nobs_rate,\npval)\n\n## combine with gene information\nres2 <- merge(res,gene_list,by='gene',all=T)\n\n## order by lowest p-value\nres2 <- res2[order(res2$pval),]\n\nwrite.table(res2,'SCZ_poisson_test_gencode_pct75_gene17925.txt',col=T,row=F,quo=F,sep='\\t')\n\n\n\n## END of single_gene_enrichment.R\n"
},
{
"alpha_fraction": 0.5966219305992126,
"alphanum_fraction": 0.6036564707756042,
"avg_line_length": 55.439491271972656,
"blob_id": "374ae6ebc7b45d59a473914d9c279e7ac43a6c78",
"content_id": "3541cd9b9ca1273f6dedd6c571f0bfe98d702bd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26583,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 471,
"path": "/dnm_calling/run_lof_annotation.py",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\n__author__ = 'konradjk'\n\nimport argparse\nimport subprocess\nimport sys\nimport gzip\nimport os\nimport os.path\nimport time\nimport re\nimport pipes\nimport tempfile\n\n# Great hack for 2.X and 3.X to use input()\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ndef main(args, pass_through_args):\n if args.lookup:\n if args.dbnsfp or args.gtex or args.ancestral or args.context or args.add_rank_filter \\\n or args.filter is not None or args.lof_only or args.refseq or args.basic or args.apply_all \\\n or args.skip_conservation or args.position != 0.05 or args.intron_size != 15:\n print('Note that --lookup can only be used with the default options. Please re-run without this option.', file=sys.stderr)\n sys.exit(1)\n bhosts = subprocess.check_output([\"bhosts\"], stderr=subprocess.STDOUT)\n\n if args.cache_version != args.vep_version:\n print('WARNING: Cache version is not the same as VEP version. Continuing, but results may be off...', file=sys.stderr)\n if '.vcf' not in args.vcf:\n print('ERROR: \".vcf\" is not in your input file - is this a VCF?', file=sys.stderr)\n sys.exit(1)\n fasta_type = ''\n try:\n subprocess.check_output(['samtools'], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n if 'Version: 0.1' in e.output:\n print('Samtools version 0.1 detected.', file=sys.stderr)\n fasta_type = '.rz'\n elif 'Version: 1' in e.output:\n print('Samtools version 1 detected.', file=sys.stderr)\n fasta_type = '.gz'\n except OSError:\n print('ERROR: You do not have samtools on your path. Needed for LOFTEE! (Run: use Samtools, and/or put it in your ~/.my.bashrc)', file=sys.stderr)\n sys.exit(1)\n try:\n subprocess.check_output(['git'], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n print('ERROR: You do not have git on your path. 
Please run: use Git-2.0, and/or put it in your ~/.my.bashrc')\n sys.exit(1)\n if args.mysql or args.lookup:\n conf_file = os.path.join(os.path.expanduser('~'), '.my.cnf')\n if not os.path.exists(conf_file) or '[loftee]' not in open(conf_file).read():\n print('ERROR: [loftee] tag not found in ~/.my.cnf - please add this tag', file=sys.stderr)\n sys.exit(1)\n\n konrad_vep_dir_raw = '/humgen/atgu1/fs03/konradk/vep/%s'\n loftee_data_location = args.loftee_data_location\n combine_script_path = os.path.join(os.path.dirname(__file__), \"combine_and_delete.py\")\n check_script_path = os.path.join(os.path.dirname(__file__), \"check_vep_jobs.py\")\n vep_location = args.vep_location or '/humgen/atgu1/fs03/konradk/vep/ensembl-tools-release-%s/scripts/variant_effect_predictor/variant_effect_predictor.pl' % args.vep_version\n vep_cache_dir = args.vep_cache_dir if args.vep_cache_dir else konrad_vep_dir_raw % ''\n dev = '_dev' if args.dev else ''\n git_version = subprocess.check_output(['git', '--git-dir=%s/loftee%s/.git/' % (konrad_vep_dir_raw % '', dev), 'describe', '--tags']).strip().split('-')[0]\n num_known = 0\n if args.no_split:\n file_no = 1\n out_base = os.path.basename(args.vcf)\n all_current_line = 'all'\n else:\n cat_file = 'zcat' if args.vcf.endswith('.gz') else 'cat'\n\n lines = None\n # Make decision on how many lines per file\n if args.split_size is not None:\n number_to_split = args.split_size if args.split_size > 0 else 3E9\n elif args.default_submission:\n number_to_split = 20000\n else:\n print(\"Getting number of lines...\", file=sys.stderr)\n p1 = subprocess.Popen([cat_file, args.vcf], stdout=subprocess.PIPE)\n p2 = subprocess.Popen([\"wc\", \"-l\"], stdin=p1.stdout, stdout=subprocess.PIPE)\n p1.stdout.close()\n lines = int(p2.communicate()[0])\n\n print(\"File is %s lines.\" % lines, file=sys.stderr)\n number_to_split = 20000\n print(\"Typical safe limits: <=10K variants on the hour queue; <=500K variants on the priority queue\", file=sys.stderr)\n try:\n s = input('File is %s lines. How many lines per split file? 
[%s] ' % (lines, number_to_split))\n number_to_split = int(s)\n if number_to_split < 1:\n number_to_split = lines + 1\n except Exception as e:\n pass\n\n # Prepare files\n out_base = os.path.basename(args.vcf)\n file_no = 0\n current_line = 0\n all_current_line = 0\n header = ''\n my_open = gzip.open if args.vcf.endswith('.gz') else open\n g = None\n known = None\n header_line = None\n try:\n os.makedirs(args.output)\n except Exception as e:\n pass\n\n if lines is not None:\n print(\"Splitting into ~%s files of %s lines...\" % (lines/number_to_split + 1, number_to_split), file=sys.stderr)\n else:\n print(\"Splitting into files of %s lines...\" % (number_to_split), file=sys.stderr)\n\n start = time.time()\n if args.lookup:\n import MySQLdb\n import MySQLdb.cursors\n\n db = MySQLdb.connect(read_default_group='loftee', cursorclass=MySQLdb.cursors.DictCursor)\n conn = db.cursor()\n with my_open(args.vcf) as f:\n for line in f:\n if line.startswith('#'):\n if line.startswith('#CHROM'):\n header_line = line.lstrip('#').strip().split('\\t')\n header_line = dict(zip(header_line, range(len(header_line))))\n header += line\n else:\n if args.lookup:\n if known is None:\n if header_line is None:\n print(\"Header line not found in VCF and need it to lookup entries\", file=sys.stderr)\n sys.exit(1)\n\n pipe = pipes.Template()\n pipe.append('bgzip -c /dev/stdin', '--')\n base = out_base.replace('.vcf', '_known.vep.vcf')\n if not base.endswith('.gz'): base += '.gz'\n known = pipe.open(os.path.join(args.output, base), 'w')\n known.write(header)\n fields = line.strip().split('\\t')\n sql_data = [args.vep_version, git_version[:10], fields[header_line['CHROM']], fields[header_line['POS']], fields[header_line['REF']].upper(), fields[header_line['ALT']].upper()]\n conn.execute('SELECT csq from known_variants WHERE vep=%s AND loftee=%s AND chrom=%s AND pos=%s AND ref=%s AND alt=%s', sql_data)\n data = conn.fetchone()\n if data is not None:\n info = ';'.join([x for x in re.split(';(?=\\w)', fields[header_line['INFO']].rstrip(';')) if x.split('=')[0] != 'CSQ'])\n fields[header_line['INFO']] = info + ';CSQ=' + data['csq']\n known.write('\\t'.join(fields) + '\\n')\n num_known += 1\n continue\n if g is None:\n base = out_base.replace('.vcf', '_%04d.vcf' % file_no)\n if not base.endswith('.gz'): base += '.gz'\n g = gzip.open(os.path.join(args.output, base), 'w')\n file_no += 1\n g.write(header)\n if args.sites_only:\n g.write(line.strip().split('\\t')[:8])\n else:\n g.write(line)\n current_line += 1\n all_current_line += 1\n if current_line == number_to_split:\n g.close()\n g = None\n current_line = 0\n if g is not None: g.close()\n if known is not None: known.close()\n if not file_no:\n base = out_base.replace('.vcf', '_known.vep.vcf')\n if not base.endswith('.gz'): base += '.gz'\n known_file = os.path.join(args.output, base)\n final_name = known_file.replace('_known', '')\n os.rename(known_file, final_name)\n subprocess.check_output(['tabix', final_name])\n print('Yay! All your variants have been seen before. Annotated file is now ready at: %s' % os.path.abspath(final_name))\n sys.exit(0)\n\n print(\"Done. Wrote %s files of %s lines (took %s seconds).\" % (file_no, number_to_split, time.time() - start), file=sys.stderr)\n\n possible_queues = {'w': 'week',\n 'p': 'priority',\n 'h': 'hour',\n 'g': 'gsa'}\n queue = 'hour'\n if args.queue is None:\n if args.default_submission:\n args.queue = 'hour'\n else:\n args.queue = input('Which queue to submit to: (w)eek, (p)riority, (h)our? 
[hour] ')\n\n if len(args.queue.strip()) > 0 and args.queue[0] in possible_queues:\n queue = possible_queues[args.queue[0]]\n\n additional_options = ',filter_position:%s' % float(args.position)\n additional_options += ',min_intron_size:%s' % int(args.intron_size)\n if args.apply_all: additional_options += ',apply_all:true'\n\n if not args.skip_conservation:\n if args.mysql:\n additional_options += ',conservation_file:mysql'\n else:\n additional_options += ',conservation_file:%s/phylocsf.sql' % loftee_data_location\n\n log_dir = os.path.abspath(os.path.join(args.output, 'logs'))\n try:\n os.makedirs(log_dir)\n except Exception as e:\n pass\n\n # Set environment vars\n env = os.environ.copy()\n loftee_dir = konrad_vep_dir_raw % ('loftee_dev/' if args.dev else 'loftee/',)\n env['PERL5LIB'] = ':'.join([loftee_dir, env['PERL5LIB']]) if 'PERL5LIB' in env else loftee_dir\n print(\"PERL5LIB will be: %s\" % env['PERL5LIB'], file=sys.stderr)\n\n print(\"Submitting %s jobs to the %s queue...\" % (file_no, queue), file=sys.stderr)\n started_time = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\n log = open(os.path.join(log_dir, 'starting_log_%s.txt' % started_time), 'w')\n log.write('#command_line_call=%s\\n' % ' '.join(sys.argv))\n log.write('#current_working_directory=%s\\n' % os.getcwd())\n log.write('#loftee_version=%s\\n' % git_version)\n log.write('#vep_version=%s\\n' % args.vep_version)\n log.write('#cache_version=%s\\n' % args.cache_version)\n log.write('#original_file=%s\\n' % os.path.abspath(args.vcf))\n log.write('#time_started=%s\\n' % started_time)\n log.write('#original_size=%s\\n' % all_current_line)\n log.write('#number_of_files=%s\\n' % file_no)\n if args.lookup: log.write('#number_of_known_variants=%s\\n' % num_known)\n\n project = os.path.basename(args.vcf) if args.project is None else args.project\n\n start = time.time()\n all_jobs = []\n error_log = None\n for i in range(file_no):\n if args.no_split:\n input_file = os.path.abspath(args.vcf)\n else:\n base = out_base.replace('.vcf', '_%04d.vcf' % i)\n if not base.endswith('.gz'): base += '.gz'\n input_file = os.path.abspath(os.path.join(args.output, base))\n if file_no > 1 or args.lookup:\n base = out_base.replace('.vcf', '_%04d.vep.vcf' % i)\n else:\n base = out_base.replace('.vcf', '.vep.vcf')\n if not base.endswith('.gz'): base += '.gz'\n output_file = os.path.abspath(os.path.join(args.output, base))\n\n cache_dir_sub = args.cache_version if args.cache_version <= 75 else '%s_GRCh%s' % (args.cache_version, args.assembly_version)\n fasta_sub = args.assembly_version if args.assembly_version > 37 else '%s.%s' % (args.assembly_version, min(args.cache_version, 75))\n fasta_location = '%s/homo_sapiens/%s/Homo_sapiens.GRCh%s.dna.primary_assembly.fa' % (vep_cache_dir, cache_dir_sub, fasta_sub)\n\n job_name = out_base.replace('.vcf', '_%04d' % i).replace('.gz', '')\n bsub_command = ['bsub', '-oo', os.path.join(log_dir, 'log_%04d.txt' % i), '-R', 'rusage[mem=%s]' % args.memory, '-q', queue]\n temp_directory = tempfile.mkdtemp()\n if args.vep_version >= 79:\n bsub_command.extend(['-E', 'ls %(cache_dir)s > /dev/null; mkdir -p %(temp)s; ln -f -s %(fasta)s %(temp)s' % {'cache_dir': vep_cache_dir, 'fasta': fasta_location, 'temp': temp_directory}])\n else:\n bsub_command.extend(['-E', 'cd %s' % os.getcwd()])\n bsub_command.extend(['-g', '/%s' % project, '-P', project, '-J', job_name])\n if file_no == 1 and not args.no_email and not args.lookup:\n bsub_command.append('-N')\n if queue == 'hour':\n bsub_command.extend(['-W', '4:00'])\n\n if args.high_io is 
not None: bsub_command.extend(['-R', 'rusage[%s]' % args.high_io])\n\n bsub_command.append('/broad/software/free/Linux/redhat_5_x86_64/pkgs/perl_5.10.1/bin/perl')\n bsub_command.append(vep_location)\n\n # VEP options\n if not args.skip_everything: bsub_command.append('--everything')\n bsub_command.extend(['--vcf', '--allele_number', '--no_stats'])\n bsub_command.extend(['--cache', '--offline', '--dir', vep_cache_dir, '--force_overwrite'])\n bsub_command.extend(['--cache_version', str(args.cache_version)])\n if args.force_vcf: bsub_command.extend(['--format', 'vcf'])\n if args.filter: bsub_command.extend(['--filter', args.filter])\n\n if args.vep_version >= 79:\n bsub_command.extend(['--fasta', '%s/%s' % (temp_directory, os.path.basename(fasta_location))])\n else:\n bsub_command.extend(['--fasta', fasta_location])\n if args.vep_version >= 80: bsub_command.append('--minimal')\n if args.cache_version > 75: bsub_command.extend(['--assembly', 'GRCh%s' % args.assembly_version])\n\n if output_file.endswith('.gz'):\n bsub_command.append('--tabix')\n\n if args.basic: bsub_command.append('--gencode_basic')\n if args.refseq: bsub_command.append('--refseq')\n\n # Plugins\n if not args.skip_lof: bsub_command.extend(['--plugin', 'LoF,human_ancestor_fa:%s/human_ancestor.fa%s%s' % (loftee_data_location, fasta_type, additional_options)])\n\n if args.lof_only:\n bsub_command.extend(['--plugin', 'RankFilter,initiator_codon_variant'])\n elif args.add_rank_filter:\n bsub_command.extend(['--plugin', 'RankFilter,intron_variant'])\n if args.gtex: bsub_command.extend(['--plugin', 'TissueExpression,db_location:%s/gtex.db' % loftee_data_location])\n if args.dbnsfp is not None: bsub_command.extend(['--plugin', 'dbNSFP,%s,%s' % (args.dbnsfp_data_location, args.dbnsfp)])\n if args.context is not None: bsub_command.extend(['--plugin', 'context,%s' % args.context])\n if args.ancestral: bsub_command.extend(['--plugin', 'ancestral,human_ancestor_fa:%s/human_ancestor.fa%s' % (loftee_data_location, fasta_type)])\n if args.dbscSNV: bsub_command.extend(['--plugin', 'dbscSNV,%s/misc/dbscSNV.txt.gz' % loftee_data_location])\n bsub_command.extend(pass_through_args)\n bsub_command.extend(['-i', input_file])\n bsub_command.extend(['-o', output_file])\n\n\n print(\"Running: \" + \" \".join(bsub_command), file=sys.stderr)\n\n if not args.dry:\n job_number = run_job_lsf(bsub_command, env)\n if job_number is None:\n if error_log is None:\n error_log = open(os.path.join(log_dir, 'error_log_%s.txt' % started_time), 'w')\n print(' '.join(['\"%s\"' % x if ' ' in x else x for x in bsub_command]), file=error_log)\n else:\n log.write('%s\\t%s' % (job_number, \"\\t\".join(map(str, bsub_command)) + '\\n'))\n all_jobs.append(job_number)\n\n if file_no > 1 or args.lookup:\n base = os.path.abspath(os.path.join(args.output, out_base.replace('.vcf', '_%04d.vep.vcf')))\n output = os.path.abspath(os.path.join(args.output, out_base.replace('.vcf', '.vep.vcf')))\n if not base.endswith('.gz'):\n base += '.gz'\n output += '.gz'\n\n bsub_command = ['bsub', '-oo', os.path.join(log_dir, 'log_done.txt')]\n if args.lookup:\n bsub_command.extend(['-q', queue if queue == 'priority' else 'week'])\n else:\n bsub_command.extend(['-q', queue])\n if queue == 'hour': bsub_command.extend(['-W', '4:00'])\n bsub_command.extend(['-E', 'python %s --base %s --number %s' % (check_script_path, base, file_no), '-g', '/%s' % project, '-P', project])\n if not args.no_email: bsub_command.append('-N')\n bsub_command.extend(['-w', ' && '.join(all_jobs[-1000:])]) # limiting to last 
1K jobs since bash doesn't like long argument lists\n bsub_command.extend(['python', combine_script_path])\n\n bsub_command.extend(['--base', base, '--output', output])\n if not args.no_delete: bsub_command.append('--delete')\n if args.lookup: bsub_command.extend(['--lookup', '%s,%s' % (args.vep_version, git_version[:10])])\n if args.skip_lookup_add: bsub_command.append('--skip_lookup_add')\n bsub_command.extend(['--number', str(file_no)])\n\n print(\"Running: \" + \" \".join(bsub_command), file=sys.stderr)\n\n if not args.dry:\n job_number = run_job_lsf(bsub_command, env)\n log.write('%s\\t%s\\n' % (job_number, \"\\t\".join(map(str, bsub_command))))\n\n # Check if any jobs have failed\n new_queue = 'hour' if queue != 'priority' else queue\n dependency = 'ended(%s) && (exit(%s,>0))' % (') && ended('.join(all_jobs), ',>0) || exit('.join(all_jobs))\n bsub_command = ['bsub', '-N', '-q', new_queue, '-w', dependency, '-g', '/%s' % project, '-P', project, '-J', '%s_chk' % project, 'echo']\n bsub_command.append('\"At least one job failed. Please rerun with: cd %s; python /humgen/atgu1/fs03/konradk/src/rerun_failed_jobs.py -o %s [--dry]\"' % (os.getcwd(), args.output))\n rerun_job_number = run_job_lsf(bsub_command, env)\n\n # If final job succeeded, kill fail checkpoint\n bsub_command = ['bsub', '-o', '/dev/null', '-q', new_queue, '-g', '/%s' % project, '-P', project, '-J', '%s_chk2' % project, '-w', job_number, 'bkill', rerun_job_number]\n run_job_lsf(bsub_command, env)\n\n if args.lookup:\n conn.close()\n db.close()\n log.close()\n if error_log is not None: error_log.close()\n\n print(\"Done submitting %s jobs! Took %s seconds.\" % (len(list(range(file_no))) + 1, time.time() - start), file=sys.stderr)\n\n\ndef run_job_lsf(bsub_command, env=None):\n try:\n bsub_output = subprocess.check_output(bsub_command, stderr=subprocess.STDOUT, env=env)\n return [x for x in bsub_output.split('\\n') if x.startswith('Job')][0].split('<')[1].split('>')[0]\n except subprocess.CalledProcessError as e:\n print(e, file=sys.stderr)\n print(e.output, file=sys.stderr)\n print(' '.join(['\"%s\"' % x if ' ' in x else x for x in bsub_command]), file=sys.stderr)\n return None\n\nif __name__ == '__main__':\n INFO = \"\"\"Runs VEP with LOFTEE.\nMinimal usage is: python run_lof_annotation.py -i input.vcf[.gz] -o output_directory\nThis will count the number of lines in the file, and prompt for the number of lines to split the file into and which queue to submit to.\nIf you'd like to omit the interactive step, add -s NUMBER_OF_LINES -q QUEUE. 
-s 20000 -q hour is a sensible default.\"\"\"\n\n try:\n import configargparse\n parser = configargparse.ArgumentParser(description=INFO,\n default_config_files=['~/.run_lof_config'],\n args_for_setting_config_path=[\"-c\", \"--run-lof-config\"],\n formatter_class=configargparse.DefaultsRawFormatter)\n except ImportError:\n parser = argparse.ArgumentParser(description=INFO, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"-c\", \"--run-lof-config\", dest=\"config\", help=\"To enable this option, please install configargparse\")\n\n parser.add_argument('--vcf', '--input', '-i', help='Input VCF file; may be gzipped', required=True)\n parser.add_argument('--output', '-o', help='Output directory', required=True)\n\n submission_options = parser.add_argument_group('Submission options')\n submission_options.add_argument('--split_size', '-s', help='Number of lines to split file into', type=int)\n submission_options.add_argument('--queue', '-q', help='Which queue to submit to: (h)our, (p)riority, (w)eek')\n submission_options.add_argument('--default_submission', '-d', help='Submit default options (20000 variants to hour queue)', action='store_true')\n submission_options.add_argument('--memory', help='GB of memory to submit', type=int, default=8)\n submission_options.add_argument('--high_io', help='Set I/O requirements (default none, if used as flag, will default to /humgen/atgu1/fs03/ high I/O)', nargs='?', const='argon_io=1000', default=None)\n submission_options.add_argument('--no_email', action='store_true', help='Do not send an email when done')\n submission_options.add_argument('--no_delete', action='store_true', help='Do not delete intermediate files')\n submission_options.add_argument('--no_split', \"-n\", help='Do not split file, but run VEP/LOFTEE on one file', action='store_true')\n submission_options.add_argument('--project', '-P', help='Project name for submission [default = input vcf name]')\n submission_options.add_argument('--dry', help='Dry run (creates directories and splits file, but does not submit any jobs)', action='store_true')\n submission_options.add_argument('--server', help=\"Deprecated. This option is ignored.\")\n submission_options.add_argument('--lookup', help=\"Use annotation lookup to speed up\", action='store_true')\n submission_options.add_argument('--skip_lookup_add', help=\"Do not add new annotations to the database.\", action='store_true')\n submission_options.add_argument('--sites_only', help=\"Create sites VCFs for annotation.\", action='store_true')\n\n vep_options = parser.add_argument_group('VEP options')\n vep_options.add_argument('--cache_version', help='Default: 79', default=79, type=int)\n vep_options.add_argument('--vep_version', help='Default: 79', default=79, type=int)\n vep_options.add_argument('--assembly_version', help='Default: 37 (i.e. 
GRCh37); can also be 38', default=37, type=int)\n    vep_options.add_argument('--force_vcf', help='Force VEP to recognize input file as a VCF', action='store_true')\n    vep_options.add_argument('--basic', '--gencode_basic', help='Only use transcripts from Gencode Basic annotation', action='store_true')\n    vep_options.add_argument('--refseq', help='Use RefSeq transcript models for annotation', action='store_true')\n    vep_options.add_argument('--lof_only', help='Only print possible LoF (initiator_codon_variant and above) variants in output VCF', action='store_true')\n    vep_options.add_argument('--skip_everything', help='Skip --everything flag to VEP', action='store_true')\n    vep_options.add_argument('--filter', help='Comma-separated list of consequences (eg. SO terms) to pass to the VEP '\n                             '--filter option. The CSQ key/value will only be added to the INFO field for variants that *do* have one of '\n                             'these consequences. For a list of valid consequence terms, see '\n                             'http://useast.ensembl.org/info/genome/variation/predicted_data.html#consequences')\n    additional_plugins = parser.add_argument_group('Additional plugins')\n    additional_plugins.add_argument('--add_rank_filter', help='Filter for intron variant and up (default: run VEP with --plugin RankFilter,intron_variant)', action='store_true')\n    additional_plugins.add_argument('--context', help='Run context plugin', type=int, nargs='?', const=1, default=None)\n    additional_plugins.add_argument('--ancestral', help='Run ancestral plugin', action='store_true')\n    additional_plugins.add_argument('--gtex', help='Add GTEx information', action='store_true')\n    additional_plugins.add_argument('--dbnsfp', help='Add dbNSFP annotations. The value should be a comma-separated list of dbNSFP column names or nothing (eg. use as a flag) to only print all available dbNSFP columns.', nargs='?', const=\"\")\n    additional_plugins.add_argument('--dbscSNV', '--dbscsnv', help='Add dbscSNV annotations', action='store_true')\n\n    loftee_options = parser.add_argument_group('LOFTEE options')\n    loftee_options.add_argument('--skip_lof', help='Skip LOFTEE', action='store_true')\n    loftee_options.add_argument('--position', help='Position in transcript where a variant should be filtered (filter_position in LOFTEE)', type=float, default=0.05)\n    loftee_options.add_argument('--intron_size', help='Minimum intron size, below which a variant should be filtered (min_intron_size in LOFTEE)', type=int, default=15)\n    loftee_options.add_argument('--skip_conservation', help='Skip PhyloCSF filter', action='store_true')\n    loftee_options.add_argument('--apply_all', help='Apply LoF filters to all variants, not just LoF', action='store_true')\n    loftee_options.add_argument('--mysql', help='Uses MySQL instead of SQLite for PhyloCSF', action='store_true')\n    loftee_options.add_argument('--dev', help='Runs development version of LOFTEE (not recommended)', action='store_true')\n\n    location_options = parser.add_argument_group('File paths')\n    location_options.add_argument(\"--vep_location\", help=\"Full path of variant_effect_predictor.pl\")\n    location_options.add_argument(\"--loftee_data_location\", default='/humgen/atgu1/fs03/konradk/loftee_data/')\n    location_options.add_argument(\"--vep_cache_dir\", help=\"VEP cache directory\")\n    location_options.add_argument(\"--dbnsfp_data_location\", default='/humgen/atgu1/fs03/weisburd/xbrowse/data/reference_data/dbNSFP/dbNSFPv2.9.gz')\n\n    args, pass_through_args = parser.parse_known_args()\n\n    if parser.__module__ == \"configargparse\":\n        print(\"Running with the 
following settings and default values: \")\n parser.print_values()\n elif args.config:\n parser.error(\"To enable the -c / --run-lof-config option, please run: pip install --user configargparse\")\n main(args, pass_through_args)\n"
},
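The heart of run_lof_annotation.py in the record above is the split step, which copies the full VCF header into every fixed-size chunk before the per-chunk VEP jobs are submitted. The sketch below is a minimal, self-contained rendering of just that step; the function names, file naming scheme, and default chunk size are illustrative assumptions, and the bgzip output, sites-only mode, and known-variant lookup of the real script are omitted.

```python
import gzip

def _write_chunk(prefix, n, header, body):
    # gzip (not bgzip) keeps this sketch dependency-free; the real script
    # writes bgzipped chunks so they can be tabix-indexed later
    with gzip.open('%s_%04d.vcf.gz' % (prefix, n), 'wt') as g:
        g.writelines(header)
        g.writelines(body)

def split_vcf(path, out_prefix, lines_per_chunk=20000):
    """Split a (possibly gzipped) VCF into chunks that each carry the header."""
    opener = gzip.open if path.endswith('.gz') else open
    header, body, file_no = [], [], 0
    with opener(path, 'rt') as fh:
        for line in fh:
            if line.startswith('#'):
                header.append(line)      # header lines go into every chunk
                continue
            body.append(line)
            if len(body) == lines_per_chunk:
                _write_chunk(out_prefix, file_no, header, body)
                file_no, body = file_no + 1, []
    if body:                             # flush the final partial chunk
        _write_chunk(out_prefix, file_no, header, body)

# e.g. split_vcf('input.vcf.gz', 'input', 20000)
```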
{
"alpha_fraction": 0.6770473122596741,
"alphanum_fraction": 0.6931949257850647,
"avg_line_length": 29.964284896850586,
"blob_id": "da0294e910be59404e479e62965e702af81b3742",
"content_id": "ab01ca9ab336d30a1c056157c8fb12b36a3e9b99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 867,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 28,
"path": "/geneset_enrichment/dnm_list.R",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "## dnm_list.R\n\n## Author: Daniel P. Howrigan ([email protected])\n## Last Modified: January 2019\n\n\n## ----- DESCRIPTION:\n\n## Interactive Script to generate simple DNM lists for gene set enrichment analysis\n\n## ----- NOTES:\n\n## Does not require trio count as gene set enrichment is relative to total DNM count\n\n\n## read in DNM calls\ndnm <- read.table('../files/combined_cohorts_DNM_list.tsv',h=T,sep='\\t',stringsAsFactors=F)\n\n## remove DNMs in genes outside of CCDS database\ndnm <- subset(dnm,!is.na(dnm$CCDS))\n\n## subet DNMs\nscz <- subset(dnm,dnm2$DISEASE=='SCZ')\ncon <- subset(dnm,dnm2$DISEASE=='SIB_CONTROL')\n\n## write out simple DNM gene lists (gene symbol and annotation)\nwrite.table(scz[,c('gene_symbol_used','annotation_used')],'SCZ_n2772_CCDS.dnm',col=F,row=F,quo=F,sep='\\t')\nwrite.table(con[,c('gene_symbol_used','annotation_used')],'CON_n2216_CCDS.dnm',col=F,row=F,quo=F,sep='\\t')\n"
},
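dnm_list.R above does its subsetting in R; an equivalent of the same filter-and-export step in Python is sketched below. The file paths and column names come from the script, but pandas is my choice for illustration, not something the repository itself uses.

```python
import pandas as pd

# Read the combined DNM list and drop genes outside the CCDS database,
# mirroring the two subset() calls in dnm_list.R
dnm = pd.read_csv('../files/combined_cohorts_DNM_list.tsv', sep='\t')
dnm = dnm[dnm['CCDS'].notna()]

# Split by disease and write gene-symbol/annotation pairs, one file per cohort
for disease, outfile in [('SCZ', 'SCZ_n2772_CCDS.dnm'),
                         ('SIB_CONTROL', 'CON_n2216_CCDS.dnm')]:
    cohort = dnm[dnm['DISEASE'] == disease]
    cohort[['gene_symbol_used', 'annotation_used']].to_csv(
        outfile, sep='\t', header=False, index=False)
```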
{
"alpha_fraction": 0.7324551343917847,
"alphanum_fraction": 0.7440381646156311,
"avg_line_length": 34.2239990234375,
"blob_id": "1534f5870ce45f7bbd6e155fc8bd7b806b5581f0",
"content_id": "79348d34c5264c5efe7e0b323e5688c74d8966aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4403,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 125,
"path": "/geneset_enrichment/README.md",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "# Table of Contents\n* [Overview](#overview) \n* [dnm_list.R](#dnm_list.R)\n* [geneset_enrichment.R](#geneset_enrichment.R)\n* [process_geneset_enrichment_results.R](#process_geneset_enrichment_results.R)\n\n### Overview\n\nGene set enrichment tests for relative enrichment of DNM hits in a given gene set relative to the whole exome \n * Details of the enrichment test can be found in the supplementary material here: https://personal.broadinstitute.org/howrigan/taiwanese_trios_SCZ_supplement/Taiwanese-trio_Supplement_BioArxiv.pdf \n * Supplementary Section 7 - Statistical tests used to analyze DNM rates and patterns\n * Supplementary Section 14 - DNM burden in candidate gene sets\n\n\n### dnm_list.R\n\nExtracts simple lists of the gene symbol and primary annotation (PTV / missense / synonymous) from exome-wide coding DNM lists \n * primary goal - split DNMs by disease or cohort\n * secondary goal - split by custom annotation (not in ExAC or damaging annotation)\n\n\n### geneset_enrichment.R\n\nTest for enrichment of DNM overlapping genes in a given gene set against:\n * Mutation model expectations using a binomial exact test\n * Another DNM gene list (preferrably unaffected individuals) using a two-sample proportion test\n\n```\n## ----- DESCRIPTION:\n\n## Command line script to generate DNM gene set enrichment results\n\n\n# USAGE:\n\n# Rscript geneset_enrichment.R \\\n# [affected DNM list] \\\n# [unaffected DNM list] \\\n# [mutation expection gene list] \\\n# [gene sets file] \\\n# [output results file] \\\n# [DNM overlap details path] \\\n# [annotation type] \\\n# [header status] \\\n\n## --- argument descriptions:\n\n## affected DNM list: DNM list for affected samples from dnm_list.R containing gene symbol (column 1) and annotation (column 2)\n## unaffected DNM list: DNM list for unaffected samples from dnm_list.R containing gene symbol (column 1) and annotation (column 2)\n## mutation expection gene list: list of genes with mutation expectation for default annotations\n## gene sets file: gene set list with gene symbols (column 1) and gene set name (column 2) \n## output results file: results filename\n## DNM overlap details path: directory path for listing the overlapping genes in each gene set. Produces one file per gene set\n## annotation type: use 'default_annotation' (5 tests: all,ptv,ptv+mis,mis,syn) or 'custom_annotation' (1 aff/unaff test)\n## OPTIONS: [default_annotation] [custom_annotation]\n## header status: does the gene set list have a column header or not? 
\n## OPTIONS: [header] [no_header]\n\n\n# example usage:\n\n# Rscript geneset_enrichment.R \\\n# SCZ_n2772_CCDS.dnm \\\n# CON_n2216_CCDS.dnm \\\n# ../files/gencode_pct75_gene17925.tsv \\\n# ../files/candidate_genesets.tsv \\\n# SCZ_n2772_CCDS_coverageQC_candidate_genesets.tsv \\\n# overlap/SCZ_n2772_CCDS_coverageQC/ \\\n# default_annotation \\\n# header\n\n\n## ----- SCRIPT SET UP:\n\n## PART 1: read in command line arguments\n## PART 2: read in files / parse command line arguments\n## PART 3: Running enrichment with default annotations (all_types==T)\n## PART 4: Running enrichment with custom annotation (all_types==F)\n\n\n## ----- NOTES:\n\n## gene set enrichment does not test DNM rate, but DNM proportions, and thus doesn't require sample counts\n\n## the script currently restricts to genes in the mutation expection gene list, even for custom annotations and case/control comparisons\n\n## mutation expection gene list requires these four columns:\n## 1) gene - gene symbol\n## 2) p_all - all coding mutation probability\n## 2) p_syn - synonymous mutation probability\n## 2) p_mis - missense mutation probability\n## 2) p_lof - LoF/PTV/LGD mutation probability (sum of nonsense, frameshift, and essential splice probabilities)\n```\n\n### process_geneset_enrichment_results.R\n\nWhen looking at default annotations, convert larger results file into multiple annotation-specific files sorted by p-value\n\n```\n## ----- DESCRIPTION:\n\n## Command line script to convert the default DNM gene set enrichment results into multiple .tsv tables\n## Orders by mutation model p-value\n## Adds prettier column names\n\n# USAGE:\n\n# Rscript process_geneset_enrichment_results.R \\\n# [results file] \\\n# [output path] \\\n# [order by pval] \\\n\n\n# example usage:\n\n# Rscript process_geneset_enrichment_results.R \\\n# SCZ_n2772_CCDS_coverageQC_candidate_genesets.tsv \\\n# results/SCZ_n2772_CCDS_coverageQC/ \\\n# TRUE\n\n\n## ----- NOTES:\n\n## requires the default annotations output from geneset_enrichment.R \n```\n"
},
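The README above names two tests: a binomial exact test of the observed in-set DNM proportion against the mutation-model expectation, and a two-sample proportion test against control DNMs. A rough sketch of both follows; the counts and the expected proportion are invented placeholders, and SciPy/statsmodels are my library choices (the repository implements the tests in geneset_enrichment.R).

```python
from scipy.stats import binomtest                      # SciPy >= 1.7
from statsmodels.stats.proportion import proportions_ztest

# Hypothetical counts: DNMs overlapping the gene set vs. total DNMs tested
case_hits, case_total = 40, 2000
control_hits, control_total = 20, 1800
# Expected in-set proportion: per-gene mutation probabilities summed over the
# set, divided by the exome-wide sum (placeholder value here)
expected_prop = 0.012

# Test 1: observed case proportion vs. mutation-model expectation
model = binomtest(case_hits, case_total, expected_prop)
print('mutation-model p = %.3g' % model.pvalue)

# Test 2: case vs. control proportions
stat, pval = proportions_ztest([case_hits, control_hits],
                               [case_total, control_total])
print('case/control p = %.3g' % pval)
```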
{
"alpha_fraction": 0.7693548202514648,
"alphanum_fraction": 0.7693548202514648,
"avg_line_length": 32.486488342285156,
"blob_id": "d9f5eaee0be99b510542566e983fb1b3022fe9a1",
"content_id": "bc5c1f040bfd44552881cb4790afece8705c5208",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1240,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 37,
"path": "/README.md",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "# Trio sequence analysis\n\n# Table of Contents\n* [Overview](#overview) \n* [Files](#files)\n* [DNM calling](#dnm-calling)\n* [Exome burden](#exome-burden)\n* [Geneset enrichment](#geneset-enrichment)\n* [Gene enrichment](#gene-enrichment)\n\n### overview\n\nFiles and scripts used for testing enrichment of de novo mutations (DNMs) in schizophrenia probands \n\n### files\n\nFiles used for various analyses\n * DNM list\n * Trio counts\n * Per-gene mutation expectations\n * gene sets\n\n### DNM calling\n\n[Scripts](https://github.com/howrigan/trio_sequence_analysis/tree/master/dnm_calling) used to parse VCF file to get DNM calls and run variant annotation\n\n### Exome burden\n\n[Script](https://github.com/howrigan/trio_sequence_analysis/tree/master/exome_burden) for determining exome-wide burden of coding DNMs against control trios and mutation model expectations\n\n### Geneset enrichment\n\n[Scripts](https://github.com/howrigan/trio_sequence_analysis/tree/master/geneset_enrichment) for testing relative enrichment of DNMs in a specific gene set relative to full DNM list\n\n### Gene enrichment\n\n[Scripts](https://github.com/howrigan/trio_sequence_analysis/tree/master/gene_enrichment) for testing single gene DNM rates against mutation model expectations\n\n"
},
{
"alpha_fraction": 0.7788819670677185,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 46.235294342041016,
"blob_id": "329e49b42e6b98a795df1853124a311cf829e019",
"content_id": "5f6a817d24e2c52d10beb94dc263b3b38ec9059e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 17,
"path": "/gene_enrichment/README.md",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "# Table of Contents\n* [Overview](#overview) \n* [single_gene_enrichment.R](#single_gene_enrichment.R)\n\n### Overview\n\nSingle gene tests for rate of DNMs given your trio count\n * Details of the test can be found in the supplementary material here: https://personal.broadinstitute.org/howrigan/taiwanese_trios_SCZ_supplement/Taiwanese-trio_Supplement_BioArxiv.pdf \n * Supplementary Section 7 - Statistical tests used to analyze DNM rates and patterns\n * Supplementary Section 16 - Single gene DNM enrichment\n\n\n### single_gene_enrichment.R\n\nRuns a one-sided poisson rate test on individual genes relative to mutation expectations \n * Requires per-gene mutation expectations for PTV, missense, and synonymous coding variation\n * Requires total proband count and female proband count (for X-linked genes) \n\n"
},
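The one-sided Poisson rate test described above compares the DNM count observed in a gene with the count expected under the mutation model. A minimal sketch of that calculation is below; the trio count, per-gene probability, and observed count are placeholders, and the factor of 2 assumes the model's probabilities are per chromosome copy.

```python
from scipy.stats import poisson

n_trios = 2772         # proband count (placeholder echoing the SCZ file name)
p_gene_ptv = 1.2e-6    # hypothetical per-chromosome PTV mutation probability
observed = 3           # hypothetical PTV DNM count in this gene

expected = 2 * n_trios * p_gene_ptv
# One-sided P(X >= observed) under Poisson(expected); sf(k) = P(X > k)
pval = poisson.sf(observed - 1, expected)
print('expected %.4f, observed %d, p = %.3g' % (expected, observed, pval))
```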
{
"alpha_fraction": 0.5437081456184387,
"alphanum_fraction": 0.5537583827972412,
"avg_line_length": 36.53241729736328,
"blob_id": "8aca4d01259c618c897255848f86a3b2a1d4a484",
"content_id": "7f1f338801e38cf7d8208909ee4f7c1a51a58d3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19104,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 509,
"path": "/dnm_calling/TDT_CC.py",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "\"\"\"\n:File: TDT_CC.py\n:Author: Jack A. Kosmicki & Kamil Slowikowski\n:Last updated: 2016-04-15\n\nfor each variant count the number of transmitted, untransmitted variants in families\nfind variants in cases and controls\n\nSteps:\n1) Read in the family relations file; save the family relations in a global hash table\n2) Read in the individuals in the VCF and save them in a global hash table\n3) Assign the optional parameters as global variables\n4) Read in the VCF file one line at a time\n5) For each line in the VCF, apply filters to determine if it should be examined\n6) Upon passing the filters, examine each individual in the VCF\n7) For doTDT, only look at the probands (ignore cases/controls/parents)\n8) Apply filters to the proband\n9) If the proband passes the filters, apply filters to\n the parents and determine transmission\n10) After determining transmission, update the number of\n transmitted and untransmitted variants\n11) Print out the number of transmissions and untransmissions\n\nUsage:\n TDT_CC.py doTDT <vcf_File> <ped_File> <outputFile_Name> [options]\n TDT_CC.py doCaseControl <vcf_File> <ped_File> <outputFile_Name> [options]\n\nOptions:\n --pl=VALUE Specify PL threshold [default: 30]\n --dp=DP_VAL Specify minimum depth [default: 10]\n --ab_Ref=AB_Ref_VAL Specify allele balance threshold for homoRef [default: 0.1]\n --ab_Het=AB_Het_VAL Specify allele balance threshold for het [default: 0.3]\n --ab_Alt=AB_Alt_VAL Specify allele balance threshold for homoAlt [default: 0.9]\n --gq_Par=GQ_PAR_VAL GQ threshold for parents [default: 30]\n --gq_Kid=GQ_KID_VAL GQ threshold for child [default: 30]\n --gq_CC=GQ_CC_VAL GQ threshold for cases and controls [default: 30]\n --pass Use only variants with FILTER == PASS. Default is False. -- Note: not implemented yet.\n --vep Pull VEP annotations. Default is False.\n --unaff Examine unaffected probands (doTDT only). Default is False.\n -h, --help Show this message and exit.\n -v, --version Show the version and exit.\n\"\"\"\n\n\nfrom __future__ import division\nimport gzip\nimport sys\nimport os\nimport itertools\nimport cProfile\nimport pstats\nimport StringIO\nimport VCF_VEP\nimport FamilyPed\nimport filters\nimport VEP_Annotation as vepA\nimport numpy as np\nfrom sets import Set\nfrom docopt import docopt\n\n\n__version__ = 1.511\n__author__ = 'Jack A. 
Kosmicki <[email protected]>'\n__date__ = '04/15/2016'\n\n\ndef doTDT(v, family, thresh):\n \"\"\" Perform the Transmission Disequilibrium Test (TDT).\n\n Parameters\n ----------\n v: line of the VCF\n family is a hash table with:\n Key: individual id Value: [father id, mother id, sex]\n thresh: hash table of thresholds\n \"\"\"\n\n # If filters on the line failed move on.\n if not v:\n return None\n\n TU = [0, 0] # Array of [transmissions, untransmissions]\n TU_m = [0, 0] # same as TU but for males\n TU_f = [0, 0] # same as TU but for females\n mErr = 0 # Count mendelian error: Parents: ref, child: het\n mErr_o = 0 # Count other mendelian errors\n Nproband_alt = 0 # Number of homozygous alt probands that passed all thresholds\n AN = 0 # Number of families that passed all thresholds\n indivs_T = [v['CHROM'], v['POS'], v['ID'], v['REF'], v['ALT']] # Array of individuals who were transmitted the variant\n indivs_U = [v['CHROM'], v['POS'], v['ID'], v['REF'], v['ALT']] # Array of individuals who did not receive the variant\n\n for indiv_id in family.keys(): #loop through all the probands\n # indiv_data is their GT:AD:DP:GQ:PL stats\n indiv_data = v[indiv_id]\n\n if indiv_data == None:\n continue\n\n # Apply quality control filters on proband.\n if not filters.passFilters(indiv_data, thresh, thresh['GQ_Kid_Thresh']):\n continue\n\n # Apply PL filter to child.\n if not filters.PhredScaleFilter(indiv_data, thresh['PL_Thresh']):\n continue\n\n father = v[family[indiv_id][0]]\n mother = v[family[indiv_id][1]]\n\n # Check if the parents have the alternate allele\n # so they can pass it on AND apply quality control filters.\n if filters.TDT_Parent_Filters(father, mother, thresh):\n AN += 1 # all individuals in the nuclear family passed the filters\n\n # TDT operates differently in the hemizygous chromosomes\n # PAR regions defined from\n # http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/\n # in this case we are in the Par region so transmission is normal\n if filters.check_Hemizgyous(v['CHROM'], family[indiv_id][2],\n filters.inPar(v['POS'])):\n TU, TU_m, TU_f, mErr, mErr_o, transFlag = numberTransmissions(indiv_data['GT'], father['GT'], mother['GT'], TU, TU_m, TU_f, family[indiv_id][2], False, mErr, mErr_o)\n\n else:\n TU, TU_m, TU_f, mErr, mErr_o, transFlag = numberTransmissions(indiv_data['GT'], father['GT'], mother['GT'], TU, TU_m, TU_f, family[indiv_id][2], True, mErr, mErr_o)\n\n if indiv_data['GT'] == 'homoAlt':\n Nproband_alt += 1\n\n if transFlag == True: # if the variant was transmitted\n indivs_T.extend((indiv_id, family[indiv_id][2], indiv_data,\n family[indiv_id][0], father, family[indiv_id][1],\n mother))\n elif transFlag == False:\n indivs_U.extend((indiv_id, family[indiv_id][2], indiv_data,\n family[indiv_id][0], father, family[indiv_id][1],\n mother))\n\n # Ignore the cases in which we have 0 transmissions and 0 untransmissions.\n if TU[0] + TU[1] == 0:\n return None\n\n # Calculate percentage of mendelian errors.\n mendErrorPercent = (mErr + mErr_o) / (TU[0] + TU[1] + mErr + mErr_o)\n\n if vepFieldNames:\n gene, anno, pph2, sift, lof = vepA.findVariantAnnotation(v, args, vepFieldNames)\n else:\n gene, anno, pph2, sift, lof = ('', '', '', '', '')\n\n return [v['CHROM'], v['POS'], v['ID'], v['REF'], v['ALT'], v['FILTER'], v['VQSLOD'],\n gene, anno, pph2, sift, lof, v['AF'], v['AC'], AN,\n Nproband_alt, TU[0], TU[1], TU_m[0], TU_m[1], TU_f[0], TU_f[1],\n mErr, mErr_o, mendErrorPercent], indivs_T, indivs_U\n\n\ndef numberTransmissions(kid, dad, mom, TU, TU_m, TU_f, sex, 
xFlag, mErr, mErr_o):\n \"\"\"\n Determine the number of transmissions and nontransmissions.\n The X chromosome is different for males so xFlag indicates\n if we should examine those unique cases (which otherwise\n are Mendelian errors).\n\n Series of cases:\n Kid Dad Mom Transmissions Untransmissions\n Ref Het Het 0 2\n Ref Ref Het 0 1\n Het Het Het 1 1\n Het Ref Het 1 0\n Het Alt Het 0 1\n Alt Het Het 2 0\n Alt Het Alt 1 0\n\n - - X CHROM specific cases - -\n Ref Het Het 0 1\n Ref Ref Het 0 1\n Ref Alt Het 0 1\n Alt Het Het 1 0\n Alt Ref Het 1 0\n Alt Alt Het 1 0\n \"\"\"\n\n # Child Father Mother: [transmissions, untransmissions]\n transmissions = {\n 'homoRef het het': [0, 2],\n 'homoRef homoRef het': [0, 1],\n 'homoRef het homoRef': [0, 1],\n 'het het het': [1, 1],\n 'het homoRef het': [1, 0],\n 'het het homoRef': [1, 0],\n 'het homoAlt het': [0, 1],\n 'het het homoAlt': [0, 1],\n 'homoAlt het het': [2, 0],\n 'homoAlt het homoAlt': [1, 0],\n 'homoAlt homoAlt het': [1, 0]\n }\n\n xTransmissions = {\n 'homoRef het het': [0, 1],\n 'homoRef homoRef het': [0, 1],\n 'homoRef homoAlt het': [0, 1],\n 'homoAlt het het': [1, 0],\n 'homoAlt homoRef het': [1, 0],\n 'homoAlt homoAlt het': [1, 0]\n }\n\n # mendelian errors we don't care about\n mendelErrors = Set([\n 'homoRef homoRef homoAlt',\n 'homoRef homoAlt homoRef',\n 'homoRef het homoAlt',\n 'homoRef homoAlt het',\n 'homoRef homoAlt homoAlt',\n 'het homoAlt homoAlt',\n 'homoAlt homoRef homoRef',\n 'homoAlt homoRef het',\n 'homoAlt het homoRef',\n 'homoAlt homoAlt homoRef',\n 'homoAlt homoRef homoAlt'\n ])\n\n key = '{} {} {}'.format(kid, dad, mom)\n transFlag = False # flag to indicate whether the variant was transmitted or not\n\n counts = transmissions.get(key)\n\n # If we are in a unique x chromosome case.\n if xFlag:\n counts = xTransmissions.get(key)\n\n # If not in counts, it's a Mendelian Error.\n if key == 'het homoRef homoRef':\n mErr += 1\n\n if key in mendelErrors and not xFlag:\n mErr_o +=1\n\n if not counts:\n return TU, TU_m, TU_f, mErr, mErr_o, None\n\n if counts[0] > 0: # if the variant was transmitted, indicate so\n transFlag = True\n\n # Update the total number of transmissions and untransmissions using vector addition.\n TU = [x+y for x,y in zip(TU, counts)]\n\n if sex == 'male':\n TU_m = [x+y for x,y in zip(TU_m, counts)]\n elif sex == 'female':\n TU_f = [x+y for x,y in zip(TU_f, counts)]\n\n return TU, TU_m, TU_f, mErr, mErr_o, transFlag\n\n\ndef doCaseControl(v, cases, controls, thresh):\n \"\"\" Analyzes case/control data, counting the\n number of reference and alternate alleles.\n\n Parameters\n ----------\n v: line of the VCF\n case and control are hash tables with:\n Key: individual id Value: gender\n thresh is a hash table with:\n Key: name of threshold Value: threshold\n \"\"\"\n\n # If filters on the line failed move on.\n if not v:\n return None\n\n # Count the number of ref and alt alleles in cases and controls.\n caseRefs_m = 0\n caseAlts_m = 0\n controlRefs_m = 0\n controlAlts_m = 0\n caseRefs_f = 0\n caseAlts_f = 0\n controlRefs_f = 0\n controlAlts_f = 0\n\n\n # Loop through all the individuals in the case hash table.\n for indiv_id in cases:\n\n # indiv_data is their GT:AD:DP:GQ:PL stats\n indiv_data = v[indiv_id]\n\n if indiv_data == None:\n continue\n\n # Apply filters and update counts Note: cases[indiv_id] is gender\n if ProcessCC(indiv_data, thresh):\n parFlag = filters.check_Hemizgyous(v['CHROM'], cases[indiv_id],\n filters.inPar(v['POS']) )\n\n if cases[indiv_id] == 'male':\n caseRefs_m, caseAlts_m = 
Counts(indiv_data, caseRefs_m, caseAlts_m,\n parFlag)\n elif cases[indiv_id] == 'female':\n caseRefs_f, caseAlts_f = Counts(indiv_data, caseRefs_f, caseAlts_f,\n parFlag)\n\n # Loop through all indivs in the control hash table.\n for indiv_id in controls:\n\n # indiv_data is their GT:AD:DP:GQ:PL stats\n indiv_data = v.get(indiv_id)\n\n if indiv_data == None:\n continue\n\n # Apply filters and update counts Note: controls[indiv_id] is gender\n if ProcessCC(indiv_data, thresh):\n parFlag = filters.check_Hemizgyous(v['CHROM'], controls[indiv_id],\n filters.inPar(v['POS']) )\n\n if controls[indiv_id] == 'male':\n controlRefs_m, controlAlts_m = Counts(indiv_data, controlRefs_m,\n controlAlts_m, parFlag)\n elif controls[indiv_id] == 'female':\n controlRefs_f, controlAlts_f = Counts(indiv_data, controlRefs_f,\n controlAlts_f, parFlag)\n\n caseAlts = caseAlts_f + caseAlts_m\n caseRefs = caseRefs_f + caseRefs_m\n controlAlts = controlAlts_f + controlAlts_m\n controlRefs = controlRefs_f + controlRefs_m\n\n if (caseAlts + controlAlts == 0) | (caseRefs + controlRefs == 0):\n return None\n\n AC = caseAlts + controlAlts\n AN = AC + caseRefs + controlRefs\n AF = AC / AN\n\n if vepFieldNames:\n gene, anno, pph2, sift, lof = vepA.findVariantAnnotation(v, args, vepFieldNames)\n else:\n gene, anno, pph2, sift, lof = ('', '', '', '', '')\n\n return [v['CHROM'], v['POS'], v['ID'], v['REF'], v['ALT'], v['FILTER'], v['VQSLOD'],\n gene, anno, pph2, sift, lof, AF, AC, AN,\n caseRefs, caseAlts, controlRefs, controlAlts,\n caseRefs_m, caseAlts_m, controlRefs_m, controlAlts_m,\n caseRefs_f, caseAlts_f, controlRefs_f, controlAlts_f]\n\n\ndef ProcessCC(indivAttr, thresh):\n \"\"\" Apply filters to the individual: True = pass, False = fail\n\n Parameters\n ----------\n indivAttr: [GT:AD:DP:GQ:PL]\n thresh: hash table of thresholds\n \"\"\"\n\n # Apply quality filters.\n if not filters.passFilters(indivAttr, thresh, thresh['GQ_CC_Thresh']):\n return False\n\n # Apply PL filter to individual.\n if not filters.PhredScaleFilter(indivAttr, thresh['PL_Thresh']):\n return False\n\n return True\n\n\ndef Counts(indivAttr, Refcount, Altcount, parFlag):\n \"\"\" Update the number of reference and alternate variant counts.\n\n Parameters\n ----------\n indivAttr: [GT:AD:DP:GQ:PL]\n parFlag indicates if we are in a hemizgyous case or not\n True: not in hemizygous case False: in hemizygous\n \"\"\"\n\n # homoRef has 2 reference alleles\n if indivAttr['GT'] == 'homoRef':\n if parFlag:\n Refcount += 2\n return Refcount, Altcount\n else:\n Refcount += 1\n return Refcount, Altcount\n\n # het has 1 reference allele and 1 alternate allele\n elif indivAttr['GT'] == 'het':\n if parFlag:\n Refcount += 1\n Altcount += 1\n return Refcount, Altcount\n else:\n return Refcount, Altcount\n\n # homoAlt has 2 alternate alleles\n elif indivAttr['GT'] == 'homoAlt':\n if parFlag:\n Altcount += 2\n return Refcount, Altcount\n else:\n Altcount += 1\n return Refcount, Altcount\n\n # If the individual has another genotype then return the input.\n return Refcount, Altcount\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__, version='0.6.1')\n print(args)\n\n # GLOBAL VARIABLES\n # Case and control are hash tables with:\n # Key: individual id Value: gender\n # Family is a hash table with:\n # Key: individual id Value: [father id, mother id, sex]\n\n # Hash table of thresholds\n thresholds = {\n 'PL_Thresh': float(args['--pl']),\n 'DP_Thresh': float(args['--dp']),\n 'AB_Ref_Thresh': float(args['--ab_Ref']),\n 'AB_Het_Thresh': float(args['--ab_Het']),\n 
'AB_Alt_Thresh': float(args['--ab_Alt']),\n 'GQ_Kid_Thresh': float(args['--gq_Kid']),\n 'GQ_Parent_Thresh': float(args['--gq_Par']),\n 'GQ_CC_Thresh': float(args['--gq_CC']) }\n\n PASS = args['--pass']\n\n # Set VEP field names line to None unless the VCF is annotated.\n vepFieldNames = None\n\n fname, fextension = os.path.splitext(args['<outputFile_Name>'])\n\n writer = open(args['<outputFile_Name>'], 'wb')\n writer.write('\\t'.join(['CHROM', 'POSITION', 'ID', 'REF', 'ALT', 'FILTER', 'VQSLOD',\n 'GENE_NAME', 'FUNCTIONAL_CLASS', 'SIFT', 'PolyPhen2',\n 'loftee', 'AF', 'AC', 'AN'])+ '\\t')\n\n fn_open = gzip.open if args['<vcf_File>'].endswith('.gz') else open\n\n if args['doTDT']:\n pr = cProfile.Profile()\n pr.enable()\n\n #writer2 = open(fname + '_indivs_T' + fextension, 'wb')\n #writer3 = open(fname + '_indivs_U' + fextension, 'wb')\n\n # Write out the header.\n writer.write('\\t'.join(['Nproband_HomoAlt',\n 'transmitted','untransmitted',\n 'transmitted_m','untransmitted_m',\n 'transmitted_f','untransmitted_f',\n 'De-Novo_Mendel_Errors','Other_Mendel_Errors',\n 'Percent_of_Mendel_Errors']) + '\\n')\n #writer2.write('\\t'.join(['CHROM','POSITION','ID','REF','ALT']) + '\\n')\n #writer3.write('\\t'.join(['CHROM','POSITION','ID','REF','ALT']) + '\\n')\n\n with fn_open(args['<vcf_File>']) as fh:\n for line in fh:\n line = line.rstrip('\\r\\n').rstrip('\\n').rstrip('\\t')\n if line.startswith('##INFO=<ID=CSQ'):\n vepFieldNames = line.split('Format: ')[-1].strip('\">').split('|')\n if line.startswith('#CHROM'):\n indivs = line.split('\\t')[9:] # individual IDs in the VCF\n pedigree, vcfIndivs = FamilyPed.readFamily(args['<ped_File>'],\n indivs, args['--unaff'])\n if line.startswith('#'):\n continue\n # the following lines contain the variants\n else:\n result = doTDT(VCF_VEP.parse(line, vcfIndivs, PASS), pedigree,\n thresholds)\n if result:\n writer.write('\\t'.join(map(str,result[0])) + '\\n')\n #writer2.write('\\t'.join(map(str,result[1])) + '\\n')\n #writer3.write('\\t'.join(map(str,result[2])) + '\\n')\n\n pr.disable()\n s = StringIO.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print s.getvalue()\n\n elif args['doCaseControl']:\n # Write out the header.\n writer.write('\\t'.join(['caseRefs','caseAlts','controlRefs','controlAlts',\n 'caseRefs_m','caseAlts_m','controlRefs_m','controlAlts_m',\n 'caseRefs_f','caseAlts_f','controlRefs_f','controlAlts_f'])+'\\n')\n\n with fn_open(args['<vcf_File>']) as fh:\n for line in fh:\n line = line.rstrip('\\r\\n').rstrip('\\n').rstrip('\\t')\n if line.startswith('##INFO=<ID=CSQ'):\n vepFieldNames = line.split('Format: ')[-1].strip('\">').split('|')\n if line.startswith('#CHROM'):\n indivs = line.split('\\t')[9:] # individual IDs in the VCF\n case, control, caseControl = FamilyPed.readCC(args['<ped_File>'],\n indivs)\n if line.startswith('#'):\n continue\n else:\n result = doCaseControl(VCF_VEP.parse(line, caseControl, PASS), case,\n control, thresholds)\n if result:\n writer.write('\\t'.join(map(str,result)) + '\\n')\n"
},
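To make the genotype-keyed lookup in numberTransmissions concrete: each trio is scored by building a 'child father mother' key and reading off [transmitted, untransmitted] counts. The standalone snippet below reproduces a few entries of that table purely for illustration; it is not an extra module in the repository.

```python
# A subset of the transmission table from TDT_CC.py:
# key = 'child father mother', value = [transmitted, untransmitted]
transmissions = {
    'homoRef het het': [0, 2],
    'het homoRef het': [1, 0],
    'homoAlt het het': [2, 0],
}

trio = ('het', 'homoRef', 'het')            # child, father, mother
counts = transmissions.get('%s %s %s' % trio)
if counts is None:
    print('Mendelian error or uninformative trio')
else:
    print('transmitted=%d untransmitted=%d' % tuple(counts))
    # -> transmitted=1 untransmitted=0
```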
{
"alpha_fraction": 0.6245414018630981,
"alphanum_fraction": 0.67957603931427,
"avg_line_length": 29.283950805664062,
"blob_id": "f4d55fe7a2d91c26189a8d411adbd39c4c549d63",
"content_id": "72c61560ed1493f55e5b2ab8ecb2b3bc764f4a6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2453,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 81,
"path": "/geneset_enrichment/process_geneset_enrichment_results.R",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "#!bin/Rscript\n\n# process_geneset_enrichment_results.R\n\n## Author: Daniel P. Howrigan ([email protected])\n## Last Modified: February 2019\n\n\n## ----- DESCRIPTION:\n\n## Command line script to convert the default DNM gene set enrichment results into multiple .tsv tables\n## Orders by mutation model p-value\n## Adds prettier column names\n\n# USAGE:\n\n# Rscript process_geneset_enrichment_results.R \\\n# [results file] \\\n# [output path] \\\n# [order by pval] \\\n\n\n# example usage:\n\n# Rscript process_geneset_enrichment_results.R \\\n# SCZ_n2772_CCDS_coverageQC_candidate_genesets.tsv \\\n# results/SCZ_n2772_CCDS_coverageQC/ \\\n# TRUE\n\n\n## ----- NOTES:\n\n## requires the default annotations output from geneset_enrichment.R \n\n\n\n\n\n## read in command line argument\nargs <- commandArgs(TRUE)\ninfile <- args[1]\noutpath <- args[2]\nordering <- args[3]\n\n## check / create results directory\nsystem(paste0('mkdir -p ',outpath))\n\ninput <- read.table(infile,h=T,sep='\\t',stringsAsFactors=F)\n\nnms <- c('gene set','genes listed','genes not tested','genes tested','percent genes tested',\n 'SCZ DNMs tested','Control DNMs tested','SCZ DNMs overlapping','Control DNMs overlapping',\n 'SCZ observed proportion','Model expectation proportion','Model enrichment','Model pval','Model lower 95% CI','Model upper 95% CI',\n 'Control observed proportion','CaseControl enrichment','CaseControl pval','CaseControl lower 95% CI','CaseControl upper 95% CI')\n\nall <- input[,c(1:5,6,7,16,17,27,26,28:31,33:37)]\nif (ordering==T) { all <- all[order(all$all_pval),] }\nnames(all) <- nms\nwrite.table(all,paste0(outpath,'all.tsv'),col=T,row=F,quo=F,sep='\\t')\n\nlofmis <- input[,c(1:5,8,9,18,19,39,38,40:43,45:49)]\nif (ordering==T) { lofmis <- lofmis[order(lofmis$lofmis_pval),] }\nnames(lofmis) <- nms\nwrite.table(lofmis,paste0(outpath,'lofmis.tsv'),col=T,row=F,quo=F,sep='\\t')\n\nlof <- input[,c(1:5,10,11,20,21,51,50,52:55,57:61)]\nif (ordering==T) { lof <- lof[order(lof$lof_pval),] }\nnames(lof) <- nms\nwrite.table(lof,paste0(outpath,'lof.tsv'),col=T,row=F,quo=F,sep='\\t')\n\nmis <- input[,c(1:5,12,13,22,23,63,62,64:67,69:73)]\nif (ordering==T) { mis <- mis[order(mis$mis_pval),] }\nnames(mis) <- nms\nwrite.table(mis,paste0(outpath,'mis.tsv'),col=T,row=F,quo=F,sep='\\t')\n\nsyn <- input[,c(1:5,14,15,24,25,75,74,76:79,81:85)]\nif (ordering==T) { syn <- syn[order(syn$syn_pval),] }\nnames(syn) <- nms\nwrite.table(syn,paste0(outpath,'syn.tsv'),col=T,row=F,quo=F,sep='\\t')\n\n\n## END of process_geneset_enrichment_results.R\n"
},
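process_geneset_enrichment_results.R above slices one wide results table into per-annotation files, sorts each by its model p-value, and applies readable column names. The pandas rendering below shows the same reshaping for a single annotation class; pandas and the named columns are illustrative assumptions, since the R script selects columns by numeric index.

```python
import pandas as pd

results = pd.read_csv('SCZ_n2772_CCDS_coverageQC_candidate_genesets.tsv',
                      sep='\t')

# Keep the PTV (lof) columns, sort by the model p-value, and rename
# (these column names are hypothetical stand-ins for the indexed ones)
lof = results[['geneset', 'lof_case_overlap', 'lof_control_overlap',
               'lof_pval']].sort_values('lof_pval')
lof.columns = ['gene set', 'SCZ DNMs overlapping',
               'Control DNMs overlapping', 'Model pval']
lof.to_csv('results/lof.tsv', sep='\t', index=False)
```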
{
"alpha_fraction": 0.7438507080078125,
"alphanum_fraction": 0.7497879266738892,
"avg_line_length": 32.657142639160156,
"blob_id": "21f8b9bf8f9cf0d7e1a518eabe346b963aceed29",
"content_id": "a28b8565e168c0575c527b84e9ea6ca737d75bf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1179,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 35,
"path": "/exome_burden/README.md",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "# Table of Contents\n* [Overview](#overview) \n* [DNM_burden.R](#DNM_burden.R)\n\n### Overview\n\nExome-wide tests for rate of DNMs given your trio count\n * Details of the test can be found in the supplementary material here: https://personal.broadinstitute.org/howrigan/taiwanese_trios_SCZ_supplement/Taiwanese-trio_Supplement_BioArxiv.pdf \n * Supplementary Section 7 - Statistical tests used to analyze DNM rates and patterns\n * Supplementary Section 10 - DNM burden in combined SCZ cohorts\n\n\n### DNM_burden.R\n\nInteractive script to perform a two-sided poisson rate test across the exome relative to controls and mutation model expectations \n * Requires exome-wide mutation expectations for PTV, missense, and synonymous coding variation\n\n```\n## ----- DESCRIPTION:\n\n## Interactive Script to generate DNM burden results\n\n## ----- SCRIPT SET UP:\n\n## PART 1: read in relevant files\n## PART 2: aggregate DNMs by cohort\n## PART 3: test DNM burden in SCZ vs control\n## PART 4: test DNM burden in SCZ vs mutation model\n\n\n## ----- NOTES:\n\n## The mutation model used has already adjusted X chromosome DNM rates to the proportion of male probands among bothe SCZ and control cohorts\n\n```\n\n"
},
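DNM_burden.R above compares exome-wide per-trio DNM rates between cohorts with a two-sided Poisson rate test. One standard exact formulation conditions on the total DNM count, which turns the rate-ratio comparison into a binomial test; the sketch below uses placeholder counts and mirrors the general statistic, not necessarily the script's exact code.

```python
from scipy.stats import binomtest  # SciPy >= 1.7

scz_dnms, scz_trios = 3100, 2772   # placeholder counts
con_dnms, con_trios = 2300, 2216

# Conditional on the total count, the SCZ share of DNMs is Binomial with
# success probability scz_trios / total_trios under equal per-trio rates
total = scz_dnms + con_dnms
expected_frac = scz_trios / float(scz_trios + con_trios)
res = binomtest(scz_dnms, total, expected_frac)
print('two-sided rate-ratio p = %.3g' % res.pvalue)
```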
{
"alpha_fraction": 0.7765293121337891,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 65.75,
"blob_id": "b18522bceb203a72c646d124b291be580cb83212",
"content_id": "352b8cb9350aadb7f9d1fdca082c011cadfe4b17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 12,
"path": "/dnm_calling/README.md",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "# Overview\nThis subdirectory contains the python scripts that were used to parse the VCF for results and generate parimary annotations, along with the shell scripts that call the python scripts with specific files and filters\n\nde_novo_finder_3.py (Author: Kaitlin Samocha) - script to find de novo mutations in a VCF file\n\nTDT_CC.py (Author: Jack Kosmicki) - script to collect variant level transmission results\n * filters.py = helper script to set genotype filters and identify PAR regions\n * FamilyPed.py = helper script to split trios and case-control individuals from pedigree file\n * processTDT.py = helper script to run TDT test on results\n\nrun_lof_annotation.py (Author: Konrad Karczewski) - script to annotation the VCF with VEP (for use on the LSF job system)\n * loftee_utils.py = helper script to parse VEP annotation\n"
},
{
"alpha_fraction": 0.5715034008026123,
"alphanum_fraction": 0.5806705355644226,
"avg_line_length": 26.86861228942871,
"blob_id": "fa71c732e80922816fee6673da08205fdaa443bb",
"content_id": "dd98f1887d1bd96da606dd4756955e62c448f48e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7636,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 274,
"path": "/dnm_calling/filters.py",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "\"\"\"\n:File: filters.py\n:Author: Jack A. Kosmicki\n:Last updated: 2016-04-04\n\nFile of filters for quality control of VCF files.\nAdditional filters for determining Par regions.\n\"\"\"\n\nfrom __future__ import division\nimport sys\n\n\ndef passFilters(stats, thresh, GQ_Thresh):\n \"\"\" Series of filters used to remove bad calls.\n\n We allow case/controls, parents, and kids to have different GQ thresholds,\n therefore to make a generic function, GQ_Thresh is specified despite all\n three GQ thresholds being located in the hash table 'thresh'.\n\n Parameters\n ----------\n stats: individual's values (GT:AD:DP:GQ:PL)\n GT: Genotype (Ref, Het, Alt)\n AD: Allelic Depth\n DP: Number of reads that passed the filters (sum of AD)\n GQ: Genotype Quality\n PL: Phred Quality Score\n thresh: hash table of threshold values\n GQ_Thresh: (int) minimum GQ threshold\n\n Returns\n -------\n True: Individual passed the filters.\n False: Individual failed the filters.\n \"\"\"\n\n genotype = stats['GT']\n\n # ensure individual has a genotype\n if genotype == None:\n return False\n\n # genotype quality score must be >= GQ Threshold [default: 30]\n elif float(stats['GQ']) < GQ_Thresh:\n return False\n\n # read depth must be >= DP threshold [default: 10]\n elif float(stats['DP']) < thresh['DP_Thresh']:\n return False\n\n # Allele balance of <=0.1 for homozygous Reference individuals\n elif genotype == 'homoRef':\n ABratio = AllelicBalance(stats['AD'])\n if ABratio > thresh['AB_Ref_Thresh']:\n return False\n else:\n return True\n\n # Allele balance of between 0.3 and 0.7 for hets\n elif genotype == 'het':\n # calculate the allelic balance from the allelic depth\n ABratio = AllelicBalance(stats['AD'])\n if ABratio < thresh['AB_Het_Thresh'] or ABratio > (1-thresh['AB_Het_Thresh']):\n return False\n else:\n return True\n\n # Allele balance of >=0.9 for homozygous Alternate individuals\n elif genotype == 'homoAlt':\n ABratio = AllelicBalance(stats['AD'])\n if ABratio < thresh['AB_Alt_Thresh']:\n return False\n else:\n return True\n\n\ndef TDT_Parent_Filters(father, mother, thresh):\n \"\"\" Determine if parents have an alternate allele to pass on.\n\n Parameters\n ----------\n father: GT:AD:DP:GQ:PL values\n mother: GT:AD:DP:GQ:PL values\n thresh: hash table of threshold values\n\n Returns\n -------\n True = parents have alternate allele and passed filters\n False = parents don't have alternate allele or did not pass filters\n \"\"\"\n\n # 1) find the parents' data\n if father == None or mother == None:\n return False\n else:\n\n # determine the status of the phred Filter which will be either\n # True (they passed) or False (they didn't pass)\n dad_Phred_Pass = PhredScaleFilter(father, thresh['PL_Thresh'])\n mom_Phred_Pass = PhredScaleFilter(mother, thresh['PL_Thresh'])\n\n # 2) apply allelic balance filters to parents\n # Both parents must pass all the filters\n if not passFilters(father, thresh, thresh['GQ_Parent_Thresh']) or not passFilters(mother, thresh, thresh.get('GQ_Parent_Thresh')):\n return False\n\n # 3) apply PL filter to parents\n elif not dad_Phred_Pass or not mom_Phred_Pass:\n return False\n\n else:\n return True\n\n\ndef AllelicBalance(AD):\n \"\"\" Calculate the allelic balance.\n alternate reads\n allelic balance = -----------------------------------\n alternate reads + reference reads\n\n Parameters\n ----------\n AD: Array of integers [Number of Reference Reads, Number of Alternate Reads]\n \"\"\"\n ref = AD[0]\n alt = AD[1]\n\n return alt / (ref + alt)\n\n\ndef 
PhredScaleFilter(stats, PL_Thresh):\n    \"\"\" This function determines what phred scale filter to use for\n        a given individual as different genotypes require\n        different filters\n\n    Parameters\n    ----------\n    stats: GT:AD:DP:GQ:PL values\n    PL_Thresh: (int) the minimum Phred Quality Score threshold\n    \"\"\"\n\n    gtype = stats['GT']\n\n    if gtype == 'homoRef':\n        return PhredScaleFilter_HOMOREF(stats, PL_Thresh)\n    elif gtype == 'het':\n        return PhredScaleFilter_HET(stats, PL_Thresh)\n    elif gtype == 'homoAlt':\n        return PhredScaleFilter_HOMOALT(stats, PL_Thresh)\n    else:\n        return False\n\n\ndef PhredScaleFilter_HET(stats, PL_Thresh):\n    \"\"\" Apply filters for the normalized Phred Quality Scores for\n        AA, AB, BB genotypes where A = reference allele,\n        B = alternate allele\n\n    Parameters\n    ----------\n    stats: GT:AD:DP:GQ:PL values\n    PL_Thresh: (int) the minimum Phred Quality Score threshold\n\n    Returns\n    -------\n    True: Passed Filter\n    False: Failed Filter\n    \"\"\"\n\n    homoRef = stats['PL'][0]\n    het = stats['PL'][1]\n    homoAlt = stats['PL'][2]\n\n    if homoRef < PL_Thresh:\n        return False\n    elif het != 0:\n        return False\n    elif homoAlt < PL_Thresh:\n        return False\n    else:\n        return True\n\n\ndef PhredScaleFilter_HOMOREF(stats, PL_Thresh):\n    \"\"\" Apply filters for the normalized Phred Quality Scores for\n        AA, AB, BB genotypes where A = reference allele,\n        B = alternate allele\n\n    Parameters\n    ----------\n    stats: GT:AD:DP:GQ:PL values\n    PL_Thresh: (int) the minimum Phred Quality Score threshold\n\n    Returns\n    -------\n    True: Passed Filter\n    False: Failed Filter\n    \"\"\"\n\n    homoRef = stats['PL'][0]\n    het = stats['PL'][1]\n    homoAlt = stats['PL'][2]\n\n    if homoRef != 0:\n        return False\n    elif het < PL_Thresh:\n        return False\n    elif homoAlt < PL_Thresh:\n        return False\n    else:\n        return True\n\n\ndef PhredScaleFilter_HOMOALT(stats, PL_Thresh):\n    \"\"\" Apply filters for the normalized Phred Quality Scores for\n        AA, AB, BB genotypes where A = reference allele,\n        B = alternate allele\n\n    Parameters\n    ----------\n    stats: GT:AD:DP:GQ:PL values\n    PL_Thresh: (int) the minimum Phred Quality Score threshold\n\n    Returns\n    -------\n    True: Passed Filter\n    False: Failed Filter\n    \"\"\"\n\n    homoRef = stats['PL'][0]\n    het = stats['PL'][1]\n    homoAlt = stats['PL'][2]\n\n    if homoAlt != 0:\n        return False\n    elif het < PL_Thresh:\n        return False\n    elif homoRef < PL_Thresh:\n        return False\n    else:\n        return True\n\n\ndef check_Hemizgyous(chrom,gender,inParRegion):\n    \"\"\" Hemizygous chromosomes have to be dealt with differently.\n\n    Cases:\n    Y is hemizygous\n    X is hemizygous in males if they aren't in the PAR region\n\n    True: not in hemizygous case\n    False: in a hemizygous case\n    \"\"\"\n\n    return chrom not in ('X','Y') or gender == 'female' or inParRegion\n\n\ndef inPar(pos):\n    \"\"\" Are you in the pseudo-autosomal region (PAR)?\n\n    Current PAR regions defined in GRCh37 from\n    http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/\n\n    Parameters\n    ----------\n    pos: (int) position of the variant\n\n    Returns\n    -------\n    True: in par    False: not in par\n    \"\"\"\n\n    return (60001 <= pos <= 2699520) or (154931044 <= pos <= 155260560)\n"
},
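A short usage sketch of the filters.py module above: build one genotype record in the GT:AD:DP:GQ:PL layout the VCF parser produces and pass it through passFilters with the default thresholds from TDT_CC.py. The record values are invented, and the import assumes filters.py is on the Python path.

```python
import filters  # the module shown in the record above

thresholds = {
    'PL_Thresh': 30.0, 'DP_Thresh': 10.0,
    'AB_Ref_Thresh': 0.1, 'AB_Het_Thresh': 0.3, 'AB_Alt_Thresh': 0.9,
    'GQ_Kid_Thresh': 30.0, 'GQ_Parent_Thresh': 30.0, 'GQ_CC_Thresh': 30.0,
}

# A het call with 12 ref / 11 alt reads: allele balance 11/23 ~ 0.48,
# inside the [0.3, 0.7] window that the het branch enforces
stats = {'GT': 'het', 'AD': [12, 11], 'DP': 23, 'GQ': 99,
         'PL': [200, 0, 250]}

print(filters.passFilters(stats, thresholds, thresholds['GQ_Kid_Thresh']))  # True
print(filters.PhredScaleFilter(stats, thresholds['PL_Thresh']))             # True
```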
{
"alpha_fraction": 0.7284482717514038,
"alphanum_fraction": 0.8017241358757019,
"avg_line_length": 65.28571319580078,
"blob_id": "d420e0662967226ba496d48d431b205042c28e56",
"content_id": "0e10468b444b5b539aba44918f33dd2a90e8264d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 464,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 7,
"path": "/dnm_calling/TaiTrios_AllWaves_08.2015_de_novo_finder_trio1698.sh",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\npython \\\n/psych/genetics_data/howrigan/projects/taiwan_trio/scripts/de_novo_finder_3.py \\\n/seq/dax/TaiTrios_AllWaves_08.2015/Exome/v1/TaiTrios_AllWaves_08.2015.vcf.gz \\\n/psych/genetics_data/howrigan/projects/taiwan_trio/data/Taiwanese_Trio_AllWaves_08.2015_confirmedTrios_wholeBloodProbands.fam \\\n/humgen/atgu1/fs03/wip/kaitlin/all_ESP_counts_5.28.13.txt > \\\n/psych/genetics_data/howrigan/projects/taiwan_trio/denovo_data/TaiTrios_AllWaves_08.2015.dnm\n"
},
{
"alpha_fraction": 0.5455231070518494,
"alphanum_fraction": 0.5553251504898071,
"avg_line_length": 28.66473960876465,
"blob_id": "058e52ab9ac5848ac1e8b4643583c7b9821e8295",
"content_id": "13565831791dd603d5b51ab12b155017f9c65b25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5305,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 173,
"path": "/dnm_calling/VCF_VEP.py",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\r\n\"\"\"\r\n:File: VCF_VEP.py\r\n:OriginalAuthor: Kamil Slowikowski <[email protected]>\r\n:Edited: Jack A. Kosmicki\r\n:Last updated: 2015-08-10\r\n\r\nRead in a line from a VCF and return a hash table with the columns of the VCF as keys\r\n and the records for each variant as the values. The info field is broken up into\r\n key value pairs.\r\nHence, vcfLine['CHROM'] will return the chromosome the variant is on.\r\n\r\nNote:\r\nvcf lines that do not pass GATK filters will not be processed nor are multi-allelic variants.\r\n\"\"\"\r\n\r\n\r\nimport gzip\r\nimport sys\r\nfrom sets import Set\r\n\r\n\r\ndef parse(line, hashTable, PASS):\r\n \"\"\"Parse a single VCF line and return a dictionary.\r\n\r\n Parameters\r\n ----------\r\n line: a line in the VCF (string)\r\n hashTables: hash tables of individuals to look up in the VCF\r\n PASS: Flag indicating whether only PASS sites should be examined\r\n \"\"\"\r\n\r\n vcfLine = {}\r\n\r\n # the 1st 8 defined Columns: CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT\r\n FIELDS = line.split('\\t')[:9]\r\n\r\n vcfLine['FILTER'] = FIELDS[6]\r\n\r\n if vcfLine['FILTER'].startswith('InbreedingCoeff'):\r\n\treturn None\r\n \r\n # if PASS is true, examine only sites whose FILTER is PASS\r\n if PASS:\r\n if not vcfLine['FILTER'].startswith('PASS'):\r\n return None\r\n\r\n # ignore multi-allelics\r\n if ',' in FIELDS[4]:\r\n return None\r\n\r\n # load dictionary\r\n vcfLine['CHROM'] = FIELDS[0]\r\n vcfLine['POS'] = int(FIELDS[1])\r\n vcfLine['ID'] = FIELDS[2]\r\n vcfLine['REF'] = FIELDS[3]\r\n vcfLine['ALT'] = FIELDS[4]\r\n vcfLine['QUAL'] = FIELDS[5]\r\n\r\n # VCF file format must be GT:AD:DP:GQ:PL\r\n if not checkFORMAT(FIELDS[8]):\r\n return None\r\n else:\r\n format = FIELDS[8].split(':')\r\n format_len = len(format)\r\n\r\n # INFO field consists of \"key1=value;key2=value;...\".\r\n infos = FIELDS[7].split(';')\r\n\r\n for i, info in enumerate(infos, 1):\r\n # It should be \"key=value\".\r\n try:\r\n key, value = info.split('=')\r\n # But sometimes it is just \"value\".\r\n except ValueError:\r\n key = 'INFO{}'.format(i)\r\n value = info\r\n # Set the value to None if there is no value.\r\n if not value:\r\n value = None\r\n vcfLine[key] = value\r\n\r\n if not PASS and float(vcfLine['VQSLOD']) < -2.632:\r\n return None\r\n\r\n fields = line.split('\\t')[9:] # fields are all of the individuals in the vcf.\r\n\r\n for indivID in hashTable.keys():\r\n # individuals values based on the format field\r\n stats = {}\r\n\r\n # indivAttr (individual attributes) are GT:AD:DP:GQ:PL\r\n indivAttr = fields[hashTable[indivID]].strip('\"').split(':')\r\n\r\n # If the sizes don't match, the individual has missing values in the format field.\r\n # i.e., format = GT:AD:DP and indivAttr = ./.\r\n if len(indivAttr) != format_len:\r\n vcfLine[indivID] = None\r\n else:\r\n # create a dictionary of GT:AD:DP:GQ:PL\r\n for i in range(format_len):\r\n if ',' in indivAttr[i]:\r\n try:\r\n indivAttr[i] = map(int, indivAttr[i].split(','))\r\n except ValueError:\r\n vcfLine[indivID] = None\r\n stats[format[i]] = indivAttr[i]\r\n\r\n # sometimes AD is none\r\n if stats['AD'] != '.':\r\n stats['GT'] = find_gtype(stats)\r\n vcfLine[indivID] = stats\r\n else:\r\n vcfLine[indivID] = None\r\n\r\n return vcfLine\r\n\r\n\r\ndef find_gtype(stats):\r\n \"\"\" Determine the genotype of the individual by checking both\r\n the genotype and AD.\r\n\r\n Parameters\r\n ----------\r\n stats = format field [GT,[AD_ref, AD_alt],DP,GQ,[PL_ref, PL_het, 
PL_alt]]\r\n \"\"\"\r\n\r\n # Throw away individuals with 0 reference and alternate reads.\r\n if stats['AD'][0] == 0 and stats['AD'][1] == 0:\r\n return None\r\n elif stats['GT'] in ('0/0', '0|0'):\r\n return 'homoRef'\r\n elif stats['GT'] in ('0/1', '0|1', '1/0', '1|0'):\r\n return 'het'\r\n elif stats['GT'] in ('1/1', '1|1'):\r\n return 'homoAlt'\r\n else:\r\n return None\r\n\r\n\r\ndef checkFORMAT(formatField):\r\n \"\"\" This program requires the following values in the FORMAT field:\r\n GT: genotype\r\n AD: allelic depth\r\n DP: total read depth\r\n GQ: Genotype Quality\r\n PL: Phred-scaled likelihoods\r\n\r\n If any of these fields are missing then the line cannot be read.\r\n\r\n Parameters\r\n ----------\r\n formatField: (: delimited string) 9th column of the vcf.\r\n \"\"\"\r\n\r\n format = Set(formatField.split(':'))\r\n if 'GT' not in format:\r\n sys.stderr.write('WARNING: GT not in format field.')\r\n return False\r\n elif 'AD' not in format:\r\n sys.stderr.write('WARNING: AD not in format field.')\r\n return False\r\n elif 'DP' not in format:\r\n sys.stderr.write('WARNING: DP not in format field.')\r\n return False\r\n elif 'GQ' not in format:\r\n sys.stderr.write('WARNING: GQ not in format field.')\r\n return False\r\n elif 'PL' not in format:\r\n sys.stderr.write('WARNING: PL not in format field.')\r\n return False\r\n else:\r\n return True\r\n"
},
{
"alpha_fraction": 0.6763392686843872,
"alphanum_fraction": 0.7723214030265808,
"avg_line_length": 62.85714340209961,
"blob_id": "0baf63836a6d2de5a62f2ae6e2c407da2d54cd27",
"content_id": "4d42ea27259bb8d35d0f40e97901cac8a80fef45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 7,
"path": "/dnm_calling/run_TDT_CC.sh",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\npython TDT_CC.py doTDT \\\n/seq/dax/TaiTrios_AllWaves_08.2015/Exome/v1/TaiTrios_AllWaves_08.2015.vcf.gz \\\n/psych/genetics_data/howrigan/projects/taiwan_trio/data/Taiwanese_Trio_AllWaves_08.2015_confirmedTrios_uncontaminatedTrios.fam \\\n/psych/genetics_data/howrigan/projects/taiwan_trio/transmission_data/variant_transmission/TaiTrios_AllWaves_08.2015_n1142.txt \\\n--pl 25 --ab_Ref 0.05 --ab_Het 0.2 --ab_Alt 0.95 --gq_Par 25 --gq_Kid 25\n\n"
},
{
"alpha_fraction": 0.6163046360015869,
"alphanum_fraction": 0.6329392194747925,
"avg_line_length": 29.95319175720215,
"blob_id": "7350a12a07560014ab7f9c83f02b9f3e48443f51",
"content_id": "071f56c52956e72c822187dc19a83ed141f83f50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7274,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 235,
"path": "/exome_burden/DNM_burden.R",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "## DNM_burden.R\n\n## Author: Daniel P. Howrigan ([email protected])\n## Last Modified: January 2019\n\n\n## ----- DESCRIPTION:\n\n## Interactive Script to generate DNM burden results\n\n## ----- SCRIPT SET UP:\n\n## PART 1: read in relevant files\n## PART 2: aggregate DNMs by cohort\n## PART 3: test DNM burden in SCZ vs control\n## PART 4: test DNM burden in SCZ vs mutation model\n\n\n## ----- NOTES:\n\n## The mutation model used has already adjusted X chromosome DNM rates to the proportion of male probands among bothe SCZ and control cohorts\n\n\n## PART 1: read in relevant files\n\n## de novo mutation list\ndnm <- read.table('../files/combined_cohorts_DNM_list.tsv',h=T,stringsAsFactors=F,sep='\\t')\n\n## list of cohorts and trio counts\ngrps <- read.table('../files/DNM_studies.tsv',h=T,stringsAsFactors=F,sep='\\t')\n\n## mutation model\nmodel <- read.table('../files/gencode_pct75_gene17925.tsv',h=T,stringsAsFactors=F,sep='\\t')\n\n\n\n\n## PART 2: aggregate DNMs by cohort\n\n## Remove DNMs outside of Agilent Interval\ndnm <- subset(dnm,dnm$agilent_interval==T)\n\n## Remove DNMs outside of genes with low 10x coverage\ndnm <- subset(dnm,dnm$gene_symbol_used %in% model$gene)\n\n## create per-trio count for various measures\nall <- NA\nptv <- NA\nmis <- NA\nsyn <- NA\n\n## aggregate DNMs by cohort\nfor (i in 1:nrow(grps)) {\n\n ## subset to cohort\n dnm2 <- subset(dnm,dnm$STUDY %in% grps$STUDY[i])\n dnm2 <- subset(dnm2,dnm2$DISEASE %in% grps$DISEASE[i])\n\n all[i] <- nrow(dnm2)\n ptv[i] <- sum(dnm2$annotation_used=='ptv')\n mis[i] <- sum(dnm2$annotation_used=='missense')\n syn[i] <- sum(dnm2$annotation_used=='synonymous')\n\n print(i)\n} ## End of i LooP\n\n## add counts to cohort list\ngrps <- cbind(grps,all,ptv,mis,syn)\n\n\n\n\n\n\n\n## PART 3: test DNM burden in SCZ vs control\n\n\n## data <- read.table('burden_scz/published_ttrio_dnmCounts_agilentIntervals.txt',h=T,stringsAsFactors=F)\nNOW grps\n\n## remove outlier cohorts\ngrps <- subset(grps,grps$STUDY != 'Xu')\n\n## split by disease\nstudy <- names(table(grps$DISEASE))\n\n## set up variables\ntrios <- NA\ndnm <- NA; dnm_rate <- NA; dnm_lowci <- NA; dnm_highci <- NA; dnm_enrich <- NA; dnm_pval <- NA\nptv <- NA; ptv_rate <- NA; ptv_lowci <- NA; ptv_highci <- NA; ptv_enrich <- NA; ptv_pval <- NA\nmis <- NA; mis_rate <- NA; mis_lowci <- NA; mis_highci <- NA; mis_enrich <- NA; mis_pval <- NA\nsyn <- NA; syn_rate <- NA; syn_lowci <- NA; syn_highci <- NA; syn_enrich <- NA; syn_pval <- NA\n\n\n\nfor (i in 1:length(study)) {\n\n grp1 <- subset(grps,grps$DISEASE==study[i])\n grp2 <- subset(grps,grps$DISEASE!=study[i])\n\n trios[i] <- sum(grp1$TRIOS)\n\n ## Overall DNM\n mod <- poisson.test(c(sum(grp1$all),sum(grp2$all)),\n \tc(sum(grp1$TRIOS),sum(grp2$TRIOS)))\n dnm[i] <- sum(grp1$all)\n dnm_rate[i] <- sum(grp1$all) / sum(grp1$TRIOS)\n grp2_rate <- sum(grp2$all) / sum(grp2$TRIOS)\n dnm_lowci[i] <- as.numeric(mod$conf.int[1]*grp2_rate)\n dnm_highci[i] <- as.numeric(mod$conf.int[2]*grp2_rate)\n dnm_pval[i] <- mod$p.value \n dnm_enrich[i] <- mod$estimate\n\n ## PTV\n mod <- poisson.test(c(sum(grp1$ptv),sum(grp2$ptv)),\n \tc(sum(grp1$TRIOS),sum(grp2$TRIOS)))\n ptv[i] <- sum(grp1$ptv)\n ptv_rate[i] <- sum(grp1$ptv) / sum(grp1$TRIOS)\n grp2_rate <- sum(grp2$ptv) / sum(grp2$TRIOS)\n ptv_lowci[i] <- as.numeric(mod$conf.int[1]*grp2_rate)\n ptv_highci[i] <- as.numeric(mod$conf.int[2]*grp2_rate)\n ptv_pval[i] <- mod$p.value \n ptv_enrich[i] <- mod$estimate\n\n ## MIS\n mod <- poisson.test(c(sum(grp1$mis),sum(grp2$mis)),\n 
\tc(sum(grp1$TRIOS),sum(grp2$TRIOS)))\n mis[i] <- sum(grp1$mis)\n mis_rate[i] <- sum(grp1$mis) / sum(grp1$TRIOS)\n grp2_rate <- sum(grp2$mis) / sum(grp2$TRIOS)\n mis_lowci[i] <- as.numeric(mod$conf.int[1]*grp2_rate)\n mis_highci[i] <- as.numeric(mod$conf.int[2]*grp2_rate)\n mis_pval[i] <- mod$p.value \n mis_enrich[i] <- mod$estimate\n\n ## SYN\n mod <- poisson.test(c(sum(grp1$syn),sum(grp2$syn)),\n \tc(sum(grp1$TRIOS),sum(grp2$TRIOS)))\n syn[i] <- sum(grp1$syn)\n syn_rate[i] <- sum(grp1$syn) / sum(grp1$TRIOS)\n grp2_rate <- sum(grp2$syn) / sum(grp2$TRIOS)\n syn_lowci[i] <- as.numeric(mod$conf.int[1]*grp2_rate)\n syn_highci[i] <- as.numeric(mod$conf.int[2]*grp2_rate)\n syn_pval[i] <- mod$p.value \n syn_enrich[i] <- mod$estimate\n\n} ## END of i LooP\n\n(burden_casecon <- cbind.data.frame(study,trios,\ndnm, dnm_rate, dnm_lowci, dnm_highci, dnm_enrich, dnm_pval,\nptv, ptv_rate, ptv_lowci, ptv_highci, ptv_enrich, ptv_pval,\nmis, mis_rate, mis_lowci, mis_highci, mis_enrich, mis_pval,\nsyn, syn_rate, syn_lowci, syn_highci, syn_enrich, syn_pval))\n\n## write out to file\n## write.table(burden_casecon,'burden_casecon_results.txt',col=T,row=F,quo=F,sep='\\t')\n\n\n\n\n\n## PART 4: test DNM burden in SCZ vs mutation model\n\n## aggregate exome wide mutation model rate (NOTE: X chromosome rates already accounted for in mutation model file)\np_all <- sum(model$p_all)\np_mis <- sum(model$p_mis)\np_ptv <- sum(model$p_lof)\np_syn <- sum(model$p_syn)\n\nexp_rate <- c(p_all,p_mis,p_ptv,p_syn)\n\n## split by disease\nstudy <- names(table(grps$DISEASE))\n\n## set up variables\ntrios <- NA\ndnm <- NA; dnm_rate <- NA; dnm_lowci <- NA; dnm_highci <- NA; dnm_enrich <- NA; dnm_pval <- NA\nptv <- NA; ptv_rate <- NA; ptv_lowci <- NA; ptv_highci <- NA; ptv_enrich <- NA; ptv_pval <- NA\nmis <- NA; mis_rate <- NA; mis_lowci <- NA; mis_highci <- NA; mis_enrich <- NA; mis_pval <- NA\nsyn <- NA; syn_rate <- NA; syn_lowci <- NA; syn_highci <- NA; syn_enrich <- NA; syn_pval <- NA\n\n\nfor (i in 1:length(study)) {\n\n grp1 <- subset(grps,grps$DISEASE==study[i])\n\n trios[i] <- sum(grp1$TRIOS)\n\n ## Overall DNM\n mod <- poisson.test(sum(grp1$all),sum(grp1$TRIOS),p_all)\n dnm[i] <- sum(grp1$all)\n dnm_rate[i] <- sum(grp1$all) / sum(grp1$TRIOS)\n dnm_lowci[i] <- as.numeric(mod$conf.int[1])\n dnm_highci[i] <- as.numeric(mod$conf.int[2])\n dnm_pval[i] <- mod$p.value \n dnm_enrich[i] <- mod$estimate / p_all\n\n ## PTV\n mod <- poisson.test(sum(grp1$ptv),sum(grp1$TRIOS),p_ptv)\n ptv[i] <- sum(grp1$ptv)\n ptv_rate[i] <- sum(grp1$ptv) / sum(grp1$TRIOS)\n ptv_lowci[i] <- as.numeric(mod$conf.int[1])\n ptv_highci[i] <- as.numeric(mod$conf.int[2])\n ptv_pval[i] <- mod$p.value \n ptv_enrich[i] <- mod$estimate / p_ptv\n\n ## MIS\n mod <- poisson.test(sum(grp1$mis),sum(grp1$TRIOS),p_mis)\n mis[i] <- sum(grp1$mis)\n mis_rate[i] <- sum(grp1$mis) / sum(grp1$TRIOS)\n mis_lowci[i] <- as.numeric(mod$conf.int[1])\n mis_highci[i] <- as.numeric(mod$conf.int[2])\n mis_pval[i] <- mod$p.value \n mis_enrich[i] <- mod$estimate / p_mis\n\n ## SYN\n mod <- poisson.test(sum(grp1$syn),sum(grp1$TRIOS),p_syn)\n syn[i] <- sum(grp1$syn)\n syn_rate[i] <- sum(grp1$syn) / sum(grp1$TRIOS)\n syn_lowci[i] <- as.numeric(mod$conf.int[1])\n syn_highci[i] <- as.numeric(mod$conf.int[2])\n syn_pval[i] <- mod$p.value \n syn_enrich[i] <- mod$estimate / p_syn\n\n} ## END of i LooP\n\n(burden_model <- cbind.data.frame(study,trios,\ndnm, dnm_rate, dnm_lowci, dnm_highci, dnm_enrich, dnm_pval,\nptv, ptv_rate, ptv_lowci, ptv_highci, ptv_enrich, ptv_pval,\nmis, mis_rate, 
mis_lowci, mis_highci, mis_enrich, mis_pval,\nsyn, syn_rate, syn_lowci, syn_highci, syn_enrich, syn_pval))\n\n## write out to file\n## write.table(burden_model,'burden_model_results.txt',col=T,row=F,quo=F,sep='\\t')\n"
},
{
"alpha_fraction": 0.701694905757904,
"alphanum_fraction": 0.7661017179489136,
"avg_line_length": 48,
"blob_id": "595210c7a687846accde3a7d0c9751c14e181a98",
"content_id": "6dc32f4d39c57420e468c1126006b8fad0265c92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 6,
"path": "/dnm_calling/vep_annotation.sh",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "python /psych/genetics_data/howrigan/projects/taiwan_trio/scripts/run_lof_annotation.py \\\n-i /seq/dax/TaiTrios_AllWaves_08.2015/Exome/v1/TaiTrios_AllWaves_08.2015.site_only_plus_mixins.vcf.gz \\\n-o /psych/genetics_data/howrigan/projects/taiwan_trio/data/annot/ \\\n--memory 8 \\\n-s 15000 \\\n-q hour\n\n"
},
{
"alpha_fraction": 0.5511952042579651,
"alphanum_fraction": 0.5605577826499939,
"avg_line_length": 33.156463623046875,
"blob_id": "f9abdd8878c7bda2abeab5d7765237bea3278a45",
"content_id": "9e5897a86b75d690296928769090f4aaefd6ffb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5020,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 147,
"path": "/dnm_calling/FamilyPed.py",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "\"\"\"\n:File: FamilyPed.py\n:Author: Jack A. Kosmicki\n:Last updated: 2015-04-23\n\nFamilyPed.py reads in the Pedigree file (.ped file).\n\nThe .ped file must be tab delimited ('\\t') and have the following 5 columns in this order:\n family_ID indiv_ID father_ID mother_ID sex affected_Status\nSee http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml for more information.\n\nFamilyPed.py stores the family relationships into three hash tables, one for trios,\n one for cases, and one for controls. An additional final hash table\n contains all of the case/control or trio individuals and their index in the vcf.\n\"\"\"\n\n\nimport sys\nfrom sets import Set\n\n\ndef readFamily(Ped_File, vcfIndivs, unaff_Flag):\n \"\"\" Read in the pedigree file for the trios (.ped file).\n\n Parameters\n ----------\n Ped_File: Pedigree file (.ped) of the individuals in the vcf.\n columns: FamilyID, IndividualID, dadID, momID, sex, phenotype\n sex: 1=male, 2=female, 3=unknown\n phenotype: 1=unaffected, 2=affected\n\n vcfIndivs: Array of individuals in the vcf file.\n unaff_Flag: Boolean indicating whether TDT is run on affected/unaffected individuals\n True: unaffected False: affected\n \"\"\"\n\n family = {} # family hash table Key = ID Value = (Father ID, Mother ID, Sex)\n indivs = {} # individuals in VCF Key = ID Value = index in the vcf\n\n indivSet = Set(vcfIndivs) # Convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n family_ID = field[0]\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n # Parents, cases, and controls will not have parental IDs.\n if(father_ID == '0' or mother_ID == '0'):\n continue\n\n # Check to see if the parents are in the vcf.\n if father_ID not in indivSet or mother_ID not in indivSet:\n sys.stderr.write('Family {} is incomplete.\\n'.format(family_ID))\n continue\n\n # If we only want affected probands.\n if not unaff_Flag:\n if field[5] != '2':\n continue\n # If we are only looking at unaffected probands.\n else:\n if field[5] != '1':\n continue\n\n # Family dictionary is in the form: {child_ID} = [Dad_ID, Mom_ID, Sex]\n family[indiv_ID] = (father_ID, mother_ID, sex)\n indivs[indiv_ID] = vcfIndivs.index(indiv_ID)\n indivs[father_ID] = vcfIndivs.index(father_ID)\n indivs[mother_ID] = vcfIndivs.index(mother_ID)\n\n print 'Number of families in hash table = {}.'.format(len(family))\n return family, indivs\n\n\ndef readCC(Ped_File, vcfIndivs):\n \"\"\" Read in the pedigree file for cases and controls (.ped file).\n\n Parameters\n ----------\n Ped_File: pedigree file (.ped) of the individuals in the vcf.\n columns = FamilyID, IndividualID, dadID, momID, sex, phenotype\n sex: 1=male, 2=female, 3=unknown\n phenotype: 1=unaffected, 2=affected\n\n vcfIndivs: Array of individuals in the vcf file.\n \"\"\"\n\n case = {} # case hash table: Key = ID Value = Sex\n control = {} # control hash table: Key = ID Value = Sex\n caseControl = {} # cases and controls hash table: Key = ID Value = index in vcf\n\n indivSet = Set(vcfIndivs) # convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n ptype = field[5] # case/control 
status: 1=control, 2=case\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n if(father_ID != '0' or mother_ID != '0'):\n continue\n\n elif(ptype == '2'):\n case[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n elif(ptype == '1'):\n control[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n print 'Number of cases in hash table = {}.'.format(len(case))\n print 'Number of controls in hash table = {}.'.format(len(control))\n return case, control, caseControl\n\n\ndef getGender(gender):\n \"\"\" \"\"\"\n Pass"
},
{
"alpha_fraction": 0.6325629353523254,
"alphanum_fraction": 0.6528505682945251,
"avg_line_length": 43.25,
"blob_id": "2f46527c3f44f34d1f9a214addc7aedae01e5006",
"content_id": "26f04bda79f2b03711084a1224a9573b17350f6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 19470,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 440,
"path": "/geneset_enrichment/geneset_enrichment.R",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "#!bin/Rscript\n\n# geneset_enrichment.R\n\n## Author: Daniel P. Howrigan ([email protected])\n## Last Modified: February 2019\n\n\n## ----- DESCRIPTION:\n\n## Command line script to generate DNM gene set enrichment results\n\n\n# USAGE:\n\n# Rscript geneset_enrichment.R \\\n# [affected DNM list] \\\n# [unaffected DNM list] \\\n# [mutation expection gene list] \\\n# [gene sets file] \\\n# [output results file] \\\n# [DNM overlap details path] \\\n# [annotation type] \\\n# [header status] \\\n\n## --- argument descriptions:\n\n## affected DNM list: DNM list for affected samples from dnm_list.R containing gene symbol (column 1) and annotation (column 2)\n## unaffected DNM list: DNM list for unaffected samples from dnm_list.R containing gene symbol (column 1) and annotation (column 2)\n## mutation expection gene list: list of genes with mutation expectation for default annotations\n## gene sets file: gene set list with gene symbols (column 1) and gene set name (column 2) \n## output results file: results filename\n## DNM overlap details path: directory path for listing the overlapping genes in each gene set. Produces one file per gene set\n## annotation type: use 'default_annotation' (5 tests: all,ptv,ptv+mis,mis,syn) or 'custom_annotation' (1 aff/unaff test)\n## OPTIONS: [default_annotation] [custom_annotation]\n## header status: does the gene set list have a column header or not? \n## OPTIONS: [header] [no_header]\n\n\n# example usage:\n\n# Rscript geneset_enrichment.R \\\n# SCZ_n2772_CCDS.dnm \\\n# CON_n2216_CCDS.dnm \\\n# ../files/gencode_pct75_gene17925.tsv \\\n# ../files/candidate_genesets.tsv \\\n# SCZ_n2772_CCDS_coverageQC_candidate_genesets.tsv \\\n# overlap/SCZ_n2772_CCDS_coverageQC/ \\\n# default_annotation \\\n# header\n\n\n## ----- SCRIPT SET UP:\n\n## PART 1: read in command line arguments\n## PART 2: read in files / parse command line arguments\n## PART 3: Running enrichment with default annotations (all_types==T)\n## PART 4: Running enrichment with custom annotation (all_types==F)\n\n\n## ----- NOTES:\n\n## gene set enrichment does not test DNM rate, but DNM proportions, and thus doesn't require sample counts\n\n## the script currently restricts to genes in the mutation expection gene list, even for custom annotations and case/control comparisons\n\n## mutation expection gene list requires these four columns:\n## 1) gene - gene symbol\n## 2) p_all - all coding mutation probability\n## 2) p_syn - synonymous mutation probability\n## 2) p_mis - missense mutation probability\n## 2) p_lof - LoF/PTV/LGD mutation probability (sum of nonsense, frameshift, and essential splice probabilities)\n\n\n\n\n\n\n\n\n## PART 1: read in command line arguments\nargs <- commandArgs(TRUE)\ndnmlist <- args[1] ## observed DNMs\nunafflist <- args[2] ## observed DNMs\ngenelist <- args[3] ## gene file\nsetfile <- args[4] ## gene set directory\nout <- args[5] ## output file name\noverlap_path <- args[6] ## list output directory and name\nannot_types <- args[7] ## run primary annotation types or only single type\nsetfile_header <- args[8] ## does the geneset file have a header\n\ncat(paste('affected DNM file =',dnmlist),'\\n')\ncat(paste('unaffected DNM file =',unafflist),'\\n')\ncat(paste('mutation expection list =',genelist),'\\n')\ncat(paste('gene sets file =',setfile),'\\n')\ncat(paste('output results file =',out),'\\n')\ncat(paste('DNM overlap details path =',overlap_path),'\\n')\ncat(paste('running all annots =',annot_types),'\\n')\n\n\n## PART 2: read in files / parse command line 
arguments\n\n## read in files\ndnm <- read.table(dnmlist,stringsAsFactors=F)\nunaff <- read.table(unafflist,stringsAsFactors=F)\ngenes <- read.table(genelist,h=T,stringsAsFactors=F)\n\n\n## --- parsing command line input\n\n## check / create overlap directory\nsystem(paste0('mkdir -p ',overlap_path))\n\n## selecting DNM annotation types\nif (annot_types=='default_annotation') {all_types <- TRUE}\nif (annot_types=='custom_annotation') {all_types <- FALSE}\n\n## read in gene set file\nif (setfile_header=='header') { setlist <- read.table(setfile,h=T,sep='\\t',stringsAsFactors=F) }\nif (setfile_header=='no_header') { setlist <- read.table(setfile,h=F,sep='\\t',stringsAsFactors=F) }\n\n\n## get gene sets\nsetlist_name <- names(table(setlist[,2]))\n\n\n## PART 3: Running enrichment with default annotations (all_types==T)\n\nif (all_types==T) {\n\n## get DNM counts\nall <- dnm$V1\nlofmis <- subset(dnm$V1,dnm$V2=='ptv' | dnm$V2=='missense')\nlof <- subset(dnm$V1,dnm$V2=='ptv')\nmis <- subset(dnm$V1,dnm$V2=='missense')\nsyn <- subset(dnm$V1,dnm$V2=='synonymous')\n\nall_dnm <- rep(length(dnm$V1),length(setlist_name))\nlofmis_dnm <- rep(sum(dnm$V2=='ptv' | dnm$V2=='missense'),length(setlist_name))\nlof_dnm <- rep(sum(dnm$V2=='ptv'),length(setlist_name))\nmis_dnm <- rep(sum(dnm$V2=='missense'),length(setlist_name))\nsyn_dnm <- rep(sum(dnm$V2=='synonymous'),length(setlist_name))\n\n## --- Gene matching\ngenes_listed <- NA\ngenes_missed <- NA\ngenes_tested <- NA\ngenes_pct <- NA\ngenenames_missed <- NA\n\n## --- Binomal testing\nall_expected <- NA; all_actual <- NA; all_enrichment <- NA; all_pval <- NA; all_count <- NA; all_low95 <- NA; all_hi95 <- NA\nlofmis_expected <- NA; lofmis_actual <- NA; lofmis_enrichment <- NA; lofmis_pval <- NA; lofmis_count <- NA; lofmis_low95 <- NA; lofmis_hi95 <- NA\nlof_expected <- NA; lof_actual <- NA; lof_enrichment <- NA; lof_pval <- NA; lof_count <- NA; lof_low95 <- NA; lof_hi95 <- NA\nmis_expected <- NA; mis_actual <- NA; mis_enrichment <- NA; mis_pval <- NA; mis_count <- NA; mis_low95 <- NA; mis_hi95 <- NA\nsyn_expected <- NA; syn_actual <- NA; syn_enrichment <- NA; syn_pval <- NA; syn_count <- NA; syn_low95 <- NA; syn_hi95 <- NA\n\n\n## -- unaffected counts\nunaff_all <- unaff$V1\nunaff_lofmis <- subset(unaff$V1,unaff$V2=='ptv' | unaff$V2=='missense')\nunaff_lof <- subset(unaff$V1,unaff$V2=='ptv')\nunaff_mis <- subset(unaff$V1,unaff$V2=='missense')\nunaff_syn <- subset(unaff$V1,unaff$V2=='synonymous')\n\nunaff_all_dnm <- rep(length(unaff$V1),length(setlist_name))\nunaff_lofmis_dnm <- rep(sum(unaff$V2=='ptv' | unaff$V2=='missense'),length(setlist_name))\nunaff_lof_dnm <- rep(sum(unaff$V2=='ptv'),length(setlist_name))\nunaff_mis_dnm <- rep(sum(unaff$V2=='missense'),length(setlist_name))\nunaff_syn_dnm <- rep(sum(unaff$V2=='synonymous'),length(setlist_name))\n\n## --- Proportion testing\nunaff_all_count <- NA; all_prop <- NA; unaff_all_prop <- NA; all_prop_enrichment <- NA; all_prop_pval <- NA; all_prop_low95 <- NA; all_prop_hi95 <- NA\nunaff_lofmis_count <- NA; lofmis_prop <- NA; unaff_lofmis_prop <- NA; lofmis_prop_enrichment <- NA; lofmis_prop_pval <- NA; lofmis_prop_low95 <- NA; lofmis_prop_hi95 <- NA\nunaff_lof_count <- NA; lof_prop <- NA; unaff_lof_prop <- NA; lof_prop_enrichment <- NA; lof_prop_pval <- NA; lof_prop_low95 <- NA; lof_prop_hi95 <- NA\nunaff_mis_count <- NA; mis_prop <- NA; unaff_mis_prop <- NA; mis_prop_enrichment <- NA; mis_prop_pval <- NA; mis_prop_low95 <- NA; mis_prop_hi95 <- NA\nunaff_syn_count <- NA; syn_prop <- NA; unaff_syn_prop <- NA; 
syn_prop_enrichment <- NA; syn_prop_pval <- NA; syn_prop_low95 <- NA; syn_prop_hi95 <- NA\n\n\n## --- LooP through gene sets\nfor (i in 1:length(setlist_name)) {\n\n ll <- subset(setlist[,1],setlist[,2]==setlist_name[i])\n\n genes_listed[i] <- length(ll)\n genes_missed[i] <- sum(!(ll %in% genes$gene))\n genes_tested[i] <- sum(ll %in% genes$gene)\n genes_pct[i] <- sum(ll %in% genes$gene) / length(ll) \n ll2 <- ll[!(ll %in% genes$gene)]\n genenames_missed[i] <- paste(ll2,collapse=';')\n\n ## get mutation expectation\n gene2 <- subset(genes,genes$gene %in% ll) \n all_expected[i] <- sum(gene2$p_all) / sum(genes$p_all)\n lofmis_expected[i] <- (sum(gene2$p_lof)+sum(gene2$p_mis)) / (sum(genes$p_lof) + sum(genes$p_mis))\n lof_expected[i] <- sum(gene2$p_lof) / sum(genes$p_lof)\n mis_expected[i] <- sum(gene2$p_mis) / sum(genes$p_mis)\n syn_expected[i] <- sum(gene2$p_syn) / sum(genes$p_syn)\n\n ## get DNM count\n all_count[i] <- sum(all %in% gene2$gene)\n lofmis_count[i] <- sum(lofmis %in% gene2$gene)\n lof_count[i] <- sum(lof %in% gene2$gene)\n mis_count[i] <- sum(mis %in% gene2$gene)\n syn_count[i] <- sum(syn %in% gene2$gene)\n\n ## Run binomal test\n if (all_dnm[i] == 0) { all_pval[i] <- NA; all_low95[i] <- NA; all_hi95[i] <- NA; all_enrichment[i] <- NA }\n if (all_dnm[i] > 0) { \n all_binom <- binom.test(x=all_count[i],n=all_dnm[i],p=all_expected[i],alternative =\"two.sided\")\n all_pval[i] <- all_binom$p.value ; all_actual[i] <- all_binom$estimate \n all_low95[i] <- all_binom$conf.int[1] ; all_hi95[i] <- all_binom$conf.int[2] \n all_enrichment[i] <- all_actual[i] / all_expected[i]\n }\n\n if (lofmis_dnm[i] == 0) { lofmis_pval[i] <- NA; lofmis_low95[i] <- NA; lofmis_hi95[i] <- NA; lofmis_enrichment[i] <- NA }\n if (lofmis_dnm[i] > 0) { \n lofmis_binom <- binom.test(x=lofmis_count[i],n=lofmis_dnm[i],p=lofmis_expected[i],alternative =\"two.sided\")\n lofmis_pval[i] <- lofmis_binom$p.value ; lofmis_actual[i] <- lofmis_binom$estimate \n lofmis_low95[i] <- lofmis_binom$conf.int[1] ; lofmis_hi95[i] <- lofmis_binom$conf.int[2] \n lofmis_enrichment[i] <- lofmis_actual[i] / lofmis_expected[i]\n }\n\n if (lof_dnm[i] == 0) { lof_pval[i] <- NA; lof_low95[i] <- NA; lof_hi95[i] <- NA; lof_enrichment[i] <- NA }\n if (lof_dnm[i] > 0) { \n lof_binom <- binom.test(x=lof_count[i],n=lof_dnm[i],p=lof_expected[i],alternative =\"two.sided\")\n lof_pval[i] <- lof_binom$p.value ; lof_actual[i] <- lof_binom$estimate \n lof_low95[i] <- lof_binom$conf.int[1] ; lof_hi95[i] <- lof_binom$conf.int[2] \n lof_enrichment[i] <- lof_actual[i] / lof_expected[i]\n }\n\n if (mis_dnm[i] == 0) { mis_pval[i] <- NA; mis_low95[i] <- NA; mis_hi95[i] <- NA; mis_enrichment[i] <- NA }\n if (mis_dnm[i] > 0) { \n mis_binom <- binom.test(x=mis_count[i],n=mis_dnm[i],p=mis_expected[i],alternative =\"two.sided\")\n mis_pval[i] <- mis_binom$p.value ; mis_actual[i] <- mis_binom$estimate \n mis_low95[i] <- mis_binom$conf.int[1] ; mis_hi95[i] <- mis_binom$conf.int[2] \n mis_enrichment[i] <- mis_actual[i] / mis_expected[i]\n }\n\n if (syn_dnm[i] == 0) { syn_pval[i] <- NA; syn_low95[i] <- NA; syn_hi95[i] <- NA; syn_enrichment[i] <- NA }\n if (syn_dnm[i] > 0) { \n syn_binom <- binom.test(x=syn_count[i],n=syn_dnm[i],p=syn_expected[i],alternative =\"two.sided\")\n syn_pval[i] <- syn_binom$p.value ; syn_actual[i] <- syn_binom$estimate \n syn_low95[i] <- syn_binom$conf.int[1] ; syn_hi95[i] <- syn_binom$conf.int[2] \n syn_enrichment[i] <- syn_actual[i] / syn_expected[i]\n }\n\n\n ## get unaffected DNM count\n unaff_all_count[i] <- sum(unaff_all %in% 
gene2$gene)\n unaff_lofmis_count[i] <- sum(unaff_lofmis %in% gene2$gene)\n unaff_lof_count[i] <- sum(unaff_lof %in% gene2$gene)\n unaff_mis_count[i] <- sum(unaff_mis %in% gene2$gene)\n unaff_syn_count[i] <- sum(unaff_syn %in% gene2$gene)\n\n ## Run proportion test\n if (all_dnm[i] == 0 | unaff_all_dnm[i] == 0) { all_prop[i] <- NA; all_prop_pval[i] <- NA; all_prop_low95[i] <- NA; all_prop_hi95[i] <- NA; all_prop_enrichment[i] <- NA }\n if (all_dnm[i] > 0 & unaff_all_dnm[i] > 0) { \n all_proptest <- suppressWarnings(prop.test(c(all_count[i],unaff_all_count[i]),c(all_dnm[i],unaff_all_dnm[i]),alternative =\"two.sided\",correct=F))\n all_prop[i] <- all_proptest$estimate[1]; unaff_all_prop[i] <- all_proptest$estimate[2]\n all_prop_pval[i] <- all_proptest$p.value \n all_prop_low95[i] <- all_proptest$conf.int[1] ; all_prop_hi95[i] <- all_proptest$conf.int[2] \n all_prop_enrichment[i] <- all_prop[i] / unaff_all_prop[i]\n }\n\n if (lofmis_dnm[i] == 0 | unaff_lofmis_dnm[i] == 0) { lofmis_prop[i] <- NA; lofmis_prop_pval[i] <- NA; lofmis_prop_low95[i] <- NA; lofmis_prop_hi95[i] <- NA; lofmis_prop_enrichment[i] <- NA }\n if (lofmis_dnm[i] > 0 & unaff_lofmis_dnm[i] > 0) { \n lofmis_proptest <- suppressWarnings(prop.test(c(lofmis_count[i],unaff_lofmis_count[i]),c(lofmis_dnm[i],unaff_lofmis_dnm[i]),alternative =\"two.sided\",correct=F))\n lofmis_prop[i] <- lofmis_proptest$estimate[1]; unaff_lofmis_prop[i] <- lofmis_proptest$estimate[2]\n lofmis_prop_pval[i] <- lofmis_proptest$p.value \n lofmis_prop_low95[i] <- lofmis_proptest$conf.int[1] ; lofmis_prop_hi95[i] <- lofmis_proptest$conf.int[2] \n lofmis_prop_enrichment[i] <- lofmis_prop[i] / unaff_lofmis_prop[i]\n }\n\n if (lof_dnm[i] == 0 | unaff_lof_dnm[i] == 0) { lof_prop[i] <- NA; lof_prop_pval[i] <- NA; lof_prop_low95[i] <- NA; lof_prop_hi95[i] <- NA; lof_prop_enrichment[i] <- NA }\n if (lof_dnm[i] > 0 & unaff_lof_dnm[i] > 0) { \n lof_proptest <- suppressWarnings(prop.test(c(lof_count[i],unaff_lof_count[i]),c(lof_dnm[i],unaff_lof_dnm[i]),alternative =\"two.sided\",correct=F))\n lof_prop[i] <- lof_proptest$estimate[1]; unaff_lof_prop[i] <- lof_proptest$estimate[2]\n lof_prop_pval[i] <- lof_proptest$p.value \n lof_prop_low95[i] <- lof_proptest$conf.int[1] ; lof_prop_hi95[i] <- lof_proptest$conf.int[2] \n lof_prop_enrichment[i] <- lof_prop[i] / unaff_lof_prop[i]\n }\n\n if (mis_dnm[i] == 0 | unaff_mis_dnm[i] == 0) { mis_prop[i] <- NA; mis_prop_pval[i] <- NA; mis_prop_low95[i] <- NA; mis_prop_hi95[i] <- NA; mis_prop_enrichment[i] <- NA }\n if (mis_dnm[i] > 0 & unaff_mis_dnm[i] > 0) { \n mis_proptest <- suppressWarnings(prop.test(c(mis_count[i],unaff_mis_count[i]),c(mis_dnm[i],unaff_mis_dnm[i]),alternative =\"two.sided\",correct=F))\n mis_prop[i] <- mis_proptest$estimate[1]; unaff_mis_prop[i] <- mis_proptest$estimate[2]\n mis_prop_pval[i] <- mis_proptest$p.value \n mis_prop_low95[i] <- mis_proptest$conf.int[1] ; mis_prop_hi95[i] <- mis_proptest$conf.int[2] \n mis_prop_enrichment[i] <- mis_prop[i] / unaff_mis_prop[i]\n }\n\n if (syn_dnm[i] == 0 | unaff_syn_dnm[i] == 0) { syn_prop[i] <- NA; syn_prop_pval[i] <- NA; syn_prop_low95[i] <- NA; syn_prop_hi95[i] <- NA; syn_prop_enrichment[i] <- NA }\n if (syn_dnm[i] > 0 & unaff_syn_dnm[i] > 0) { \n syn_proptest <- suppressWarnings(prop.test(c(syn_count[i],unaff_syn_count[i]),c(syn_dnm[i],unaff_syn_dnm[i]),alternative =\"two.sided\",correct=F))\n syn_prop[i] <- syn_proptest$estimate[1]; unaff_syn_prop[i] <- syn_proptest$estimate[2]\n syn_prop_pval[i] <- syn_proptest$p.value \n syn_prop_low95[i] <- 
syn_proptest$conf.int[1] ; syn_prop_hi95[i] <- syn_proptest$conf.int[2] \n syn_prop_enrichment[i] <- syn_prop[i] / unaff_syn_prop[i]\n }\n\n ## get DNM list\n lof_list <- lof[lof %in% gene2$gene]\n mis_list <- mis[mis %in% gene2$gene]\n syn_list <- syn[syn %in% gene2$gene]\n\n ## combine and write to file\n full_list <- rbind(c('PTV overlap = ',paste(lof_list,collapse=';')),\n \t c('Missense overlap = ',paste(mis_list,collapse=';')),\n \t c('Synonymous overlap = ',paste(syn_list,collapse=';')))\n\n ## write to list \n write.table(full_list,paste0(overlap_path,setlist_name[i],'.overlap'),col=F,row=F,quo=F,sep='\\t')\n\n print(i) \n\n} ## END of i LooP\n\n\n## combine results\nset_data <- cbind.data.frame(setlist_name,genes_listed,genes_missed,genes_tested,genes_pct,\n\t all_dnm,unaff_all_dnm,lofmis_dnm,unaff_lofmis_dnm,lof_dnm,unaff_lof_dnm,mis_dnm,unaff_mis_dnm,syn_dnm,unaff_syn_dnm,\n\t all_count,unaff_all_count,lofmis_count,unaff_lofmis_count,lof_count,unaff_lof_count,mis_count,unaff_mis_count,syn_count,unaff_syn_count,\n all_expected,all_actual,all_enrichment,all_pval,all_low95,all_hi95,\n all_prop,unaff_all_prop,all_prop_enrichment,all_prop_pval,all_prop_low95,all_prop_hi95,\n lofmis_expected,lofmis_actual,lofmis_enrichment,lofmis_pval,lofmis_low95,lofmis_hi95,\n lofmis_prop,unaff_lofmis_prop,lofmis_prop_enrichment,lofmis_prop_pval,lofmis_prop_low95,lofmis_prop_hi95,\n lof_expected,lof_actual,lof_enrichment,lof_pval,lof_low95,lof_hi95,\n lof_prop,unaff_lof_prop,lof_prop_enrichment,lof_prop_pval,lof_prop_low95,lof_prop_hi95,\n mis_expected,mis_actual,mis_enrichment,mis_pval,mis_low95,mis_hi95,\n mis_prop,unaff_mis_prop,mis_prop_enrichment,mis_prop_pval,mis_prop_low95,mis_prop_hi95,\n syn_expected,syn_actual,syn_enrichment,syn_pval,syn_low95,syn_hi95,\n syn_prop,unaff_syn_prop,syn_prop_enrichment,syn_prop_pval,syn_prop_low95,syn_prop_hi95,\n genenames_missed)\n\n## write to file\nwrite.table(set_data,out,col=T,row=F,quo=F,sep='\\t')\n\n\n} ## END of all_types==T\n\n\n\n\n## PART 4: Running enrichment with custom annotation (all_types==F)\n\nif (all_types==F) {\n\n## get DNM counts\nall <- dnm$V1\nall_dnm <- rep(length(dnm$V1),length(setlist_name))\n\n## --- Gene matching\ngenes_listed <- NA\ngenes_missed <- NA\ngenes_tested <- NA\ngenes_pct <- NA\ngenenames_missed <- NA\n\n## --- Binomal testing\nall_expected <- NA; all_actual <- NA; all_enrichment <- NA; all_pval <- NA; all_count <- NA; all_low95 <- NA; all_hi95 <- NA\n\n\n## -- unaffected counts\nunaff_all <- unaff$V1\nunaff_all_dnm <- rep(length(unaff$V1),length(setlist_name))\n\n## --- Proportion testing\nunaff_all_count <- NA; all_prop <- NA; unaff_all_prop <- NA; all_prop_enrichment <- NA; all_prop_pval <- NA; all_prop_low95 <- NA; all_prop_hi95 <- NA\n\n\n## --- LooP through gene sets\nfor (i in 1:length(setlist_name)) {\n\n ll <- subset(setlist[,1],setlist[,2]==setlist_name[i])\n\n genes_listed[i] <- length(ll)\n genes_missed[i] <- sum(!(ll %in% genes$gene))\n genes_tested[i] <- sum(ll %in% genes$gene)\n genes_pct[i] <- sum(ll %in% genes$gene) / length(ll) \n ll2 <- ll[!(ll %in% genes$gene)]\n genenames_missed[i] <- paste(ll2,collapse=';')\n\n ## get mutation expectation\n gene2 <- subset(genes,genes$gene %in% ll) \n all_expected[i] <- sum(gene2$p_all) / sum(genes$p_all)\n\n ## get DNM count\n all_count[i] <- sum(all %in% gene2$gene)\n\n ## Run binomal test\n if (all_dnm[i] == 0) { all_pval[i] <- NA; all_low95[i] <- NA; all_hi95[i] <- NA; all_enrichment[i] <- NA }\n if (all_dnm[i] > 0) { \n all_binom <- 
binom.test(x=all_count[i],n=all_dnm[i],p=all_expected[i],alternative =\"two.sided\")\n all_pval[i] <- all_binom$p.value ; all_actual[i] <- all_binom$estimate \n all_low95[i] <- all_binom$conf.int[1] ; all_hi95[i] <- all_binom$conf.int[2] \n all_enrichment[i] <- all_actual[i] / all_expected[i]\n }\n\n\n ## get unaffected DNM count\n unaff_all_count[i] <- sum(unaff_all %in% gene2$gene)\n\n ## Run proportion test\n if (all_dnm[i] == 0 | unaff_all_dnm[i] == 0) { all_prop[i] <- NA; all_prop_pval[i] <- NA; all_prop_low95[i] <- NA; all_prop_hi95[i] <- NA; all_prop_enrichment[i] <- NA }\n if (all_dnm[i] > 0 & unaff_all_dnm[i] > 0) { \n all_proptest <- suppressWarnings(prop.test(c(all_count[i],unaff_all_count[i]),c(all_dnm[i],unaff_all_dnm[i]),alternative =\"two.sided\",correct=F))\n all_prop[i] <- all_proptest$estimate[1]; unaff_all_prop[i] <- all_proptest$estimate[2]\n all_prop_pval[i] <- all_proptest$p.value \n all_prop_low95[i] <- all_proptest$conf.int[1] ; all_prop_hi95[i] <- all_proptest$conf.int[2] \n all_prop_enrichment[i] <- all_prop[i] / unaff_all_prop[i]\n }\n\n ## get DNM list\n overlap_list <- all[all %in% gene2$gene]\n\n # write to list \n write.table(overlap_list,paste0(overlap_path,setlist_name[i],'.overlap'),col=F,row=F,quo=F,sep='\\t')\n\n print(i) \n\n} ## END of i LooP\n\n\n## combine results\nset_data <- cbind.data.frame(setlist_name,genes_listed,genes_missed,genes_tested,genes_pct,\n\t all_dnm,unaff_all_dnm,all_count,unaff_all_count,\n all_expected,all_actual,all_enrichment,all_pval,all_low95,all_hi95,\n all_prop,unaff_all_prop,all_prop_enrichment,all_prop_pval,all_prop_low95,all_prop_hi95,\n genenames_missed)\n\n## write to file\nwrite.table(set_data,out,col=T,row=F,quo=F,sep='\\t')\n\n\n} ## END of all_types==F\n\n\n\n# END of geneset_enrichment.R\n"
},
{
"alpha_fraction": 0.758051872253418,
"alphanum_fraction": 0.7847604155540466,
"avg_line_length": 47.96154022216797,
"blob_id": "d4064b53437a987d9d79c6425e9eab1074a4a72e",
"content_id": "796f0847180655ad1ec8462b268a216f2aaeb9fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1273,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 26,
"path": "/files/README.md",
"repo_name": "weizhousjtu/trio_sequence_analysis",
"src_encoding": "UTF-8",
"text": "# Overview\nThe files are used to run DNM exome-wide burden, gene set, and single gene enrichment analyses\n\n# Files\nDNM_studies.tsv\n * list of cohorts, disease designation, and number of trios reported\n\ncombined_cohorts_DNM_list.tsv\n * full list of QC-passing (or published) DNM calls from the trios listed in DNM_studies.tsv\n * ALl DNMs aligned to hg19 (GRCh37) reference\n * contains annotations used in subsequent analyses\n\ngencode_pct75_gene17925.tsv\n * Per-gene mutation model probabilities\n * Probabilities split by primary annotation (PTV, missense, synonymous)\n * Using mutation model probabilities from Gencode v19 transcripts (See supplementary section 8 - Mutation rate model testing)\n * Restricted to 17925 'well-covered' genes in the SCZ Taiwanese trio cohort (at least 75% of capture target in gene meets 10x coverage)\n * Probabilities additionally adjusted to coverage and proportion of females in full trio set\n\ngencode_pct75_gene17925_haploid.tsv\n * Similar file to above but using per-chromosome probabilities (as opposed to per-proband probabilities)\n * For autosomal genes = probability * 2\n * For X-linked genes = female probands (prob. * 2) / male probands (prob.)\n\ncandidate_genesets.tsv\n * HGNC gene symbols for 85 gene sets used in the main analysis\n"
}
] | 20 |
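A note on the filtering logic in the trio_sequence_analysis records above: the three PhredScaleFilter_* variants apply one shared rule with the roles of the PL entries rotated, namely that the called genotype must carry a normalized Phred-scaled likelihood of 0 while both alternative genotypes meet the minimum threshold. The sketch below is a minimal, self-contained Python illustration of that rule, not code from the repository; the stats layout (GT and PL keys, PL ordered homoRef/het/homoAlt) follows the VCF_VEP.py docstrings, the default threshold of 25 mirrors the --pl 25 flag in run_TDT_CC.sh, and the helper name and example values are hypothetical.

def passes_pl_filter(stats, pl_thresh=25):
    # Normalized PLs are ordered [homoRef, het, homoAlt]; the called
    # genotype must have PL == 0 and both alternatives must reach the
    # threshold, exactly as in the three per-genotype filters above.
    order = {'homoRef': 0, 'het': 1, 'homoAlt': 2}
    idx = order.get(stats['GT'])
    if idx is None:
        return False
    called = stats['PL'][idx]
    others = [pl for i, pl in enumerate(stats['PL']) if i != idx]
    return called == 0 and all(pl >= pl_thresh for pl in others)

print(passes_pl_filter({'GT': 'het', 'PL': [40, 0, 37]}))  # True
print(passes_pl_filter({'GT': 'het', 'PL': [40, 0, 10]}))  # False: homoAlt PL below threshold

Expressed this way, supporting a new genotype class would only require extending the order table, rather than writing a fourth near-identical filter function.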
Sohamraje137/Python-Learning
|
https://github.com/Sohamraje137/Python-Learning
|
a644f5f6b1501990c5d826b9f74485d621050fe4
|
17987632fe46a74a12e863616d22df32cfff1435
|
c11e258575bf259b87e74407f437f9c7a8b75f98
|
refs/heads/master
| 2020-03-17T00:31:43.764726 | 2018-07-03T14:56:19 | 2018-07-03T14:56:19 | 133,120,138 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5507692098617554,
"alphanum_fraction": 0.618461549282074,
"avg_line_length": 12.583333015441895,
"blob_id": "d18afb5ecb9882790060fa8ac93edc1540599bfc",
"content_id": "e747a61a63d3655b19ba216fcb0418ccea5aa10e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 24,
"path": "/Python_codes/ex06.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "from functools import lru_cache\n\n@lru_cache(maxsize=1000)\ndef fib(n):\n\tif type(n)!= int :\n\t\traise TypeError(\"n must be positive integer \")\n\n\n\n\tif n==1 :\n\t\treturn 1\n\telif n==2 :\n\t\treturn 1\n\telif n > 2:\n\t\treturn fib(n-1)+fib(n-2)\n\n\n\nfor n in range(1,1001):\n\tprint(n,\":\",fib(n))\n\n\nfor n in range(1,1001):\n\tprint(fib(n+1)/fib(n))"
},
{
"alpha_fraction": 0.5887850522994995,
"alphanum_fraction": 0.6214953064918518,
"avg_line_length": 14.357142448425293,
"blob_id": "9d1a59167e53da032a25672c070f6a649d7ba9ff",
"content_id": "51c5726c34537ac2767fad8164996054f3d09c95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 14,
"path": "/Python_codes/ex03.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "# inpu1=input(\"Please enter a test string\\n\")\n\n# if len(inpu1) <3:\n# \tprint(\"\\nShort\")\n# else:\n# \tprint(\"Okay\")\n\nnum=input(\"Please enter a number\");\nnum1=int(num)\n\nif num1%2 == 0:\n\tprint(\"Even\")\nelse:\n\tprint(\"Odd\")"
},
{
"alpha_fraction": 0.680672287940979,
"alphanum_fraction": 0.7121848464012146,
"avg_line_length": 15.448275566101074,
"blob_id": "73e79412d1c622c57bd8f4325b28598f20d37dac",
"content_id": "c8bf5a07c45e7765ee0a23cac4ba2d7b3d129f20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 29,
"path": "/Python_codes/ex14.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "earth=[\"Bhaiya\",\"Soham\",\"Baba\",\"Aayee\",\"Mama\"]\nearth.sort()\n\nprint(earth)\nearth.sort(reverse=True)\nprint(earth)\n\nearth1=(\"Bhaiya\",\"Soham\",\"Baba\",\"Aayee\",\"Mama\")\n# earth1.sort()\n\n# print(earth1)\n# earth1.sort(reverse=True)\n# print(earth1)\n\nearth2=[\"Bhaiya\",\"Soham\",\"Baba\",\"Aayee\",\"Mama\"]\nearth=sorted(earth2)\n\nprint(earth)\nprint(earth2)\n\nearth=sorted(earth2,reverse=True)\nprint(earth)\n\nearth3=sorted(earth1)\nprint(earth3)\n\nearth4=sorted(earth)\nprint(earth4)\nprint(type(earth4))"
},
{
"alpha_fraction": 0.5404255390167236,
"alphanum_fraction": 0.5914893746376038,
"avg_line_length": 12.11111068725586,
"blob_id": "0d49f83d03eb7ee53f0fe27ca7b1daa285dd3781",
"content_id": "05aaddae56bfd738e424884620ddf1eb5363699c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 18,
"path": "/Python_codes/ex05.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "fib_cache={}\n\ndef fib(n):\n\tif n in fib_cache:\n\t\treturn fib_cache[n]\n\tif n==1 :\n\t\treturn 1\n\telif n==2 :\n\t\treturn 1\n\telif n > 2:\n\t\tvalue= fib(n-1)+fib(n-2)\n\n\tfib_cache[n]=value\n\treturn value\n\n\nfor n in range(1,1001):\n\tprint(n,\":\",fib(n))"
},
{
"alpha_fraction": 0.5908183455467224,
"alphanum_fraction": 0.6806387305259705,
"avg_line_length": 10.952381134033203,
"blob_id": "a9acedcfbc3864292b7e18f2b2d4de617a1009ce",
"content_id": "d4a5b561102df7ce87e1edeea3c9007a2da398f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 42,
"path": "/Python_codes/ex13.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "from functools import reduce\n\ndata=[2,3,5,7,11,13,17,19,23,29]\ndata1=[0,2,3,5,7,11,13,17,19,23,29]\n\nmultiplier= lambda x,y: x*y\n\n\nprint(reduce(multiplier,data))\n\nprod=1\nfor i in data:\n\tprod=prod*i\n\n\nprint(prod)\n\n\nimport statistics\n\navg=statistics.mean(data)\nprint(avg)\n\nfilter(lambda x:x>avg, data)\n\nprint(filter)\n\n\nprint(list(filter(lambda x: x>avg, data)))\n\n\n\nprint(list(filter(None,data1)))\n\n#none,\"\",0.0,0j,{},(),[],false\n\nimport math\n\ndef area(r):\n\treturn r*r*3.1428\n\nprint(list(map(area,data1)))"
},
{
"alpha_fraction": 0.522088348865509,
"alphanum_fraction": 0.586345374584198,
"avg_line_length": 16.785715103149414,
"blob_id": "df89ee4b037ac7c69afa1292fcd63bf96c76f654",
"content_id": "7024a14648ab2c7dba96411bad9e28d544c69fe7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 14,
"path": "/Python_codes/ex09.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "import random\n\ndef randwalk(n):\n\tx,y=0,0\n\tfor i in range(n):\n\t\t(dx,dy)=random.choice([(0,1),(0,-1),(1,0),(-1,0)])\n\t\tx+=dx\n\t\ty+=dy\n\treturn (x,y)\n\n\nfor i in range(25):\n\twalk=randwalk(10)\n\tprint(walk,\"Distance from home = \", abs(walk[0])+abs(walk[1]))\n"
},
{
"alpha_fraction": 0.7317708134651184,
"alphanum_fraction": 0.7317708134651184,
"avg_line_length": 75.80000305175781,
"blob_id": "20cabbab68db766afb294a576f535d73d8ffa4b3",
"content_id": "6130e4dd1c2fbcae4f16758aafad7c489277e571",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 5,
"path": "/Python_codes/ex02.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "m=\"\"\"hello there this is my first attempt to learn python but i want to get master into this very soon \"beacuse pyhthon is important for django\"\\n 'else i would stick with java'\n and now i got to know how to use \"\"\"\nprint(m);\n//You can also use single codes or else double codes but ifyou have a sentence like this -\n//'I'm very keen in typing fast ' ->'This will produce an error\n"
},
{
"alpha_fraction": 0.45123061537742615,
"alphanum_fraction": 0.4855667054653168,
"avg_line_length": 27.128204345703125,
"blob_id": "13ef12f9588db8c4507f5cbbae05dfc05d806e13",
"content_id": "a023532e0d4254de417381dc970c3b7d81440371",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3291,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 117,
"path": "/Python_codes/snake_game.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "import random\nimport curses\n\ns = curses.initscr()\ncurses.start_color()\n\ncurses.curs_set(0)\nsh, sw = s.getmaxyx()\nw = curses.newwin(sh, sw, 0, 0)\nw.keypad(1)\nw.timeout(100)\n#w.box(100,100)\nw.border(\"|\",\"|\",\"-\",\"-\")\n\nscore=0\n#w.bgcolor(\"white\")\nsnk_x = sw//4\nsnk_y = sh//2\nsnake = [\n [snk_y, snk_x],\n [snk_y, snk_x-1],\n [snk_y, snk_x-2]\n]\ncurses.init_color(0, 224, 224, 224)\nfood = [sh//2, sw//2]\nw.addch(food[0], food[1], curses.ACS_PI)\n\nkey = curses.KEY_RIGHT\nw.addch(snake[0][0], snake[0][1], curses.ACS_BLOCK)\n# s.getcsh()\nprint(sh)\nprint(sw)\n\nwhile True:\n w.border(0)\n w.addstr(0, 2, 'Score : ' + str(score) + ' ') # Printing 'Score' and\n w.addstr(0, 27, ' SNAKE GAME ') # 'SNAKE' strings\n w.addstr(0, 50, 'Soham ') \n w.timeout(50 - (len(snake)//3)%120) # Increases the speed of Snake as its length increases\n \n next_key = w.getch()\n if next_key == ord(' '): # If SPACE BAR is pressed, wait for another\n next_key = -1 # one (Pause/Resume)\n while next_key != ord(' '):\n next_key = w.getch()\n next_key = key\n continue\n if next_key not in [curses.KEY_LEFT, curses.KEY_RIGHT,curses.KEY_UP, curses.KEY_DOWN]: # If an invalid key is pressed\n next_key = key\n if key == curses.KEY_DOWN and next_key==curses.KEY_UP :\n curses.beep()\n key=key\n elif key == curses.KEY_UP and next_key==curses.KEY_DOWN :\n curses.beep()\n key=key\n elif key == curses.KEY_RIGHT and next_key==curses.KEY_LEFT :\n curses.beep()\n key=key\n elif key == curses.KEY_LEFT and next_key==curses.KEY_RIGHT :\n curses.beep()\n key=key\n elif (next_key == -1 or next_key==key) :\n key = key \n else :\n key= next_key\n\n if snake[0] in snake[1:]:\n curses.beep()\n curses.endwin()\n break\n else:\n new_head = [snake[0][0], snake[0][1]]\n\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n\n snake.insert(0, new_head)\n\n if snake[0][0] == 0: snake[0][0] = sh-2\n if snake[0][1] == 0: snake[0][1] = sw-2\n if snake[0][0] == sh-1: snake[0][0] = 1\n if snake[0][1] == sw-1: snake[0][1] = 1\n\n if snake[0] == food:\n curses.flash()\n food = None\n score+=1;\n while food is None:\n nf = [\n random.randint(1, sh-2),\n random.randint(1, sw-2)\n ]\n food = nf if nf not in snake else None\n w.addch(food[0], food[1], curses.ACS_PI)\n true =1 \n else:\n tail = snake.pop()\n w.addch(tail[0], tail[1], ' ')\n \n w.addch(snake[0][0], snake[0][1], curses.ACS_BLOCK)\n if score%10==0 and score!=0 and true==1:\n curses.flash()\n curses.beep()\n curses.beep()\n true=0\n \n\n# s.refresh()\n# print(sh)\n# print(sw)\nprint(\"Your score is :\" +str(score))\n"
},
{
"alpha_fraction": 0.764102578163147,
"alphanum_fraction": 0.7743589878082275,
"avg_line_length": 26.85714340209961,
"blob_id": "ca99bec197f728826e5586ab14b18ec61ea9d142",
"content_id": "fa7f3816cc8c654eb268de79f7b9dc854e605ff7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 7,
"path": "/Django/mysite/webapp/views.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom django.http import HttpResponse\n\ndef index(request):\n\treturn HttpResponse(\"<h2>Hey there, Soham here with his first app</h2>\")\n# Create your views here.\n"
},
{
"alpha_fraction": 0.5864332318305969,
"alphanum_fraction": 0.6389496922492981,
"avg_line_length": 19.81818199157715,
"blob_id": "74490e692134c8aab4316aff9970f2b54b6f5c8d",
"content_id": "225759b12f9bb28f1203cd3f734c4ce35a1dfc25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 22,
"path": "/Python_codes/ex10.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "import random\n\ndef randwalk(n):\n\tx,y=0,0\n\tfor i in range(n):\n\t\t(dx,dy)=random.choice([(0,1),(0,-1),(1,0),(-1,0)])\n\t\tx+=dx\n\t\ty+=dy\n\treturn (x,y)\n\nnumber=70000\n\nfor walk_length in range(1,31):\n\tno_transport=0\n\tfor i in range(number):\n\t\t(x,y)=randwalk(walk_length)\n\t\tdistance= abs(x)+abs(y)\n\t\tif distance<=4:\n\t\t\tno_transport+=1\n\tno_transport_percent= 100*(no_transport /number)\n\n\tprint(\"Walk size = \", walk_length, \"/% of no transport = \",no_transport_percent)"
},
{
"alpha_fraction": 0.5797872543334961,
"alphanum_fraction": 0.6382978558540344,
"avg_line_length": 13.384614944458008,
"blob_id": "04f27c7ecf3a2ec635aa9567e73775ba85dceee6",
"content_id": "02aff8a4c33a0cc8fecc1afdc4330b2f096f64b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 13,
"path": "/Python_codes/ex12.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "\nimport json\nvalue=\"\"\" \n{\n\t\"title\": \"Tron:Legacy\",\n\t\"compser\":\"Soham\",\n\t\"release_year\":2018,\n\t\"budget\":1700000,\n\t\"actors\":null,\n\t\"won_oscar\":false\n}\"\"\"\n\ntron=json.loads(value)\nprint(tron) "
},
{
"alpha_fraction": 0.7199559211730957,
"alphanum_fraction": 0.7519294619560242,
"avg_line_length": 19.636363983154297,
"blob_id": "0c32c886e9613bab43b595166a1459ea1fb5e079",
"content_id": "735da7d2bdf8eb5654534399a891c81c8a21d904",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 44,
"path": "/Python_codes/ex04.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "print(\"fera\")\nv=\tarea()\nprint v\n\ndef area(b,h):\n\ta=1/2*b*h\n\tprint(a)\n\n\t#pandas \n\n\t\nMachine Learning SMP\nNumpy Assignment Questions\nAssignment I\n1.\n2.\n3.\n4.\n5.\nExtract the integer part of a random array using 5 different\nmethods\n6.\nWrite a Python program to check two random arrays are equal or not.\n7.\n\nWrite a Python program to find the nearest value from a given value in an\narray.\n8.\nWrite a Python program to get the n largest values of an array.\n9.\nHow to find the closest value (to a given scalar) in an array?\n10.\nWrite a Python program to create random vector of size 15 and replace\nthe maximum value by -1.\n11.\nConsider a random vector with shape (100,2) representing\ncoordinates, find point by point distances\n12.\nHow to I sort an array by the nth column?\n13.\nConsidering a four dimensions array, how to get sum over the\nlast two axis at once?\n14.\nHow to find the most frequent value in an array?"
},
{
"alpha_fraction": 0.702786386013031,
"alphanum_fraction": 0.7523219585418701,
"avg_line_length": 15.199999809265137,
"blob_id": "b4027b3e8f449569ec0671ebfbc066904dc0130a",
"content_id": "a6c739bc4d21e60f5c2cdbd1b7ec07e7e8f00883",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 20,
"path": "/Python_codes/dinasur.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "import pyautogui\nimport time\nimport pyscreenshot as ImageGrab\n\nclass Coordinates():\n\treplaybtn=(300,340)\n\tdinasur=(171,395)\n\ndef restart():\n\tpyautogui.click(Coordinates.replaybtn)\n\ndef pressSpace():\n\tpyautogui.keyDown('space')\n\ttime.sleep(0.05)\n\tprint(\"jump\")\n\tpyautogui.keyUp('space')\n\nrestart()\ntime.sleep(1)\npressSpace()"
},
{
"alpha_fraction": 0.5397260189056396,
"alphanum_fraction": 0.5726027488708496,
"avg_line_length": 16.428571701049805,
"blob_id": "fd72521662aede476930f53a54618cfb43d40657",
"content_id": "fa40b68464047b1f66ba21ac2198daf34e69ed78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 21,
"path": "/Python_codes/ex08.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "import random\ndef randwalk(n):\n\t\"\"\"Return coordinate after n random walk\"\"\"\n\tx=0;\n\ty=0;\n\n\tfor i in range(n):\n\t\tstep = random.choice(['N','S','E','W'])\n\t\tif step== 'N':\n\t\t\ty=y+1\n\t\telif step=='S':\n\t\t\ty=y-1\n\t\telif step=='E':\n\t\t\tx=x+1\n\t\telse:\n\t\t\tx=x-1\n\treturn (x,y)\n\nfor i in range(25):\n\twalk = randwalk(10)\n\tprint(walk,\"Distance from home= \",abs(walk[0])+abs(walk[1]))"
},
{
"alpha_fraction": 0.6537421345710754,
"alphanum_fraction": 0.6970243453979492,
"avg_line_length": 16.619047164916992,
"blob_id": "9fc5254088abe5bb8898bf37826e4c87de2ac9f0",
"content_id": "b1962fb2cc3ef699f40d978534e0b15879d0bdc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1109,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 63,
"path": "/Python_codes/ex11.py",
"repo_name": "Sohamraje137/Python-Learning",
"src_encoding": "UTF-8",
"text": "import datetime\n\nclass User:\n pass\n\nuser1=User()\n\nuser1.first_name=\"Soham\"\nuser1.last_name=\"Patil\"\n\nprint(user1.first_name)\nprint(user1.last_name)\n\nfirst_name=\"Aayee\"\nlast_name=\"Baba\"\n\n\nprint(first_name)\nprint(last_name)\n\nuser2=User()\n\n\nuser2.first_name=\"Soham\"\nuser2.last_name=\"Patil\"\n\nprint(user2.first_name)\nprint(user2.last_name)\n\nuser1.age=19\nuser2.fav=\"Chapati\"\n\nprint(user2.fav,user1.age)\n\nclass New_user:\n\tdef __init__(self,full_name,birthday):\n\t self.name=full_name\n\t self.birthday = birthday\n\n\t name_pecies=full_name.split(\" \")\n\t self.first_name= name_pecies[0]\n\t self.last_name=name_pecies[1]\n\n\tdef age(self):\n\t\t\"\"\"Returns the age of the user\"\"\"\n\t\ttoday=datetime.date(2018,5,12)\n\t\tyyyy=int(self.birthday[0:4])\n\t\tmm=int(self.birthday[4:6])\n\t\tdd=int(self.birthday[6:8])\n\t\tdob=datetime.date(yyyy,mm,dd)\n\t\tage_in_days=(today-dob).days\n\t\tage_in_years=age_in_days/365\n\t\treturn (age_in_days,age_in_years)\n\t # name_peecies=full_name.split(\" \")\nuser3= New_user(\"Soham Patil\", \"19980713\")\nprint(user3.name)\nprint(user3.birthday)\n\n\nprint(user3.first_name)\nprint(user3.last_name)\n\nprint(user3.age())"
}
] | 15 |
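A side note on ex11.py above: New_user.age() divides a day count by 365, which drifts across leap years. A leap-safe sketch of the same calculation (my illustration; age_in_years is a name introduced here), reusing the 'YYYYMMDD' birthday format from the file:

    import datetime

    def age_in_years(birthday, today=None):
        today = today or datetime.date.today()
        dob = datetime.datetime.strptime(birthday, "%Y%m%d").date()
        # subtract one year if this year's birthday has not happened yet
        return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))

    print(age_in_years("19980713", datetime.date(2018, 5, 12)))  # -> 19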
GooseYArd/brocade_stress
|
https://github.com/GooseYArd/brocade_stress
|
4c822d837021d41e046f70c4ca6804339144fdfb
|
8f759d45d08e25644c53fd38756b3a6a3a5f5047
|
6b7afd936eed4a72398054640f85aab7d5684c44
|
refs/heads/master
| 2015-08-13T05:18:34.968013 | 2014-08-27T20:17:03 | 2014-08-27T20:17:03 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7463414669036865,
"alphanum_fraction": 0.8585366010665894,
"avg_line_length": 67.33333587646484,
"blob_id": "66e240024846d66e0719d878f335ed4fac0dd1db",
"content_id": "303b64f41e3c24c4ab85ae5339bce8adb71c361f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 3,
"path": "/README.md",
"repo_name": "GooseYArd/brocade_stress",
"src_encoding": "UTF-8",
"text": "# To run 10000 requests for getAllVirtualServerSummary with a maximum of 10 concurrent sessions\n\npython stressreq_brocade.py 10.17.25.108 getAllVirtualServerSummary getAllVirtualServerSummary.xml 10000 10\n"
},
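The script this README drives (stressreq_brocade.py, next record) is written for Python 2 (urllib2, httplib). As a hedged modernization sketch only, assuming the same https://<device>/WS/<service> endpoint layout and SOAPAction header convention, the request construction would look roughly like this in Python 3:

    import base64
    import urllib.request

    def build_request(device, service, function, body, user, passwd):
        # build_request is a name introduced here, not part of the repo
        url = "https://%s/WS/%s" % (device, service)
        token = base64.b64encode(("%s:%s" % (user, passwd)).encode()).decode()
        req = urllib.request.Request(url, data=body.encode())  # data makes it a POST
        req.add_header("Authorization", "Basic %s" % token)
        req.add_header("SOAPAction", "urn:webservicesapi#%s" % function)
        return req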
{
"alpha_fraction": 0.5614625215530396,
"alphanum_fraction": 0.5665055513381958,
"avg_line_length": 32.51408386230469,
"blob_id": "3e02b4721dafb7b240cf3c75c41329d4bf1fb5d4",
"content_id": "77c02e3e959cbafb9153b03f5761f0049858b639",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4759,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 142,
"path": "/stressreq_brocade.py",
"repo_name": "GooseYArd/brocade_stress",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport pprint\nimport os\nimport socket\nimport ssl\nimport logging\nimport urllib2\nimport sys\nimport argparse\nimport httplib\nimport time\nimport datetime\nimport base64\nimport xmlrpclib\n\nfrom multiprocessing.pool import ThreadPool\nimport Queue\n\nlogger = logging.getLogger('stressreq')\nlogger.setLevel(logging.DEBUG)\nfh = logging.FileHandler(\"stressreq.log\")\nfh.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\n\ndlogger = logging.getLogger('stressreq_data')\ndlogger.setLevel(logging.INFO)\ndfh = logging.FileHandler(\"stressreq_data.log\")\ndfh.setLevel(logging.INFO)\ndformatter = logging.Formatter('%(asctime)s %(message)s')\ndfh.setFormatter(formatter)\ndlogger.addHandler(dfh)\n\nCACERTS=\"/etc/ssl/certs/ca-certificates.crt\"\n\nclass HTTPSConnectionV3(httplib.HTTPSConnection):\n def __init__(self, *args, **kwargs):\n httplib.HTTPSConnection.__init__(self, *args, **kwargs)\n \n def connect(self):\n sock = socket.create_connection((self.host, self.port), self.timeout)\n if self._tunnel_host:\n self.sock = sock\n self._tunnel()\n try:\n self.sock = ssl.wrap_socket(sock, \n self.key_file, \n self.cert_file, \n ssl_version=ssl.PROTOCOL_SSLv3,\n ca_certs=CACERTS\n )\n except ssl.SSLError, e:\n print(\"Trying SSLv3.\")\n self.sock = ssl.wrap_socket(sock, \n self.key_file, \n self.cert_file, \n ssl_version=ssl.PROTOCOL_SSLv23,\n ca_certs=CACERTS\n )\n \nclass HTTPSHandlerV3(urllib2.HTTPSHandler):\n def https_open(self, req):\n return self.do_open(HTTPSConnectionV3, req)\n\ndef mkopener(uri, user, passwd):\n auth_handler = urllib2.HTTPBasicAuthHandler()\n# auth_handler.add_password(realm=\"fire-engine\",\n# uri=uri,\n# user=user,\n# passwd=passwd)\n auth_handler.add_password(realm=\"realm\",\n uri=uri,\n user=user,\n passwd=passwd)\n\n https_handler = HTTPSHandlerV3()\n return urllib2.build_opener(auth_handler, https_handler)\n\ndef main(): \n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"device\", help=\"check DEVICE\")\n parser.add_argument(\"function\", help=\"call api FUNCTION\") \n parser.add_argument(\"body\", help=\"load body from FILE\", type=argparse.FileType('r'))\n parser.add_argument(\"count\", type=int)\n parser.add_argument(\"concurrent\", type=int)\n parser.add_argument(\"-u\", \"--user\", default=\"backups\")\n parser.add_argument(\"-p\", \"--passwd\", default=\"whatever\")\n parser.add_argument(\"-s\", \"--service\", default=\"SLB\")\n \n args = parser.parse_args() \n logger.info(\"using %s\" % args.device)\n\n url = \"https://%s/WS/%s\" % (args.device, args.service)\n body = \"\".join(args.body.readlines())\n base64string = base64.encodestring('%s:%s' % (args.user, args.passwd)).replace('\\n', '')\n q = Queue.PriorityQueue(maxsize=0)\n\n def worker(value):\n opener = mkopener(url, args.user, args.passwd)\n request = urllib2.Request(url, data=body) \n request.add_header(\"Authorization\", \"Basic %s\" % base64string)\n request.add_header('SOAPAction', \"urn:webservicesapi#%s\" % (args.function) )\n \n start_time = time.time()\n status = \"ok\"\n try:\n res = opener.open(request, timeout=10)\n logger.debug(\"code: %s\" % res.code)\n resbody = res.read()\n except Exception, x:\n logger.debug(\"exception: %s\", x)\n status = \"error\"\n\n end_time = time.time()\n elapsed = end_time - start_time \n q.put((value, status, elapsed, end_time))\n print(\"%d: %s: %s\" % (value, status, elapsed))\n\n p = 
    ThreadPool(args.concurrent)\n    p.map(worker, range(args.count))\n    \n    run = time.time()\n\n    s = []\n    e = []\n    print \"done with execution\"\n    for i in range(q.qsize()):\n        # unpack in the order the worker queued: (value, status, elapsed, end_time)\n        (value, status, elapsed, end_time) = q.get()\n        dlogger.info(\"%d %d %d %d %s %s %s\" % (run, args.concurrent, args.count, value, status, elapsed, end_time))\n        s.append(status)\n        e.append(elapsed)\n\n    errc = len([ x for x in s if x == \"error\"]) \n    print \"OK %d ERROR %d\" % (len(s) - errc, errc)\n    print sum(e) / float(len(e))\n\n\nif __name__ == \"__main__\":\n    main()\n"
}
] | 2 |
Rounak-kedia/abc
|
https://github.com/Rounak-kedia/abc
|
f9a5b92a2e5f10ad5eb74c3b05f2fd3ec8f17c8f
|
549ac689bcb4660fd3b36d43d1e01175034f8530
|
6d7336bd7d799849ecf26187d278176aee66f865
|
refs/heads/master
| 2020-09-17T04:49:09.181474 | 2019-11-25T16:36:14 | 2019-11-25T16:36:14 | 223,994,557 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6580406427383423,
"alphanum_fraction": 0.6987060904502869,
"avg_line_length": 22.68181800842285,
"blob_id": "9f37fc2781c4085b4e061de14bbfdec754024cf2",
"content_id": "02160545d3ec9d37288bf019f6f84f5536324cac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 22,
"path": "/graph.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "import matplotlib, numpy, sys\r\nmatplotlib.use('TkAgg')\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\nimport tkinter as Tk\r\n\r\nroot = Tk.Tk()\r\n\r\nf = Figure(figsize=(5,4), dpi=100)\r\nax = f.add_subplot(111)\r\ndata = (20, 35, 30, 35, 27)\r\n\r\nind = numpy.arange(5) # the x locations for the groups\r\nwidth = .5\r\n\r\nrects1 = ax.bar(ind, data, width)\r\n\r\ncanvas = FigureCanvasTkAgg(f, master=root)\r\ncanvas.draw()\r\ncanvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\r\n\r\nTk.mainloop()"
},
{
"alpha_fraction": 0.5122950673103333,
"alphanum_fraction": 0.5737704634666443,
"avg_line_length": 15.571428298950195,
"blob_id": "3a5823b1ed1e000126f8c1f9d1b24a2efcc9afc1",
"content_id": "2331f401af4f7ef212a279660ae9f2d32e87873f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 14,
"path": "/hellotest.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "today='1221/23/12'\r\ndate=int(today[-2:])\r\ndt=[]\r\ndt.append(today)\r\nfor i in range(6):\r\n\td=today[0:8]+str(date-i-1)\r\n\tdt.append(d)\r\nprint(dt)\r\nfor i in range(7):\r\n\ta=dt[i][5:].split(\"/\")\r\n\ta.reverse()\r\n\tdt[i]=\"/\".join(a)\r\ndt.reverse()\r\nprint(dt)"
},
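hellotest.py above builds the previous week's dates by decrementing the day-of-month substring, which drops zero padding and breaks across month boundaries. A sketch of the robust version using datetime.timedelta (my illustration; the example date is arbitrary and chosen to straddle a month boundary):

    import datetime

    end = datetime.date(2021, 12, 3)
    last_week = [end - datetime.timedelta(days=i) for i in range(7)]
    last_week.reverse()  # oldest first, matching the reversed list in hellotest.py
    print([d.strftime("%d/%m") for d in last_week])
    # ['27/11', '28/11', '29/11', '30/11', '01/12', '02/12', '03/12']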
{
"alpha_fraction": 0.5827361345291138,
"alphanum_fraction": 0.6107491850852966,
"avg_line_length": 35,
"blob_id": "f9df658553621e6a0a417a5ecc30b4cf5605ec27",
"content_id": "532be43b4850cea39c710a01418252aef157ac34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6140,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 166,
"path": "/dashboard.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "import matplotlib, numpy, sys\r\nimport sqlite3\r\nfrom datetime import date\r\nmatplotlib.use('TkAgg')\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\nglobal Ecat,Icat\r\nEcat=[\"Entertainment\",\"Utilities\",\"Education\",\"Others\"]\r\nIcat=[\"Salary\",\"Bonus\",\"Gift Income\",\"Pension\"]\r\n'''\r\ndef daily_summary():\r\n\tdaily_sum=Frame(dashboard,width=100,height=100)\r\n\t##############data retrival\r\n\ttoday = str(date.today()).replace(\"-\",\"/\")\r\n\tdte=int(today[-2:])\r\n\tif(date<7)\r\n\t{\r\n\t#################################\tif()\r\n\t}\r\n\tdt=[]\r\n\tfor i in range(7):\r\n\t\td=today[0:8]+str(dte-i)\r\n\t\tdt.append(d)\r\n\tcursor = conn.execute(\"SELECT * FROM tracks\")\r\n\trows = cursor.fetchall()\r\n\tfor row in rows:\r\n\t print row\r\n\t##############\r\n\tLabel(daily_sum,text=\"Here is your daily summary!\",anchor=W,justify=LEFT).pack()\r\n\t#################################################################################################<graph>\r\n\tf = Figure(figsize=(4,4), dpi=100)\r\n\tax = f.add_subplot(111)\r\n\tdata = (20,35,30,35,27,12,5)\r\n\tind = numpy.arange(7) # the x locations for the groups\r\n\twidth = .5\r\n\tfor i in range(7):\r\n\t\ta=dt[i][5:].split(\"/\")\r\n\t\ta.reverse()\r\n\t\tdt[i]=\"/\".join(a)\r\n\tdt.reverse()\r\n\trects1 = ax.bar(ind, data, width,tick_label = dt,color = ['red', 'green','blue','black'])\r\n\tax.set_xlabel('Date') \r\n\tax.set_ylabel('Expense') \r\n\tax.set_title('Daily Expense')\r\n\tcanvas = FigureCanvasTkAgg(f, master=daily_sum)\r\n\tcanvas.draw()\r\n\tcanvas.get_tk_widget().pack(side=TOP, fill=BOTH)\r\n\t##################################################################################################\r\n\tdaily_sum.grid(row=0,column=0)\r\n'''\r\ndef budget_summary():\r\n\tbud_sum=Frame(dashboard)\r\n\tLabel(bud_sum,text=\"Here is your budget summary!\").pack()\r\n\t#<data retrival>\r\n\r\n\tcursor=conn.excute(\"SELECT amount,tag from Expense where eDate Between '%s'\"%())\r\n\t\t\r\n\t#\r\n\t#<graph>\r\n\tf = Figure(figsize=(4,4), dpi=100)\r\n\tax = f.add_subplot(111)\r\n\tlabels = [\"Food\", \"Cab\", \"Medical\", \"Education\"]\r\n\tvalues = [1,2,3,2]\r\n\trects1 = ax.pie(values,labels=labels)\r\n\tax.set_title('Daily Expense')\r\n\tcanvas = FigureCanvasTkAgg(f, master=bud_sum)\r\n\tcanvas.draw()\r\n\tcanvas.get_tk_widget().pack(side=TOP, fill=BOTH)\r\n\t#\r\n\tbud_sum.grid(row=1,column=0)\r\n'''\r\ndef earning_summary():\r\n\tear_sum=Frame(dashboard)\r\n\tLabel(ear_sum,text=\"Here is your earning summary!\").pack()\r\n\t##################################################################################################\r\n\tf = Figure(figsize=(4,4), dpi=100)\r\n\tax = f.add_subplot(111)\r\n\tdata = [[1,2,3,4,5,6,7],[1,3,24,5,43,2,7]]\r\n\tind = numpy.arange(7) # the x locations for the groups\r\n\twidth = .25\r\n\ttick_label = ['mon', 'tues', 'wed', 'thur', 'fri','sat','sun']\r\n\trects1 = ax.bar(ind, data[0], width,tick_label = tick_label,color = ['red'])\r\n\trects1 = ax.bar(ind+width, data[1], width,tick_label = tick_label,color = ['green'])\r\n\tax.set_xlabel('Day') \r\n\tax.set_ylabel('Expense') \r\n\tax.set_title('Daily Expense')\r\n\tcanvas = FigureCanvasTkAgg(f, master=ear_sum)\r\n\tcanvas.draw()\r\n\tcanvas.get_tk_widget().pack(side=TOP, 
fill=BOTH)\r\n\t##################################################################################################\r\n\tear_sum.grid(row=2,column=0)\r\ndef add_to_database():\r\n\t##get data from global variable\r\n\r\n\t# add to database\r\n\r\n\t#show successfull add\r\n\tLabel(tran_win,text=\"Transaction Added\", fg=\"green\", font=(\"calibri\", 11)).grid(row=8,column=3,sticky = W, pady = 2)\r\n\t#add some timer to remove this or close TLW\r\n\r\ndef transaction():\r\n\tglobal tran_win\r\n\ttran_win = Toplevel(dashboard)\r\n\ttran_win.title(\"Add a trannsaction\")\r\n\ttran_win.geometry(\"400x200\")\r\n\tv = StringVar()\r\n\tv.set(1)\r\n\tdef event():\r\n\t\tif v.get()==\"1\":\r\n\t\t\tnl.config(text=\"Name of Expense : \")\r\n\t\t\tradio1.config(text=\"Entertainment\")\r\n\t\t\tradio2.config(text=\"Utilities\")\r\n\t\t\tradio3.config(text=\"Education\")\r\n\t\t\tradio4.config(text=\"Others\")\r\n\t\t\tradioVar.set(0)\r\n\t\telse:\r\n\t\t\tnl.config(text=\"Name of Income : \")\r\n\t\t\tradio1.config(text=\"Salary\")\r\n\t\t\tradio2.config(text=\"Bonus\")\r\n\t\t\tradio3.config(text=\"Gift Income\")\r\n\t\t\tradio4.config(text=\"Pension\")\r\n\t\t\tradioVar.set(0)\r\n\tLabel(tran_win,text=\"Add your transaction : \").grid(row=0,column=0,sticky = W, pady = 2)\r\n\tnl=Label(tran_win,text=\"Name of Expense : \")\r\n\tnl.grid(row=3,column=0,sticky = W, pady = 2)\r\n\tRadiobutton(tran_win,text=\"Expense\",variable=v,value =1,command=event).grid(row=2,column=0,sticky = W, pady = 2)\r\n\tRadiobutton(tran_win,text=\"Income\",variable=v,value =2,command=event).grid(row=2,column=3,sticky = W, pady = 2) \r\n\tname=Entry(tran_win).grid(row=3,column=3,sticky = W, pady = 2)\r\n\tLabel(tran_win,text=\"Amount : \").grid(row=4,column=0,sticky = W, pady = 2)\r\n\tamt=Entry(tran_win).grid(row=4,column=3,sticky = W, pady = 2)\r\n\t#take date time from system\r\n\tLabel(tran_win,text=\"Tags : \").grid(row=5,column=0,sticky = W, pady = 2)\r\n\tradioVar = BooleanVar()\r\n\tradioVar.set(0)\r\n\tradio1 = Radiobutton(tran_win,text = \"Entertainment\", variable = radioVar)\r\n\tradio1.grid(row = 5, column = 3, sticky = W,padx=2,pady=2)\r\n\tradio2 = Radiobutton(tran_win,text = \"Utilities\", variable = radioVar)\r\n\tradio2.grid(row = 5, column = 5, sticky = W,padx=2,pady=2)\r\n\tradio3 = Radiobutton(tran_win,text = \"Education\", variable =radioVar)\r\n\tradio3.grid(row = 6, column = 3, sticky = W,padx=2,pady=2)\r\n\tradio4 = Radiobutton(tran_win,text = \"Others\", variable = radioVar)\r\n\tradio4.grid(row = 6, column = 5, sticky = W,padx=2,pady=2)\r\n\tadd=ttk.Button(tran_win, text=\"Add\", width=10, command=add_to_database)\r\n\tadd.grid(row=7,column=3,sticky = W, pady = 2)\r\n\r\ndef add_transaction():\r\n\tadd_tran=Frame(dashboard)\r\n\tadd=Button(add_tran, text=\"Add a transaction\", width=20, command = transaction,activebackground='green',activeforeground='black',anchor=N).pack()\r\n\tadd_tran.grid(row=4,column=0)\r\n'''\r\n##Starter Code\r\ndef dashboard_screen():\r\n global dashboard\r\n dashboard = Tk()\r\n dashboard.title(\"Dashboard\")\r\n #conn = sqlite3.connect('ExpenseTracker.db')\r\n daily_summary()\r\n Label(dashboard,text=\" \")\r\n budget_summary()\r\n earning_summary()\r\n add_transaction()\r\n dashboard.mainloop()\r\ndashboard_screen()"
},
{
"alpha_fraction": 0.6261022686958313,
"alphanum_fraction": 0.6719576716423035,
"avg_line_length": 23.863636016845703,
"blob_id": "e4d6c2f967a899ea514725fd3f7242893374f22d",
"content_id": "0fdc671d815bbb612daf224e26d87c28f32a7e79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 22,
"path": "/index.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\nfrom tkinter import ttk \r\nroot=Tk()\r\n\r\nroot.title(\"Spent\")\r\nroot.geometry('500x500')\r\nroot.iconbitmap(r'spent.ico')\r\n'''\r\nbg_pic=PhotoImage(file='l_bgr.jpg')\r\nbgr_label = Label(root, image=bg_pic)\r\nbgr_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n'''\r\nwel_l=Label(root,text = \"Welcome to Spent Expense Tracker!\")\r\ndesc_l=Label(root,text = \"Personal Finance made Eassy.\")\r\n'''\r\nwel_l.config(font=(\"Courier\", 14))\r\ndesc_l.config(font=(\"Courier\", 8))\r\n'''\r\nwel_l.place(relx=0.28,rely=0.3)\r\ndesc_l.place(relx=0.301,rely=0.335)\r\n\r\nroot.mainloop()"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.6650000214576721,
"avg_line_length": 26.571428298950195,
"blob_id": "a356e71f30ea4b9a7572f7b75c360ee72a296ed7",
"content_id": "934f46eeca5ce1653fa1403b4a227cc69c66d235",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 600,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 21,
"path": "/page1.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\nfrom tkinter import ttk\r\n \r\n\r\nroot = Tk() # create a GUI window \r\nroot.geometry(\"300x250\") # set the configuration of GUI window \r\nroot.title(\"Account Login\") # set the title of GUI window\r\n\r\n# create a Form label \r\nttk.Label(root,text=\"Choose Login Or Register\", width=\"300\", font=(\"Calibri\", 13)).pack() \r\nttk.Label(root,text=\"\").pack() \r\n\r\n# create Login Button \r\nttk.Button(root,text=\"Login\", width=\"30\").pack() \r\nttk.Label(root,text=\"\").pack() \r\n\r\n# create a register button\r\nttk.Button(root,text=\"Register\", width=\"30\").pack()\r\n \r\n\r\nroot.mainloop() # start the GUI\r\n"
},
{
"alpha_fraction": 0.6867559552192688,
"alphanum_fraction": 0.7023809552192688,
"avg_line_length": 26.63829803466797,
"blob_id": "5feab8939b0ab46970c61bfc0530731335fb7be6",
"content_id": "bc939d3dbf0dc638c12644888d034e68b7d50058",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1344,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 47,
"path": "/try.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "import matplotlib, numpy, sys\r\nmatplotlib.use('TkAgg')\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\n\r\nimport sqlite3\r\nfrom datetime import date\r\nimport datetime\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\nglobal Ecat,Icat,wday\r\nEcat=[\"Entertainment\",\"Utilities\",\"Education\",\"Others\"]\r\nIcat=[\"Salary\",\"Bonus\",\"Gift Income\",\"Pension\"]\r\nwk={Monday:mon,}\r\n\r\ndef budget_summary():\r\n\tbud_sum=Frame(dashboard)\r\n\tLabel(bud_sum,text=\"Here is your budget summary!\").pack()\r\n\t#<data retrival>\r\n\r\n\tcursor=conn.excute(\"SELECT amount,tag from Expense where eDate Between '%s' and '%s'\"%(d1,d2))\r\n\t\t\r\n\t#\r\n\t#<graph>\r\n\tf = Figure(figsize=(4,4), dpi=100)\r\n\tax = f.add_subplot(111)\r\n\trects1 = ax.pie(values,labels=Ecat)\r\n\tax.set_title('Daily Expense')\r\n\tcanvas = FigureCanvasTkAgg(f, master=bud_sum)\r\n\tcanvas.draw()\r\n\tcanvas.get_tk_widget().pack(side=TOP, fill=BOTH)\r\n\t#\r\n\tbud_sum.grid(row=1,column=0)\r\n\r\nglobal dashboard\r\ndashboard = Tk()\r\ndashboard.title(\"Dashboard\")\r\n#\r\ntoday=str(date.today()).replace(\"-\",\"/\")\t#today's date in string format\toutput : 'dd/mm/yyyy'\r\ntdate=int(today[-2:])\t#today's date like 08\r\nday=datetime.datetime.strptime(date, '%d %m %Y').weekday()\t#day of week\r\nstartdate='01'+today[2:]\r\n#\r\nconn=sqlite3.connect(\"ExpenseTracker.db\")\r\nbudget_summary()\r\ndashboard.mainloop()"
},
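Companion note to try.py above: the weekday lookup it gropes toward needs no string parsing at all when starting from a date object (sketch, not repo code):

    import datetime

    today = datetime.date.today()
    print(today.weekday())       # 0..6, Monday == 0
    print(today.strftime("%A"))  # full weekday name, e.g. 'Monday'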
{
"alpha_fraction": 0.6440548896789551,
"alphanum_fraction": 0.6722561120986938,
"avg_line_length": 36.64706039428711,
"blob_id": "afc560521cf264b71754a81f1311adf9fbd5e39a",
"content_id": "e1a4aa5cd1661080e60ba02163ecb2e17c3148de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1312,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 34,
"path": "/test.py",
"repo_name": "Rounak-kedia/abc",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\nfrom tkinter import ttk\r\ndef event():\r\n\tif v.get()==str(1):\r\n\t\tnl.config(text=\"expense\")\r\n\telse:\r\n\t\tnl.config(text=\"income\")\r\n\r\ntran_win = Tk()\r\ntran_win.title(\"Dashboard\")\r\nv = StringVar()\r\nv.set(None)\r\nLabel(tran_win,text=\"Add your transaction : \").grid(row=0,column=0,sticky = W, pady = 2)\r\nnl=Label(tran_win,text=\"Name of Income/expense : \")\r\nnl.grid(row=3,column=0,sticky = W, pady = 2)\r\nRadiobutton(tran_win,text=\"Expense\",variable=v,value =1,command=event).grid(row=2,column=0,sticky = W, pady = 2)\r\nRadiobutton(tran_win,text=\"Income\",variable=v,value =2,command=event).grid(row=2,column=5,sticky = W, pady = 2) \r\nname=Entry(tran_win).grid(row=3,column=5,sticky = W, pady = 2)\r\nLabel(tran_win,text=\"Amount : \").grid(row=4,column=0,sticky = W, pady = 2)\r\namt=Entry(tran_win).grid(row=4,column=5,sticky = W, pady = 2)\r\n#take date time from system\r\nLabel(tran_win,text=\"Tags : \").grid(row=5,column=0,sticky = W, pady = 2)\r\ntags=Entry(tran_win).grid(row=5,column=5,sticky = W, pady = 2)\r\n####\r\n#take tags input split and send\r\n####\r\nadd=ttk.Button(tran_win, text=\"Add\", width=10)\r\nadd.grid(row=6,column=3,sticky = W, pady = 2)\r\n\r\n\r\n'''\r\nadd=Button(dashboard, text=\"Add a transaction\", width=20,activebackground='green',activeforeground='black',anchor=N).pack()\r\n'''\r\ntran_win.mainloop()"
}
] | 7 |
amacasieb/meso
|
https://github.com/amacasieb/meso
|
82d7e1648ee05d11346049b1077daf8e470352c9
|
004a8e3e4c37c01ffe16f5cfc30735d924ee4115
|
677cc1d032194dc77f31919d2429004becc3210a
|
refs/heads/master
| 2020-04-12T08:52:13.791612 | 2017-01-21T09:00:15 | 2017-01-21T09:00:15 | 65,072,599 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6990049481391907,
"alphanum_fraction": 0.6990049481391907,
"avg_line_length": 21.33333396911621,
"blob_id": "769049a995d42616d10317d2fd43f6789d7797ff",
"content_id": "19ea26945553ddd7a15c2b0db0490d51f015dd23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 402,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 18,
"path": "/osem/settings/stage.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from ._base import *\nimport os\n\n\nDEBUG = False\n\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('DSN'),\n}\n\nINSTALLED_APPS += (\n 'raven.contrib.django.raven_compat',\n)\n\nEMAIL_BACKEND = 'django_mailgun.MailgunBackend'\nMAILGUN_ACCESS_KEY = os.environ.get('MAILGUN_ACCESS_KEY')\nMAILGUN_SERVER_NAME = os.environ.get('MAILGUN_SERVER_NAME')\nSITE_DOMAIN = os.environ.get('SITE_DOMAIN', 'http://osem.shalakh.in')\n"
},
{
"alpha_fraction": 0.5434018969535828,
"alphanum_fraction": 0.543803334236145,
"avg_line_length": 31.887788772583008,
"blob_id": "6b8410d2c7ede96356c535d3b529c40b4f9ca1b1",
"content_id": "9c3f7fb8098fac83176bb6b34f950125d876a42d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9965,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 303,
"path": "/apps/users/forms.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.contrib.auth import password_validation\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom PIL import Image\n\nfrom .models import User\n\n\nclass RegisterForm(forms.ModelForm):\n \"\"\"\n used to register new user\n \"\"\"\n password_again = forms.CharField(\n label=_(\"Password again\"),\n widget=forms.PasswordInput(attrs={'placeholder': _(\"Password again\")}))\n\n def __init__(self, *args, **kwargs):\n super(RegisterForm, self).__init__(*args, **kwargs)\n self.fields[\"email\"].required = True\n\n def clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email=email).exists():\n raise forms.ValidationError(\n _('User with such email already exists.'))\n return email\n\n def clean(self):\n \"\"\"\n Verifies that the values entered into the password fields match\n \"\"\"\n self.cleaned_data = super(RegisterForm, self).clean()\n if ('password' in self.cleaned_data and\n 'password_again' in self.cleaned_data and\n self.cleaned_data['password'] !=\n self.cleaned_data['password_again']):\n raise forms.ValidationError(\n _(\"Passwords don't match. Please enter both fields again.\"))\n return self.cleaned_data\n\n def save(self, commit=True):\n user = super(RegisterForm, self).save(commit=False)\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n class Meta:\n model = User\n fields = [\n 'username', 'first_name', 'last_name', 'email', 'phone',\n 'password', 'photo'\n ]\n widgets = {\n 'username': forms.TextInput(attrs={\n 'placeholder': _('Username')\n }),\n 'password': forms.PasswordInput(attrs={\n 'placeholder': _('Password')\n }),\n 'first_name': forms.TextInput(attrs={\n 'placeholder': _('First name')\n }),\n 'last_name': forms.TextInput(attrs={\n 'placeholder': _('Last name')\n }),\n 'email': forms.EmailInput(attrs={\n 'placeholder': _('Email')\n }),\n 'phone': forms.TextInput(attrs={\n 'placeholder': _('Phone')\n }),\n 'photo': forms.FileInput(attrs={\n 'placeholder': _('Upload photo')\n })\n }\n\n\nclass LoginForm(forms.Form):\n \"\"\"\n form to login user\n \"\"\"\n error_messages = {\n 'invalid_username': _(\"Username not registered.\"),\n }\n username = forms.CharField(\n max_length=254,\n widget=forms.TextInput(attrs={'autofocus': '',\n 'placeholder': _(\"Username\")}),\n )\n password = forms.CharField(\n label=_(\"Password\"),\n widget=forms.PasswordInput(attrs={'placeholder': _('Password')}))\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n if username and not User.objects.filter(username=username).exists():\n raise forms.ValidationError('Username not registered.',\n code='invalid_username')\n return username\n\n\nclass PhotoForm(forms.ModelForm):\n \"\"\"\n Handle photo upload\n \"\"\"\n\n class Meta:\n model = User\n fields = ['photo']\n\n def save(self, commit=True, *args, **kwargs):\n instance = super(PhotoForm, self).save(commit=False)\n if commit:\n instance.save()\n instance.resize_photo()\n instance.create_thumbnail()\n return instance\n\n\n\nclass UserProfileForm(forms.ModelForm):\n \"\"\"\n allows user to edit profile data\n \"\"\"\n\n error_messages = {\n 'email_in_use': _(\"This email address is already in use. 
\" +\n \"Please supply a different email address.\"),\n }\n\n class Meta:\n model = User\n fields = [\n 'id', 'username', 'first_name', 'last_name', 'email', 'phone',\n 'photo'\n ]\n widgets = {\n 'username': forms.TextInput(attrs={\n 'placeholder': _('Username')\n }),\n 'first_name': forms.TextInput(attrs={\n 'placeholder': _('First name')\n }),\n 'last_name': forms.TextInput(attrs={\n 'placeholder': _('Last name')\n }),\n 'phone': forms.TextInput(attrs={\n 'placeholder': _('Phone')\n }),\n 'photo': forms.FileInput(attrs={\n 'placeholder': _('Upload photo')\n })\n }\n\n def clean_email(self):\n cleaned_data = super(UserProfileForm, self).clean()\n username = cleaned_data.get('username')\n email = cleaned_data.get('email')\n if email and User.objects.filter(email=email)\\\n .exclude(username=username).count():\n raise forms.ValidationError(self.error_messages['email_in_use'])\n return email\n\n\nclass ResetPasswordForm(forms.ModelForm):\n \"\"\"\n form for reset password\n \"\"\"\n error_messages = {\n 'email_not_found': _(\"Email not found\"),\n }\n\n class Meta:\n model = User\n fields = ['email']\n\n def clean_email(self):\n cleaned_data = super(ResetPasswordForm, self).clean()\n email = cleaned_data.get('email')\n if email and User.objects.filter(email=email).count() == 0:\n raise forms.ValidationError(self.error_messages['email_not_found'])\n return email\n\n\nclass ChangePasswordForm(forms.ModelForm):\n \"\"\"\n allows user to change password\n \"\"\"\n\n error_messages = {\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n 'password_incorrect': _(\"Your old password was entered incorrectly. \" +\n \"Please enter it again.\"),\n 'old_password_required': _(\"Please enter your old password.\"),\n 'new_password_repeat': _(\"Please type new password two times.\")\n }\n\n new_password = forms.CharField(\n label=_(\"New password\"),\n required=False,\n widget=forms.PasswordInput(\n attrs={'placeholder': _(\"New password\")}\n ),\n help_text=password_validation.password_validators_help_text_html())\n new_password_again = forms.CharField(\n label=_(\"New password confirmation\"),\n required=False,\n widget=forms.PasswordInput(\n attrs={'placeholder': _(\"New password again\")}\n )\n )\n old_password = forms.CharField(\n label=_(\"Old password\"),\n required=False,\n widget=forms.PasswordInput(\n attrs={'placeholder': _(\"Old password\")}\n )\n )\n field_order = ['old_password', 'new_password', 'new_password_again']\n\n class Meta:\n model = User\n fields = ['username']\n widgets = {\n 'username': forms.TextInput(attrs={\n 'placeholder': _('Username')\n })\n }\n\n def clean(self):\n new_password = self.cleaned_data['new_password']\n new_password_again = self.cleaned_data['new_password_again']\n old_password = self.cleaned_data['old_password']\n\n if (new_password_again or new_password) and not old_password:\n raise forms.ValidationError(\n self.error_messages['old_password_required'],\n code='old_password_required',)\n if old_password:\n if not self.instance.check_password(old_password):\n raise forms.ValidationError(\n self.error_messages['password_incorrect'],\n code='password_incorrect',\n )\n if new_password and new_password_again:\n if new_password != new_password_again:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',)\n password_validation.validate_password(\n new_password_again,\n self.instance\n )\n else:\n raise forms.ValidationError(\n self.error_messages['new_password_repeat'],\n 
code='new_password_repeat',\n )\n\n def save(self, commit=True, *args, **kwargs):\n instance = super(ChangePasswordForm, self).save(commit=False)\n new_password = self.cleaned_data['new_password']\n new_password_again = self.cleaned_data['new_password_again']\n old_password = self.cleaned_data['old_password']\n if all([new_password,\n new_password_again,\n old_password,\n new_password == new_password_again,\n self.instance.check_password(old_password)]):\n instance.set_password(new_password)\n\n if commit:\n instance.save()\n\n return instance\n\n\nclass UserAdminForm(forms.ModelForm):\n \"\"\"\n Form for user admin\n \"\"\"\n class Meta:\n model = User\n fields = [\n 'username', 'email', 'password', 'first_name',\n 'last_name', 'phone', 'photo', 'validation_code', 'approving_user',\n 'balance', 'is_validated', 'user_department', 'is_department',\n 'is_approved', 'is_active', 'is_staff', 'is_superuser', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'expired_at',\n 'email_on_transfer_requested',\n 'email_on_transfer_completed',\n 'email_on_resource_sent',\n 'email_on_resource_received'\n ]\n\n def clean(self):\n if not self.cleaned_data['user_department']:\n raise ValidationError(\n _('Every user must be attached to department'))\n return super(UserAdminForm, self).clean()\n"
},
{
"alpha_fraction": 0.5866873264312744,
"alphanum_fraction": 0.5928792357444763,
"avg_line_length": 48.69230651855469,
"blob_id": "490c32803e9985ce631034a935e4b817ae7d3a54",
"content_id": "de32e6f819762765445d708f79c83973f98e9f4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 646,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 13,
"path": "/templates/_tiny-profile.html",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "{% load staticfiles %}\n\n<div class=\"tiny-profile dark\">\n {% if type == 'account' %}\n <img src=\"{% if user.photo %}{{ user.thumbnail.url }}{% else %}{% static \"img/placeholder-img.png\" %}{% endif %}\" class=\"img-circle avatar avatar-small\">\n <h4 class=\"name\">{{ account.name }}</h4>\n <p class=\"username\">{{ account.account_department.name }}</p>\n {% else %}\n <img src=\"{% if user.photo %}{{ user.thumbnail.url }}{% else %}{% static \"img/placeholder-img.png\" %}{% endif %}\" class=\"img-circle avatar avatar-small\">\n <h4 class=\"name\">{{ user.get_full_name }}</h4>\n <p class=\"username\">@{{ user.username }}</p>\n {% endif %}\n</div>\n"
},
{
"alpha_fraction": 0.5730189681053162,
"alphanum_fraction": 0.5861165523529053,
"avg_line_length": 42.01408386230469,
"blob_id": "48a6165ad01b413a66e0078ff4a02dd93fd6761f",
"content_id": "365bc58931dfdead3ac5e80d2d818bafab5023c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3054,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 71,
"path": "/apps/users/migrations/0003_auto_20160210_2138.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-10 21:38\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_auto_20160209_1957'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('number', models.CharField(blank=True, max_length=255, null=True, verbose_name='Account number')),\n ('registered_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Registration date')),\n ('expired_at', models.DateTimeField(blank=True, null=True, verbose_name='Expiration date')),\n ('validation_code', models.CharField(blank=True, max_length=32, null=True, verbose_name='Validation code')),\n ('is_validated', models.BooleanField(db_index=True, default=False)),\n ],\n options={\n 'db_table': 'accounts',\n },\n ),\n migrations.CreateModel(\n name='Department',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255, null=True, unique=True, verbose_name='Name')),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Created at')),\n ],\n options={\n 'db_table': 'departments',\n },\n ),\n migrations.RenameField(\n model_name='user',\n old_name='expiration_date',\n new_name='expired_at',\n ),\n migrations.RemoveField(\n model_name='user',\n name='is_approved',\n ),\n migrations.AddField(\n model_name='user',\n name='approving_user',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_approved', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='account',\n name='approving_user',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='account_approved', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='account',\n name='department',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Department', verbose_name='Department'),\n ),\n migrations.AddField(\n model_name='account',\n name='owner',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6565117835998535,
"alphanum_fraction": 0.669205367565155,
"avg_line_length": 28.616540908813477,
"blob_id": "f09cd273a6cc70b9cd03056538e9799abeb25895",
"content_id": "2676609613422eba9539f6e2b49bfa5a07a150b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3939,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 133,
"path": "/osem/settings/_base.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport dj_database_url\nfrom django.utils.translation import ugettext_lazy as _\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(\n os.path.abspath(__file__))))\nsys.path.append(os.path.join(BASE_DIR, 'apps'))\n\nLANGUAGES = (\n ('en', _('English')),\n)\n\nSECRET_KEY = '!)p05$mzur8qtssha(!qsshwu%ka_qh2p(ua^i%kwc6kia(xe='\nDEBUG = True\nALLOWED_HOSTS = ['*']\nINSTALLED_APPS = (\n 'django_extensions',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.humanize',\n 'django.contrib.staticfiles',\n 'django.contrib.contenttypes',\n 'django_filters',\n 'django_tables2',\n\n 'osem_admin',\n 'users',\n 'orders',\n\n 'social.apps.django_app.default',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',\n)\n\nROOT_URLCONF = 'osem.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n 'orders.context_processors.requests_to_me',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'osem.wsgi.application'\nDATABASES = {\n 'default': dj_database_url.config(\n default='postgres://osem@localhost/osem'\n )\n}\n\nAUTHENTICATION_BACKENDS = (\n 'social.backends.facebook.FacebookOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nLANGUAGE_CODE = 'en_US'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static_collected')\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\nLOGIN_URL = '/user/login/'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = 'home'\nAUTH_USER_MODEL = 'users.User'\nDEFAULT_FROM_EMAIL = os.environ.get('FROM_EMAIL', '[email protected]')\n\n\nEMAIL_BACKEND = 'django_mailgun.MailgunBackend'\nMAILGUN_ACCESS_KEY = os.environ.get('MAILGUN_ACCESS_KEY')\nMAILGUN_SERVER_NAME = os.environ.get('MAILGUN_SERVER_NAME')\nEMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.mailgun.org')\nEMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '[email protected]')\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '6dbdf5fe720a40e0e2f12a8eefcd3b51')\nEMAIL_PORT = os.environ.get('EMAIL_PORT', 587)\nEMAIL_USE_TLS = True\n\nBROKER_URL = 'amqp://guest:guest@localhost:5672/'\n\nCACHES = {\n 'default': {\n 'BACKEND': 'caching.backends.memcached.MemcachedCache',\n 'LOCATION': [\n '127.0.0.1:11211',\n ],\n 'KEY_PREFIX': 'weee:',\n },\n}\n\nCACHE_COUNT_TIMEOUT = 60\n\nTHUMBNAIL_SIZE = (128, 128)\nPHOTO_SIZE = 
(256, 256)\n\nSOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('FACEBOOK_APP_ID')\nSOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FACEBOOK_APP_SECRET')\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\n 'fields': 'id,name,email',\n}\n"
},
{
"alpha_fraction": 0.5469940900802612,
"alphanum_fraction": 0.5503810048103333,
"avg_line_length": 29.28205108642578,
"blob_id": "062e44c77a67b1b598434013836f8d4d35ac1818",
"content_id": "ba7caaf5da480050eb962521faf0b06f33573f15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1181,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 39,
"path": "/static/js/pages/search.js",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "(function () {\n var form = $('.search-form');\n var queryField = $('.query-field');\n var resultContainer = $('.search-result .row');\n\n form.on('submit', function (e) {\n e.preventDefault();\n\n var q = queryField.val();\n\n if (q) {\n $.getJSON(urls.userAutocomplete, {\n term: q\n }, function (data) {\n renderResult(data);\n });\n }\n });\n\n function renderResult(data) {\n var content = '';\n\n data.forEach(function (user) {\n var img = user.image ? user.image : defaultImg;\n\n content += '<div class=\"col-sm-4 col-md-3\">';\n content += '<a class=\"result-item small-profile dark profile-selectable\" href=\"' + urls.send + '?dest=' + user.value + '\">';\n content += '<div class=\"result-overlay hidden-sm\"><span>Send</span></div>'\n content += '<img src=\"' + img + '\" class=\"img-circle avatar avatar-medium\">';\n content += '<h4 class=\"name\">' + user.full_name + '</h4>';\n content += '<p class=\"username\">@' + user.value + '</p>';\n content += '<p class=\"visible-xs visible-sm\"><strong>SEND</strong></p>'\n content += '</a>';\n content += '</div>';\n });\n\n resultContainer.html(content);\n }\n})();\n"
},
{
"alpha_fraction": 0.5541965961456299,
"alphanum_fraction": 0.559982419013977,
"avg_line_length": 36.40821838378906,
"blob_id": "630d8bcabdeae150046dcaac8077f9ca5117488a",
"content_id": "b6151700a4b1bf30bc58b10b6f3ef165a9c0ee74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13654,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 365,
"path": "/apps/orders/models.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import uuid\nfrom decimal import Decimal\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ugettext\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils import timezone\nfrom caching.base import CachingManager, CachingMixin\n\nfrom users.models import User\nfrom users.models import Account\nfrom .tasks import notify_on_requested\nfrom .tasks import notify_on_status_change\nfrom .tasks import notify_on_send\nfrom .tasks import notify_on_request_order\nfrom .tasks import notify_on_request_approved\nfrom .tasks import notify_on_request_rejected\n\n\n@python_2_unicode_compatible\nclass Transaction(CachingMixin, models.Model):\n \"\"\"\n Data about transactions is stored here\n \"\"\"\n TYPE_1 = 1\n TYPE_2 = 2\n TYPE_3 = 3\n TYPE_CHOICES = ((TYPE_1, _('1')), (TYPE_2, _('2')), (TYPE_3, _('3')), )\n\n STATUS_1 = 1\n STATUS_2 = 2\n STATUS_3 = 3\n STATUS_CHOICES = ((STATUS_1, _('Pending')),\n (STATUS_2, _('Approved')),\n (STATUS_3, _('Not approved')), )\n\n uid = models.UUIDField(default=uuid.uuid4, editable=False, null=True)\n transaction_type = models.IntegerField(\n choices=TYPE_CHOICES, db_index=True, null=True)\n transaction_status = models.IntegerField(\n choices=STATUS_CHOICES, db_index=True, null=True)\n created_at = models.DateTimeField(\n _('Created at'),\n auto_now_add=True,\n null=True)\n completed_at = models.DateTimeField(\n _('Completed at'),\n blank=True,\n null=True)\n origin_account = models.ForeignKey(Account,\n related_name='origin_account',\n null=True)\n receiving_account = models.ForeignKey(Account,\n related_name='receiving_account',\n null=True)\n approving_user = models.ForeignKey('users.User',\n blank=True,\n null=True,\n db_index=True,\n related_name='transaction_approved')\n amount = models.DecimalField(max_digits=12, decimal_places=2, null=True)\n\n objects = CachingManager()\n\n def __str__(self):\n return '%s - %s' % (self.uid, self.amount)\n\n def get_absolute_url(self):\n return settings.SITE_DOMAIN + reverse('orders:transaction',\n args=[self.id])\n\n def operate(self, order):\n '''\n Add transcation to Order, and if transaction_type = TYPE_2\n do calculations with users balance and add value to completed_at.\n '''\n self.amount = order.amount\n if (self.transaction_type == self.TYPE_2): # SEND & REQUEST\n origin_balance = order.origin_user.balance - order.amount\n receiving_balance = order.receiving_user.balance + order.amount\n\n if order.order_type == Order.TYPE_D:\n origin_balance = order.origin_user.balance + order.amount\n receiving_balance = order.receiving_user.balance - order.amount\n\n if origin_balance >= 0:\n now = timezone.now()\n self.completed_at = now\n order.completed_at = now\n self.transaction_status = self.STATUS_2 # Approved\n self.save()\n order.save()\n\n order.origin_user.update_balance(origin_balance)\n order.receiving_user.update_balance(receiving_balance)\n\n notify_on_send.apply_async(kwargs={'order': order})\n\n if self.transaction_type == self.TYPE_3: # WITHDRAW\n receiving_balance = self.receiving_account.owner.balance - order.amount\n order.receiving_user.update_balance(receiving_balance)\n\n 
self.transaction_status = self.STATUS_1 # Pending\n self.save()\n order.save()\n\n notify_on_requested.apply_async(\n kwargs={'transaction': self, 'user': order.origin_user})\n\n if self.transaction_type == self.TYPE_1: # DEPOSIT\n self.transaction_status = self.STATUS_1 # Pending\n self.save()\n order.save()\n\n notify_on_requested.apply_async(\n kwargs={'transaction': self, 'user': order.origin_user})\n\n def approve(self, user):\n order = Order.objects.get(id=self.order.id)\n\n if self.transaction_type == self.TYPE_1 or self.transaction_type == self.TYPE_3:\n origin_user_balance = order.origin_user.balance + order.amount\n receiving_user_balance = order.receiving_user.balance - order.amount\n else:\n origin_user_balance = order.origin_user.balance - order.amount\n receiving_user_balance = order.receiving_user.balance + order.amount\n\n if (self.transaction_status != self.STATUS_2 and\n origin_user_balance >= 0):\n\n if self.transaction_type != self.TYPE_3:\n order.receiving_user.update_balance(receiving_user_balance)\n order.origin_user.update_balance(origin_user_balance)\n\n self.approving_user = user\n self.transaction_status = self.STATUS_2 # approved\n now = timezone.now()\n order.completed_at = now\n order.save()\n self.completed_at = now\n self.save()\n notify_on_status_change.apply_async(\n kwargs={'user': order.origin_user,\n 'transaction': self})\n\n def cancel(self, user):\n order = Order.objects.get(pk=self.order.pk)\n now = timezone.now()\n\n if (self.transaction_status != self.STATUS_2 and\n self.transaction_status != self.STATUS_3):\n self.approving_user = user\n self.transaction_status = self.STATUS_3 # canceled\n\n if self.transaction_type == self.TYPE_3:\n receiving_user_balance = order.receiving_user.balance + order.amount\n order.receiving_user.update_balance(receiving_user_balance)\n\n order.completed_at = now\n order.save()\n self.completed_at = now\n self.save()\n\n notify_on_status_change.apply_async(\n kwargs={'user': order.origin_user,\n 'transaction': self})\n\n class Meta:\n db_table = 'transactions'\n\n\n@python_2_unicode_compatible\nclass Order(CachingMixin, models.Model):\n \"\"\"\n Data stored in order\n \"\"\"\n TYPE_A = 1\n TYPE_B = 2\n TYPE_C = 3\n TYPE_D = 4\n TYPE_CHOICES = ((TYPE_A, _('Resource Allocation')),\n (TYPE_B, _('Resource Transfer')),\n (TYPE_C, _('Dept Account Reconciliation')),\n (TYPE_D, _('Resource Spend')), )\n\n uid = models.UUIDField(default=uuid.uuid4, editable=False, null=True)\n origin_user = models.ForeignKey('users.User',\n related_name='origin_user',\n null=True)\n receiving_user = models.ForeignKey('users.User',\n related_name='receiving_user',\n null=True)\n completed_at = models.DateTimeField(\n _('Completed at'),\n blank=True,\n db_index=True,\n null=True)\n created_at = models.DateTimeField(\n _('Creation date'),\n auto_now_add=True,\n db_index=True,\n null=True)\n order_type = models.IntegerField(choices=TYPE_CHOICES,\n db_index=True,\n null=True)\n amount = models.DecimalField(max_digits=12, decimal_places=2, null=True)\n transaction = models.OneToOneField(Transaction,\n on_delete=models.CASCADE,\n null=True,\n blank=True)\n remarks = models.TextField(blank=True, null=True)\n\n objects = CachingManager()\n\n def __str__(self):\n return '%s - %s' % (self.uid, self.amount)\n\n def get_absolute_url(self):\n return settings.SITE_DOMAIN + reverse('orders:order', args=[self.id])\n\n def save(self,\n origin_account=None,\n receiving_account=None,\n *args,\n **kwargs):\n self.full_clean()\n\n origin_user_balance = 
self.origin_user.balance - self.amount\n receiving_user_balance = self.receiving_user.balance - self.amount\n\n if not self.pk and origin_user_balance >= 0 and (self.order_type == self.TYPE_B or self.order_type == self.TYPE_C):\n super(Order, self).save(*args, **kwargs)\n\n # Resource Transfer or Dept Account Reconciliation\n transaction_type = Transaction.TYPE_2\n\n self.transaction = Transaction.objects.create(\n transaction_type=transaction_type,\n origin_account=origin_account,\n receiving_account=receiving_account)\n self.transaction.operate(self)\n\n elif not self.pk and receiving_user_balance >= 0 and (self.order_type == self.TYPE_A or self.order_type == self.TYPE_D):\n super(Order, self).save(*args, **kwargs)\n\n # Resource Allocation\n if self.order_type == self.TYPE_A:\n transaction_type = Transaction.TYPE_1\n\n # Resource Spend\n if self.order_type == self.TYPE_D:\n transaction_type = Transaction.TYPE_3\n\n self.transaction = Transaction.objects.create(\n transaction_type=transaction_type,\n origin_account=origin_account,\n receiving_account=receiving_account)\n self.transaction.operate(self)\n\n else:\n # do not create order if user does not have sufficient resources\n super(Order, self).save(*args, **kwargs)\n\n def clean(self):\n amount = self.amount\n if amount:\n if amount < 0:\n raise ValidationError({'amount': _(\"You can't send order \" +\n 'with negative amount')})\n if amount == Decimal(0):\n raise ValidationError({'amount': _(\"You can't send order \" +\n 'with zero amount')})\n return amount\n\n class Meta:\n db_table = 'orders'\n\n\n@python_2_unicode_compatible\nclass RequestOrder(CachingMixin, models.Model):\n STATUS_1 = 1\n STATUS_2 = 2\n STATUS_3 = 3\n STATUS_CHOICES = ((STATUS_1, _('Pending')),\n (STATUS_2, _('Approved')),\n (STATUS_3, _('Not approved')), )\n\n uid = models.UUIDField(default=uuid.uuid4, editable=False, null=True)\n origin_user = models.ForeignKey('users.User',\n related_name='request_origin_user',\n null=True)\n receiving_user = models.ForeignKey('users.User',\n related_name='request_receiving_user',\n null=True)\n amount = models.DecimalField(max_digits=12, decimal_places=2, null=True)\n status = models.IntegerField(\n choices=STATUS_CHOICES, db_index=True, null=True, default=STATUS_1)\n requested_by = models.ForeignKey('users.User',\n related_name='request_requested_user',\n null=True,\n blank=True)\n approved_by = models.ForeignKey('users.User',\n null=True,\n blank=True)\n approved_at = models.DateTimeField(blank=True, null=True)\n notes = models.TextField(blank=True, null=True)\n order = models.OneToOneField(Order,\n on_delete=models.CASCADE,\n null=True,\n blank=True)\n created_at = models.DateTimeField(\n _('Creation date'),\n auto_now_add=True,\n null=True)\n\n def get_absolute_url(self):\n return settings.SITE_DOMAIN + reverse('orders:request_detail',\n args=[self.id])\n\n def approve(self):\n self.status = self.STATUS_2\n self.approved_at = timezone.now()\n\n self.save()\n self.create_order()\n\n notify_on_request_approved.apply_async(\n kwargs={'request_order': self})\n\n def reject(self):\n self.status = self.STATUS_3\n self.save()\n\n notify_on_request_rejected.apply_async(\n kwargs={'request_order': self})\n\n def create_order(self):\n order = Order(\n origin_user = self.origin_user,\n receiving_user = self.receiving_user,\n amount = self.amount,\n order_type = 2\n )\n order.save()\n\n self.order = order\n self.save()\n\n transaction = order.transaction\n transaction.approve(self.origin_user)\n\n\n@receiver(post_save, 
sender=RequestOrder)\ndef update_stock(sender, instance, created, **kwargs):\n if created:\n notify_on_request_order.apply_async(\n kwargs={'request_order': instance})\n"
},
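Illustrative usage of the models above (not repo code; it needs a configured Django project with the users and orders apps installed, and the usernames are hypothetical fixtures). A TYPE_B transfer builds its Transaction inside Order.save() and settles immediately when the sender's balance covers the amount:

    from decimal import Decimal
    from users.models import User
    from orders.models import Order

    alice = User.objects.get(username="alice")  # hypothetical sender
    bob = User.objects.get(username="bob")      # hypothetical receiver

    order = Order(origin_user=alice, receiving_user=bob,
                  amount=Decimal("25.00"), order_type=Order.TYPE_B)
    order.save()  # creates a TYPE_2 Transaction and moves both balances

    print(order.transaction.get_transaction_status_display())  # 'Approved'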
{
"alpha_fraction": 0.5416666865348816,
"alphanum_fraction": 0.5922619104385376,
"avg_line_length": 24.846153259277344,
"blob_id": "90cb3265c6fba58850ff5b69fb7e08f746261709",
"content_id": "a801c650213797d66a4f8c067ef8a6edee8761e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 26,
"path": "/apps/orders/migrations/0006_auto_20160303_2306.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-03-03 23:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0005_auto_20160211_1218'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='uid',\n field=models.UUIDField(default=uuid.uuid4, editable=False, null=True),\n ),\n migrations.AddField(\n model_name='transaction',\n name='uid',\n field=models.UUIDField(default=uuid.uuid4, editable=False, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5490981936454773,
"alphanum_fraction": 0.6132264733314514,
"avg_line_length": 23.950000762939453,
"blob_id": "9225226debf7d8336698190f3d60c09d65b08b0d",
"content_id": "bc3e58cd3473cb6439e207cd3cdb27d59c0c610d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 20,
"path": "/apps/orders/migrations/0007_transaction_completed_at.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-03-11 13:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0006_auto_20160303_2306'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='transaction',\n name='completed_at',\n field=models.DateTimeField(blank=True, null=True, verbose_name='Completed at'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5942503809928894,
"alphanum_fraction": 0.5994662642478943,
"avg_line_length": 37.34418487548828,
"blob_id": "a4970743e87c15bb29316f043b00c0af9ae768b7",
"content_id": "a3a0fc84ba062d65c338eedbcce89251ba7bcf15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8244,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 215,
"path": "/apps/osem_admin/views.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import csv\n\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.http import StreamingHttpResponse\nfrom django.views.generic import View\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom django_tables2 import RequestConfig\n\nfrom users.models import User\nfrom users.models import Account\nfrom orders.models import Order\nfrom orders.models import Transaction\nfrom .tables import UsersTable\nfrom .tables import AccountsTable\nfrom .tables import OrdersTable\nfrom .tables import TransactionsTable\n\n\ndef stream_csv(rows, filename):\n class Echo(object):\n def write(self, value):\n \"\"\"\n Write the value by returning it, instead of storing in a buffer.\n \"\"\"\n return value\n\n pseudo_buffer = Echo()\n writer = csv.writer(pseudo_buffer)\n response = StreamingHttpResponse(\n (writer.writerow([row]) for row in rows),\n content_type=\"text/csv\")\n response[\n 'Content-Disposition'] = 'attachment; filename=\"' + filename + '.csv\"'\n return response\n\n\nclass Users(LoginRequiredMixin, TemplateView):\n template_name = \"osem_admin/users/users.html\"\n\n def get(self, request, *args, **kwargs):\n table = UsersTable(User.objects.all(), order_by=\"-date_joined\")\n RequestConfig(request).configure(table)\n return render(request, self.template_name, {'table': table})\n\n def post(self, request, *args, **kwargs):\n table = UsersTable(User.objects.all())\n RequestConfig(request).configure(table)\n\n action = int(request.POST.get('action'))\n pks = request.POST.getlist(\"selection\")\n selected_objects = User.objects.filter(pk__in=pks)\n\n # approve\n if action == 1:\n for user in selected_objects:\n user.approve(request.user)\n # expire\n if action == 2:\n for user in selected_objects:\n user.expire(request.user)\n\n return render(request, self.template_name, {'table': table})\n\n\nclass Accounts(LoginRequiredMixin, TemplateView):\n template_name = \"osem_admin/accounts/accounts.html\"\n\n def get(self, request, *args, **kwargs):\n table = AccountsTable(Account.objects.all(), order_by=\"-registered_at\")\n RequestConfig(request).configure(table)\n return render(request, self.template_name, {'table': table})\n\n def post(self, request, *args, **kwargs):\n table = AccountsTable(Account.objects.all())\n RequestConfig(request).configure(table)\n\n action = int(request.POST.get('action'))\n pks = request.POST.getlist(\"selection\")\n selected_objects = Account.objects.filter(pk__in=pks)\n\n # approve\n if action == 1:\n for account in selected_objects:\n account.approve(request.user)\n # expire\n if action == 2:\n for account in selected_objects:\n account.expire(request.user)\n\n return render(request, self.template_name, {'table': table})\n\n\nclass Orders(LoginRequiredMixin, TemplateView):\n template_name = \"osem_admin/orders/orders.html\"\n\n def get(self, request, pk=None, *args, **kwargs):\n if pk:\n table = OrdersTable(Order.objects.filter(pk=int(pk)))\n else:\n table = OrdersTable(Order.objects.filter(), order_by=\"-created_at\")\n RequestConfig(request).configure(table)\n return render(request, self.template_name, {'table': table})\n\n def post(self, request, *args, **kwargs):\n table = OrdersTable(Order.objects.all())\n RequestConfig(request).configure(table)\n\n action = int(request.POST.get('action'))\n pks = request.POST.getlist(\"selection\")\n selected_objects = Order.objects.filter(pk__in=pks)\n\n # Download as csv\n if action == 3:\n rows = [\n \"'selection', 
'uid','order_type', 'origin_user',\" +\n \"'receiving_user','amount', 'remarks',\" +\n \"'created_at','completed_at'\"\n ]\n for order in selected_objects:\n rowdata = \"\"\"{0},{1},{2},{3},{4},{5},{6},{7}\"\"\".format(\n order.uid, order.order_type, order.origin_user,\n order.receiving_user, order.amount, order.remarks,\n order.created_at, order.completed_at)\n rows.append(rowdata)\n return stream_csv(rows, 'orders' + str(timezone.now()))\n\n return render(request, self.template_name, {'table': table})\n\n\nclass OrdersDownload(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n rows = [\n \"'selection', 'uid','order_type', 'origin_user',\" +\n \"'receiving_user','amount', 'remarks',\" +\n \"'created_at','completed_at'\"\n ]\n for order in Order.objects.all():\n rowdata = \"\"\"{0},{1},{2},{3},{4},{5},{6},{7}\"\"\".format(\n order.uid, order.order_type, order.origin_user,\n order.receiving_user, order.amount, order.remarks,\n order.created_at, order.completed_at)\n rows.append(rowdata)\n return stream_csv(rows, 'orders' + str(timezone.now()))\n\n\nclass Transactions(LoginRequiredMixin, TemplateView):\n template_name = \"osem_admin/transactions/transactions.html\"\n\n def get(self, request, pk=None, *args, **kwargs):\n if pk:\n table = TransactionsTable(Transaction.objects.filter(pk=int(pk)))\n else:\n table = TransactionsTable(\n Transaction.objects.filter(origin_account__isnull=False,\n receiving_account__isnull=False),\n order_by=\"-created_at\")\n RequestConfig(request).configure(table)\n return render(request, self.template_name, {'table': table})\n\n def post(self, request, *args, **kwargs):\n table = TransactionsTable(Transaction.objects.all())\n RequestConfig(request).configure(table)\n\n action = int(request.POST.get('action'))\n pks = request.POST.getlist(\"selection\")\n selected_objects = Transaction.objects.filter(pk__in=pks)\n\n # Approve\n if action == 1:\n for transaction in selected_objects:\n transaction.approve(request.user)\n # Cancel\n if action == 2:\n for transaction in selected_objects:\n transaction.cancel(request.user)\n\n # Download as csv\n if action == 3:\n rows = [\n \"'uid', 'transaction_type','transaction_status', 'amount',\" +\n \"'created_at','completed_at', 'origin_account',\" +\n \"'receiving_account','approving_user'\"\n ]\n for transaction in selected_objects:\n rowdata = \"\"\"{0},{1},{2},{3},{4},{5},{6},{7},{8}\"\"\".format(\n transaction.uid, transaction.transaction_type,\n transaction.transaction_status, transaction.amount,\n transaction.created_at, transaction.completed_at,\n transaction.origin_account, transaction.receiving_account,\n transaction.approving_user)\n rows.append(rowdata)\n return stream_csv(rows, 'transactions' + str(timezone.now()))\n\n return render(request, self.template_name, {'table': table})\n\n\nclass TransactionsDownload(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n rows = [\n \"'uid', 'transaction_type','transaction_status', 'amount',\" +\n \"'created_at','completed_at', 'origin_account',\" +\n \"'receiving_account','approving_user'\"\n ]\n for transaction in Transaction.objects.all():\n rowdata = \"\"\"{0},{1},{2},{3},{4},{5},{6},{7},{8}\"\"\".format(\n transaction.uid, transaction.transaction_type,\n transaction.transaction_status, transaction.amount,\n transaction.created_at, transaction.completed_at,\n transaction.origin_account, transaction.receiving_account,\n transaction.approving_user)\n rows.append(rowdata)\n return stream_csv(rows, 'transactions' + 
str(timezone.now()))\n"
},
{
"alpha_fraction": 0.5034382939338684,
"alphanum_fraction": 0.5095910429954529,
"avg_line_length": 34.42307662963867,
"blob_id": "fa144fe0509a7fb62506b1f7d2208a8c13161465",
"content_id": "4d3661410c2e5a3babb7e2c1023b8eafe0f7fad0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2763,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 78,
"path": "/templates/orders/history/history.html",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "{% extends '_base.html' %}\n\n{% load staticfiles %}\n\n{% block title %}History{% endblock %}\n\n{% block bodyclass %}page-history{% endblock %}\n\n{% block content %}\n <div class=\"content-container\">\n <div class=\"title-bar\">\n <h1><img src=\"{% static \"img/Button-History-Black-NoText.svg\" %}\" class=\"title-icon\"> History</h1>\n </div>\n\n <div class=\"row\">\n <div class=\"col-md-8 col-lg-6\">\n <div class=\"content-panel\">\n\n {% for order in orders %}\n {% if order.order_type == 1 %}\n {% include 'orders/history/_item_deposit.html' with order=order %}\n {% elif order.order_type == 2 %}\n {% include 'orders/history/_item_send.html' with order=order %}\n {% elif order.order_type == 4 and order.transaction %}\n {% include 'orders/history/_item_withdraw.html' with order=order %}\n {% endif %}\n {% empty %}\n <p class=\"empty-result\">No result</p>\n {% endfor %}\n\n </div>\n </div>\n\n <div class=\"col-md-4 col-lg-2\">\n <div class=\"filter-container\">\n <form action=\"\" method=\"get\" class=\"filter-form\">\n <h4 class=\"filter-title\">Filter</h4>\n\n {% if form.non_field_errors %}\n <div class=\"alert alert-danger\">\n {% for message in form.non_field_errors %}\n {{ message }}\n {% endfor %}\n </div>\n {% endif %}\n\n <div class=\"filter-section\">\n <h5 class=\"section-title\">Type:</h5>\n <ul class=\"list-unstyled\">\n {% for key, val in form.fields.order_type.choices %}\n <li><a class=\"{% if key == form.order_type.value %}active{% endif %} type-item\" href=\"#\" data-key=\"{{ key }}\">{{ val }}</a></li>\n {% endfor %}\n </ul>\n <input type=\"hidden\" name=\"order_type\" value=\"{{ form.order_type.value | default:''}}\">\n </div>\n\n <div class=\"filter-section\">\n <h5 class=\"section-title\">Date From:</h5>\n <input type=\"text\" class=\"form-control datepicker\" placeholder=\"Enter date\" name=\"date_from\" value=\"{{ form.date_from.value }}\">\n </div>\n\n <div class=\"filter-section\">\n <h5 class=\"section-title\">Date To:</h5>\n <input type=\"text\" class=\"form-control datepicker\" placeholder=\"Enter date\" name=\"date_to\" value=\"{{ form.date_to.value }}\">\n </div>\n\n <button type=\"submit\" class=\"btn btn-sm btn-green pull-right\">Update</button>\n </form>\n </div>\n </div>\n </div>\n\n </div>\n{% endblock %}\n\n{% block scripts %}\n <script src=\"{% static 'js/pages/history.js'%}\"></script>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.5504051446914673,
"alphanum_fraction": 0.5514507293701172,
"avg_line_length": 34.532508850097656,
"blob_id": "6af664c52d948e068ecaa9d6dfc525f3b6e74ec4",
"content_id": "def2c45eadccd96bd95520f6f4efbef8a93dc784",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11477,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 323,
"path": "/apps/orders/forms.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\n\nfrom .models import Order\nfrom .models import RequestOrder\nfrom users.models import User\nfrom users.models import Account\nfrom users.models import Department\n\n\nclass SendOrderForm(forms.ModelForm):\n '''\n Used to send resources from user to another user\n '''\n username = forms.CharField(label=_('User Identifier'),\n required=True,\n widget=forms.TextInput(attrs={\n 'placeholder': _('User Identifier')\n }))\n\n field_order = ['username', 'amount', 'remarks']\n\n def __init__(self, *args, **kwargs):\n self.origin_user = kwargs.pop('origin_user', None)\n super(SendOrderForm, self).__init__(*args, **kwargs)\n self.fields['amount'].required = True\n self.receiving_user = None\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n filter_fields = ['username', 'email']\n try:\n int(username)\n filter_fields.insert(0, 'pk')\n except ValueError:\n pass\n\n for field in filter_fields:\n query = {field: username}\n self.receiving_user = User.objects.filter(**query).first()\n if self.receiving_user:\n break\n if not self.receiving_user:\n raise forms.ValidationError(\n _(\"Didn't find a user\"),\n code='invalid_username')\n\n if self.receiving_user == self.origin_user:\n raise forms.ValidationError(_(\"You can't send order \" +\n 'to yourself'),\n code='resource_to_yourself')\n return username\n\n def save(self, commit=True):\n order = super(SendOrderForm, self).save(commit=False)\n order.origin_user = self.origin_user\n order.receiving_user = self.receiving_user\n order.order_type = 2\n if commit:\n order.save()\n return order\n\n class Meta:\n model = Order\n fields = ['amount', 'remarks']\n widgets = {\n 'amount': forms.NumberInput(attrs={\n 'placeholder': _('Amount')\n }),\n 'remarks': forms.Textarea(attrs={\n 'placeholder': _('Remarks'),\n 'rows': 4,\n 'cols': 15\n })\n }\n\n\nclass RequestOrderForm(forms.ModelForm):\n username = forms.CharField(label=_('User Identifier'),\n required=True,\n widget=forms.TextInput(attrs={\n 'placeholder': _('User Identifier')\n }))\n\n def __init__(self, *args, **kwargs):\n self.receiving_user = kwargs.pop('receiving_user', None)\n self.requested_by = kwargs.pop('requested_by', None)\n super(RequestOrderForm, self).__init__(*args, **kwargs)\n self.fields['amount'].required = True\n self.origin_user = None\n\n class Meta:\n model = RequestOrder\n fields = ['username', 'amount']\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n filter_fields = ['username', 'email']\n try:\n int(username)\n filter_fields.insert(0, 'pk')\n except ValueError:\n pass\n\n for field in filter_fields:\n query = {field: username}\n self.origin_user = User.objects.filter(**query).first()\n if self.origin_user:\n break\n if not self.origin_user:\n raise forms.ValidationError(\n _(\"Didn't find a user\"),\n code='invalid_username')\n\n if self.origin_user == self.receiving_user:\n raise forms.ValidationError(_(\"You can't request order \" +\n 'from yourself'),\n code='request_from_yourself')\n return username\n\n def save(self, commit=True):\n request_order = super(RequestOrderForm, self).save(commit=False)\n request_order.origin_user = self.origin_user\n request_order.receiving_user = self.receiving_user\n request_order.requested_by = self.requested_by\n if commit:\n request_order.save()\n return request_order\n\n\nclass 
ApproveRequestOrderForm(forms.Form):\n request_order = forms.ModelChoiceField(queryset=RequestOrder.objects.all())\n\n def save(self):\n request_order = self.cleaned_data['request_order']\n request_order.approve()\n\n\nclass RejectRequestOrderForm(ApproveRequestOrderForm):\n def save(self):\n request_order = self.cleaned_data['request_order']\n request_order.reject()\n\n\nclass HistoryForm(forms.Form):\n \"\"\"\n Form for transaction history page\n \"\"\"\n TYPE_CHOICES = (('send', 'Send'),\n ('request', 'Received'),\n ('deposit', 'Deposit'),\n ('withdraw', 'Withdraw'),)\n\n date_from = forms.DateField(\n required=True,\n widget=forms.TextInput(attrs={'class': 'form-control datepicker'}))\n date_to = forms.DateField(\n required=True,\n widget=forms.TextInput(attrs={'class': 'form-control datepicker'}))\n order_type = forms.ChoiceField(choices=TYPE_CHOICES, required=False)\n\n def clean(self):\n \"\"\"\n Validates date order\n \"\"\"\n self.cleaned_data = super(HistoryForm, self).clean()\n if ('date_from' in self.cleaned_data and\n 'date_to' in self.cleaned_data and\n self.cleaned_data['date_from'] > self.cleaned_data['date_to']):\n raise forms.ValidationError(\n _(\"'Date from' must be earlier than 'Date to'.\"))\n return self.cleaned_data\n\n\nclass NewAccountForm(forms.ModelForm):\n \"\"\"\n New account creation form\n \"\"\"\n name = forms.CharField(\n required=True,\n widget=forms.TextInput(attrs={'placeholder': _(\"Name of account\")}))\n number = forms.IntegerField(\n required=True,\n widget=forms.NumberInput(attrs={'placeholder': _(\"Account number\")}))\n department = forms.ModelChoiceField(queryset=Department.objects.all(),\n empty_label=_(\"Select department\"),\n required=True)\n\n def __init__(self, *args, **kwargs):\n self.owner = kwargs.pop('owner', None)\n super(NewAccountForm, self).__init__(*args, **kwargs)\n\n def clean(self):\n if self.cleaned_data.get(\"is_master\", False):\n try:\n department = self.cleaned_data.get(\"department\")\n if department and Account.objects.filter(\n account_department__id=department.pk,\n is_master=True).exists():\n raise forms.ValidationError(\n _(\"Only one master account per department allowed\"))\n except Account.DoesNotExist:\n pass\n\n def save(self, commit=True):\n account = super(NewAccountForm, self).save(commit=False)\n account.owner = self.owner\n account.account_department = self.cleaned_data['department']\n account.number = self.cleaned_data['number']\n account.name = self.cleaned_data['name']\n account.registered_at = timezone.now()\n\n # only departments allowed to create master-accounts\n if self.owner.is_department:\n account.is_master = self.cleaned_data.get('is_master', False)\n else:\n account.is_master = False\n\n if commit:\n account.save()\n return account\n\n class Meta:\n model = Account\n fields = ['name', 'number', 'is_master']\n\n\nclass TransferForm(forms.ModelForm):\n \"\"\"\n Transfer in/out creation form\n \"\"\"\n order_type = forms.ChoiceField(choices=((1, _('In')), (4, _('Out'))),\n widget=forms.Select(),\n label=_(\"Select type\"),\n required=True)\n origin_account = forms.ModelChoiceField(\n empty_label=_(\"Select origin account\"),\n queryset=Account.objects.all(),\n required=False)\n receiving_account = forms.ModelChoiceField(\n empty_label=_(\"Select receiving account\"),\n queryset=Account.objects.all(),\n required=False)\n\n field_order = ['order_type', 'amount', 'origin_account',\n 'receiving_account', 'remarks']\n\n def __init__(self, *args, **kwargs):\n self.order_type = kwargs.pop('order_type', 
1)\n\n super(TransferForm, self).__init__(*args, **kwargs)\n\n self.fields['amount'].required = True\n self.fields['order_type'].required = True\n\n def clean(self):\n cleaned_data = super(TransferForm, self).clean()\n try:\n order_type = cleaned_data.get('order_type')\n receiving_account = cleaned_data.get('receiving_account')\n origin_account = cleaned_data.get('origin_account')\n\n if not origin_account:\n raise ValidationError(\n _('Origin account is required.'))\n\n if not receiving_account:\n raise ValidationError(\n _('Receiving account is required.'))\n\n self.origin_user = origin_account.owner\n self.receiving_user = receiving_account.owner\n\n # if order_type and int(order_type) == Order.TYPE_A:\n # master_account = Account.objects.get(\n # account_department__id=origin_account.account_department.id,\n # is_master=True)\n # self.cleaned_data['receiving_account'] = master_account\n #\n # if order_type and int(order_type) == Order.TYPE_D:\n # master_account = Account.objects.get(\n # account_department__id=receiving_account.account_department.id,\n # is_master=True)\n # self.cleaned_data['origin_account'] = master_account\n\n except Account.DoesNotExist:\n raise ValidationError(\n _('No corresponding master account.'))\n except Account.MultipleObjectsReturned:\n raise ValidationError(\n _('Multiple corresponding master accounts found. ' +\n 'Please contact admin to solve this problem'))\n\n def save(self, commit=True):\n order = super(TransferForm, self).save(commit=False)\n order.origin_user = self.origin_user\n order.receiving_user = self.receiving_user\n order.order_type = self.cleaned_data['order_type']\n if commit:\n kwargs = {\n 'origin_account': self.cleaned_data['origin_account'],\n 'receiving_account': self.cleaned_data['receiving_account']\n }\n order.save(**kwargs)\n return order\n\n class Meta:\n model = Order\n fields = ['amount', 'remarks']\n widgets = {\n 'amount': forms.NumberInput(attrs={\n 'placeholder': _('Amount')\n }),\n 'remarks': forms.Textarea(attrs={\n 'placeholder': _('Remarks'),\n 'rows': 4,\n 'cols': 15\n })\n }\n"
},
{
"alpha_fraction": 0.48875856399536133,
"alphanum_fraction": 0.4913978576660156,
"avg_line_length": 30.28440284729004,
"blob_id": "b87a16effe25cb78478e7535d26ba9306528e865",
"content_id": "6f01f140ef397e72109c2b3974f3e36271ab4c82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10230,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 327,
"path": "/apps/users/tests/test_views.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom users.models import User\nfrom users.forms import UserProfileForm\n\nclass UsersViewsTest(TestCase):\n \"\"\"\n tests for users:views\n including forms and models\n \"\"\"\n def test_login(self):\n \"\"\"\n get, post and validation error test\n \"\"\"\n #creating test user\n user = User.objects.create_user('temporary', '[email protected]', 'temporary')\n user.is_approved = True\n user.is_active = True\n user.save()\n # 200\n response = self.client.get(reverse('users:login'))\n self.assertEqual(response.status_code, 200)\n\n # form errors\n response = self.client.post(reverse('users:login'),\n {\n 'username': 'temporary'\n }\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'password',\n errors = ['This field is required.']\n )\n response = self.client.post(reverse('users:login'),\n {\n 'username': 'temporary',\n 'password': 'temporary1'\n }\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'password',\n errors = ['Invalid password.']\n )\n response = self.client.post(reverse('users:login'),\n {\n 'password': 'temporary'\n }\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'username',\n errors = ['This field is required.']\n )\n\n response = self.client.post(reverse('users:login'),{\n 'username': 'temporary',\n 'password': 'temporary'\n }\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = None,\n errors = [\"User is not validated.\"]\n )\n user = User.objects.get(username='temporary')\n user.is_validated = True\n user.is_approved = True\n user.is_active = True\n user.save()\n response = self.client.post(reverse('users:login'),{\n 'username': 'temporary',\n 'password': 'temporary'\n }\n )\n self.assertEqual(response.status_code, 302)\n\n # deleting test user\n user = User.objects.get(username='temporary')\n user.delete()\n\n def test_register(self):\n \"\"\"\n get, post and validation error test\n \"\"\"\n # 200\n response = self.client.get(reverse('users:register'))\n self.assertEqual(response.status_code, 200)\n\n # post\n response = self.client.post(reverse('users:register'),{\n 'username': 'temporary',\n 'email': '[email protected]',\n 'password': '123123',\n 'password_again': '123123'\n }\n )\n # log in and redirect\n user = User.objects.get(username='temporary')\n self.assertEqual(user.username, 'temporary')\n self.assertRedirects(\n response = response,\n expected_url = reverse('users:code_sent')\n )\n\n response = self.client.post(reverse('users:register'),{\n 'username': 'temporary',\n 'email': '[email protected]',\n 'password': '123123',\n 'password_again': '1231231'\n }\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = None,\n errors = [\"Passwords don't match. 
Please enter both fields again.\"]\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = 'username',\n            errors = [\"A user with that username already exists.\"]\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = 'email',\n            errors = [\"User with such email already exists.\"]\n        )\n\n        response = self.client.post(reverse('users:register'),{\n                'username': '',\n                'email': '',\n                'password': '',\n                'password_again': ''\n            }\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = 'username',\n            errors = ['This field is required.']\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = 'email',\n            errors = ['This field is required.']\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = 'password',\n            errors = ['This field is required.']\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = 'password_again',\n            errors = ['This field is required.']\n        )\n\n        user = User.objects.get(username='temporary')\n        self.assertNotEqual(user.validation_code, None)\n\n        # deleting test user\n        user.delete()\n\n    def test_profile(self):\n        \"\"\"\n        get, post and validation error test\n        \"\"\"\n        error_messages = UserProfileForm.error_messages\n        # creating test user\n        user = User.objects.create_user('temporary', '[email protected]', 'temporary')\n        user.is_validated = True\n        user.is_approved = True\n        user.is_active = True\n        user.save()\n        # login required\n        response = self.client.get(reverse('users:profile'))\n        self.assertRedirects(\n            response = response,\n            expected_url = reverse('users:login')+\n                \"?next=\"+\n                reverse('users:profile')\n        )\n\n        self.client.login(username='temporary', password='temporary')\n\n        # 200\n        response = self.client.get(reverse('users:profile'))\n        self.assertEqual(response.status_code, 200)\n\n        response = self.client.post(reverse('users:profile'),{\n                'username': 'temporary',\n                'email': '[email protected]',\n                'first_name': 'test',\n                'last_name': 'user',\n                'new_password': '123123'\n            }\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = None,\n            errors = [error_messages[\"old_password_required\"]]\n        )\n        response = self.client.post(reverse('users:profile'),{\n                'username': 'temporary',\n                'email': '[email protected]',\n                'first_name': 'test',\n                'last_name': 'user',\n                'new_password_again': '123123'\n            }\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = None,\n            errors = [error_messages[\"old_password_required\"]]\n        )\n        response = self.client.post(reverse('users:profile'),{\n                'username': 'temporary',\n                'email': '[email protected]',\n                'first_name': 'test',\n                'last_name': 'user',\n                'old_password': 'temporary',\n                'new_password': '123123'\n            }\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = None,\n            errors = [error_messages[\"new_password_repeat\"]]\n        )\n        response = self.client.post(reverse('users:profile'),{\n                'username': 'temporary',\n                'email': '[email protected]',\n                'first_name': 'test',\n                'last_name': 'user',\n                'old_password': 'temporary',\n                'new_password_again': '123123'\n            }\n        )\n        self.assertFormError(\n            response = response,\n            form = 'form',\n            field = None,\n            errors = [error_messages[\"new_password_repeat\"]]\n        )\n        response = self.client.post(reverse('users:profile'),{\n                'username': 'temporary',\n                'email': '[email protected]',\n                'first_name': 'test',\n                'last_name': 'user',\n                'old_password': 'temporary',\n                'new_password': '123asd',\n                'new_password_again': '123123'\n            }\n        )\n        self.assertFormError(\n            
response = response,\n form = 'form',\n field = None,\n errors = [error_messages[\"password_mismatch\"]]\n )\n\n user = User.objects.create_user('temporary2', '[email protected]', 'temporary2')\n user.is_validated = True\n user.is_approved = True\n user.is_active = True\n user.save()\n response = self.client.post(reverse('users:profile'),{\n 'username': 'temporary',\n 'email': '[email protected]',\n 'first_name': 'test',\n 'last_name': 'user',\n 'old_password': 'temporary12',\n 'new_password': '123asd',\n 'new_password_again': '123123'\n }\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = None,\n errors = [error_messages[\"password_incorrect\"]]\n )\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'email',\n errors = [error_messages[\"email_in_use\"]]\n )\n response = self.client.post(reverse('users:profile'),{\n 'username': 'temporary',\n 'email': '[email protected]',\n 'first_name': 'test',\n 'last_name': 'user',\n 'old_password': 'temporary',\n 'new_password': '123123',\n 'new_password_again': '123123'\n }\n )\n\n user = User.objects.get(username='temporary')\n self.assertEqual(user.first_name, 'test')\n self.assertEqual(user.last_name, 'user')\n\n # password change\n self.client.login(username='temporary', password='123123')\n response = self.client.get(reverse('users:profile'))\n self.assertEqual(response.status_code, 200)\n\n # deleting test users\n user.delete()\n user = User.objects.get(username='temporary2')\n user.delete()\n"
},
{
"alpha_fraction": 0.5505050420761108,
"alphanum_fraction": 0.6010100841522217,
"avg_line_length": 19.842105865478516,
"blob_id": "b290b363609c93fee0ab8e06d956cbb3f8808c20",
"content_id": "9218e93aa7e91fb027e319433a3e5a78f611b231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 19,
"path": "/apps/orders/migrations/0005_auto_20160211_1218.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-11 12:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0004_ordersorders'),\n ]\n\n operations = [\n migrations.AlterModelTable(\n name='ordersorders',\n table='ordersorders',\n ),\n ]\n"
},
{
"alpha_fraction": 0.535617470741272,
"alphanum_fraction": 0.537096381187439,
"avg_line_length": 36.564815521240234,
"blob_id": "04fe70db2202f87dd24e0cf2534aa0f136d86b12",
"content_id": "ab62d919d72f617fb17207f7446c200b7bad1e70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 4057,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 108,
"path": "/templates/users/register.html",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "{% extends \"users/_base.html\" %}\n\n{% load staticfiles %}\n{% load i18n %}\n\n{% block title %}Register{% endblock %}\n{% block bodyclass %}page-register{% endblock %}\n\n{% block content %}\n <div class=\"auth-box\">\n <div class=\"logo\">\n <img src=\"{% static \"img/logo.png\" %}\" alt=\"logo\">\n </div>\n\n <form method=\"post\" action=\"{% url \"users:register\" %}\">\n {% csrf_token %}\n\n <h3 class=\"form-title\">Register your account</h3>\n\n {% if form.non_field_errors %}\n <div class=\"alert alert-danger\">\n {% for message in form.non_field_errors %}\n {{ message }}\n {% endfor %}\n </div>\n {% endif %}\n\n <div class=\"form-group {% if form.username.errors %}has-error{% endif %}\">\n <label class=\"sr-only\" class=\"control-label\">Username</label>\n <input autofocus type=\"text\" class=\"form-control\" placeholder=\"Username\" name=\"username\" value=\"{{ form.username.value | default:'' }}\">\n <p class='help-block'>\n {% for message in form.username.errors %}\n {{ message }}\n {% endfor %}\n </p>\n </div>\n\n <div class=\"row\">\n <div class=\"col-sm-6\">\n <div class=\"form-group {% if form.first_name.errors %}has-error{% endif %}\">\n <label class=\"sr-only\" class=\"control-label\">First Name</label>\n <input type=\"text\" class=\"form-control\" placeholder=\"First Name\" name=\"first_name\" value=\"{{ form.first_name.value | default:'' }}\">\n <p class='help-block'>\n {% for message in form.first_name.errors %}\n {{ message }}\n {% endfor %}\n </p>\n </div>\n </div>\n\n <div class=\"col-sm-6\">\n <div class=\"form-group {% if form.last_name.errors %}has-error{% endif %}\">\n <label class=\"sr-only\" class=\"control-label\">Last Name</label>\n <input type=\"text\" class=\"form-control\" placeholder=\"Last Name\" name=\"last_name\" value=\"{{ form.last_name.value | default:'' }}\">\n <p class='help-block'>\n {% for message in form.last_name.errors %}\n {{ message }}\n {% endfor %}\n </p>\n </div>\n </div>\n </div>\n\n <div class=\"form-group {% if form.email.errors %}has-error{% endif %}\">\n <label class=\"sr-only\" class=\"control-label\">Email</label>\n <input type=\"email\" class=\"form-control\" placeholder=\"Email\" name=\"email\" value=\"{{ form.email.value | default:'' }}\">\n <p class='help-block'>\n {% for message in form.email.errors %}\n {{ message }}\n {% endfor %}\n </p>\n </div>\n\n <div class=\"form-group {% if form.password.errors %}has-error{% endif %}\">\n <label class=\"sr-only\" class=\"control-label\">Password</label>\n <input type=\"password\" class=\"form-control\" placeholder=\"Password\" name=\"password\" value=\"{{ form.password.value | default:'' }}\">\n <p class='help-block'>\n {% for message in form.password.errors %}\n {{ message }}\n {% endfor %}\n </p>\n </div>\n\n <div class=\"form-group {% if form.password_again.errors %}has-error{% endif %}\">\n <label class=\"sr-only\" class=\"control-label\">Confirm Password</label>\n <input type=\"password\" class=\"form-control\" placeholder=\"Confirm Password\" name=\"password_again\" value=\"{{ form.password_again.value | default:'' }}\">\n <p class='help-block'>\n {% for message in form.password_again.errors %}\n {{ message }}\n {% endfor %}\n </p>\n </div>\n\n <div class=\"form-action\">\n <button class=\"btn btn-primary btn-block\">Register</button>\n </div>\n\n <span class=\"or-divider\">Or</span>\n\n <a class=\"btn btn-block btn-primary btn-facebook\" href=\"{% url 'social:begin' 'facebook' %}\">\n <span class=\"fb-icon\"></span>\n Register with Facebook\n </a>\n\n <p 
class=\"footer-link\">Already have an account? <a href=\"{% url 'users:login' %}\">Login</a> now</p>\n </form>\n </div>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.658823549747467,
"alphanum_fraction": 0.658823549747467,
"avg_line_length": 14.9375,
"blob_id": "6cb9c0abda2c26affc02f0bcd00f984654bba869",
"content_id": "cf49c70ee52601ed37eb3b067ce4dd61462528f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 16,
"path": "/osem/settings/production.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from ._base import *\nimport os\n\n\nDEBUG = False\n\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('DSN'),\n}\n\nINSTALLED_APPS += (\n 'raven.contrib.django.raven_compat',\n)\n\nBROKER_URL = os.environ.get('CLOUDAMQP_URL')\nSITE_DOMAIN = os.environ.get('SITE_DOMAIN')\n"
},
{
"alpha_fraction": 0.5262054800987244,
"alphanum_fraction": 0.6016771197319031,
"avg_line_length": 22.850000381469727,
"blob_id": "3ac535a05d93b05db0caf3beb84d74843760f8c9",
"content_id": "de2cddb40eabfd27c6e957612164ecd1d07fe7a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 20,
"path": "/apps/users/migrations/0010_user_balance.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-03-09 16:53\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0009_auto_20160303_2344'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='balance',\n field=models.DecimalField(decimal_places=1, default=0, max_digits=12),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5852272510528564,
"alphanum_fraction": 0.623106062412262,
"avg_line_length": 25.399999618530273,
"blob_id": "e016d366ab7d68b3c35b2b5eb7746dcee32b802a",
"content_id": "c1cc6aa432ba2ce25b4e5d7ced565e08e6340834",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 20,
"path": "/apps/users/migrations/0016_user_is_approved.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-05-03 12:35\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0015_merge'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='is_approved',\n field=models.BooleanField(default=False, help_text='Designates whether this approved by administrator.', verbose_name='approved'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5452755689620972,
"alphanum_fraction": 0.6122047305107117,
"avg_line_length": 24.399999618530273,
"blob_id": "878caa9af835cf4647bd992c0fbc855ddd568e7f",
"content_id": "1cd0f92fac0140cad78f61aee551aa8ac2c41cab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 20,
"path": "/apps/users/migrations/0006_user_validation_code.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-18 09:07\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0005_auto_20160211_1154'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='validation_code',\n field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Validation code'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6370786428451538,
"alphanum_fraction": 0.6567415595054626,
"avg_line_length": 51.35293960571289,
"blob_id": "4a75541a96779c463bd2c437254dc391e3572dbd",
"content_id": "b8419b1035b136b5285ebdfa7904bb486d95ebc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1780,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 34,
"path": "/apps/orders/migrations/0016_requestorder.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-12-30 11:00\nfrom __future__ import unicode_literals\n\nimport caching.base\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('orders', '0015_auto_20160712_1252'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RequestOrder',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('amount', models.DecimalField(decimal_places=2, max_digits=12, null=True)),\n ('approved_at', models.DateTimeField(blank=True, null=True)),\n ('notes', models.TextField(blank=True, null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Creation date')),\n ('approved_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ('origin_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='request_origin_user', to=settings.AUTH_USER_MODEL)),\n ('receiving_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='request_receiving_user', to=settings.AUTH_USER_MODEL)),\n ('requested_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='request_requested_user', to=settings.AUTH_USER_MODEL)),\n ],\n bases=(caching.base.CachingMixin, models.Model),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6128779649734497,
"alphanum_fraction": 0.613540530204773,
"avg_line_length": 33.878150939941406,
"blob_id": "15ca315043a454f472264be31f104409eefb893b",
"content_id": "5a7aeca803bf3213321b19720197efe808d4657c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16602,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 476,
"path": "/apps/orders/views.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom django.http import HttpResponseRedirect\nfrom django.http import JsonResponse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import resolve\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.views.generic import View\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.detail import DetailView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.db.models import Q\n\nfrom users.models import User\nfrom users.models import Account\nfrom users.forms import ChangePasswordForm\n\nfrom .models import Transaction\nfrom .models import Order\nfrom .models import RequestOrder\nfrom .forms import SendOrderForm\nfrom .forms import RequestOrderForm\nfrom .forms import ApproveRequestOrderForm, RejectRequestOrderForm\nfrom .forms import HistoryForm\nfrom .forms import NewAccountForm\nfrom .forms import TransferForm\n\n\nclass IsApprovedMixin(UserPassesTestMixin):\n def test_func(self):\n return (self.request.user.is_validated and self.request.user.is_active\n and self.request.user.is_approved)\n\n def handle_no_permission(self):\n if self.request.user.is_anonymous:\n return super(UserPassesTestMixin, self).handle_no_permission()\n return HttpResponseRedirect(reverse('not_approved'))\n\n\nclass Home(LoginRequiredMixin, TemplateView):\n \"\"\"\n View for home page\n \"\"\"\n template_name = \"orders/home/home.html\"\n\n\nclass History(LoginRequiredMixin, ListView):\n \"\"\"\n View for transaction history\n \"\"\"\n template_name = \"orders/history/history.html\"\n\n def get_queryset(self):\n form = self.get_form()\n if form.is_valid():\n order_type = form['order_type'].value()\n date_from = datetime.datetime.strptime(form['date_from'].value(),\n \"%m/%d/%Y\")\n date_to = datetime.datetime.strptime(\n form['date_to'].value(), \"%m/%d/%Y\") + \\\n datetime.timedelta(days=1)\n\n qs = Order.objects.filter(\n created_at__range=(date_from, date_to)).filter(\n Q(origin_user__id=self.request.user.id) |\n Q(receiving_user__id=self.request.user.id)).order_by('-id')\n\n if order_type == 'send':\n qs = qs.filter(order_type=2, origin_user__id=self.request.user.id)\n elif order_type == 'request':\n qs = qs.filter(order_type=2, receiving_user__id=self.request.user.id)\n elif order_type == 'deposit':\n qs = qs.filter(order_type=1)\n elif order_type == 'withdraw':\n qs = qs.filter(order_type=4)\n\n return qs\n\n today = datetime.date.today() + datetime.timedelta(days=1)\n month_ago = (datetime.date.today() - datetime.timedelta(days=30))\n return Order.objects.filter(\n created_at__range=[month_ago, today]).filter(\n Q(origin_user__id=self.request.user.id) |\n Q(receiving_user__id=self.request.user.id)).order_by('-id')\n\n def get_form(self):\n today = (datetime.date.today() + datetime.timedelta(days=1)).strftime('%m/%d/%Y')\n month_ago = (datetime.date.today() - datetime.timedelta(days=30)).strftime('%m/%d/%Y')\n\n form_data = {\n 'date_from': self.request.GET.get('date_from', month_ago),\n 'date_to': self.request.GET.get('date_to', today),\n 'order_type': self.request.GET.get('order_type', None)\n }\n\n return HistoryForm(data=form_data)\n\n def get(self, request, *args, **kwargs):\n orders = self.get_queryset()\n return render(request, self.template_name, {'form': 
self.get_form(),\n                                                    'orders': orders})\n\n\nclass OrderDetail(LoginRequiredMixin, IsApprovedMixin, DetailView):\n    model = Order\n    template_name = 'orders/order/detail.html'\n\n\nclass Send(LoginRequiredMixin, IsApprovedMixin, TemplateView):\n    \"\"\"\n    View for send new order page\n    \"\"\"\n    template_name = 'orders/send/send.html'\n    form_class = SendOrderForm\n\n    def get_form(self):\n        return self.form_class(data=self.request.GET or None)\n\n    def get(self, request):\n        dest_username = self.request.GET.get('dest', '')\n        destination = None\n        try:\n            destination = User.objects.get(username=dest_username)\n        except User.DoesNotExist:\n            pass\n\n        return self.render_to_response({\n            'form': self.form_class(data=self.request.GET or None),\n            'destination': destination\n        })\n\n    def post(self, request, *args, **kwargs):\n        form = self.form_class(origin_user=request.user, data=request.POST)\n        if form.is_valid():\n            order = form.save()\n            if order and order.completed_at:\n                return HttpResponseRedirect(reverse('orders:send_success'))\n            else:\n                return HttpResponseRedirect(reverse('orders:send_fail'))\n        return self.render_to_response({'form': form})\n\n\nclass Request(LoginRequiredMixin, IsApprovedMixin, FormView):\n    \"\"\"\n    View for request form\n    \"\"\"\n    template_name = 'orders/request/request.html'\n    form_class = RequestOrderForm\n\n    def get_success_url(self):\n        return reverse('orders:request')\n\n    def get_form_kwargs(self):\n        form_kwargs = super(Request, self).get_form_kwargs()\n        form_kwargs['receiving_user'] = self.request.user\n        form_kwargs['requested_by'] = self.request.user\n        return form_kwargs\n\n    def form_valid(self, form):\n        form.save()\n        msg = _(\"Your request has been sent.\")\n        messages.info(self.request, msg, extra_tags='request')\n        return super(Request, self).form_valid(form)\n\n    def get_context_data(self, **kwargs):\n        context = super(Request, self).get_context_data(**kwargs)\n\n        my_requests = RequestOrder.objects.filter(receiving_user=self.request.user)\n        context['my_requests'] = my_requests\n\n        requested_to_me = RequestOrder.objects.filter(origin_user=self.request.user)\n        context['requested_to_me'] = requested_to_me\n        return context\n\n\nclass RequestDetail(LoginRequiredMixin, IsApprovedMixin, DetailView):\n    model = RequestOrder\n    template_name = 'orders/request/detail.html'\n\n\nclass RequestApprove(RequestDetail):\n    template_name = 'orders/request/approve.html'\n    action_form_class = ApproveRequestOrderForm\n    action_message = 'Request has been approved'\n\n    def get_context_data(self, **kwargs):\n        context = super(RequestApprove, self).get_context_data(**kwargs)\n        context['form'] = self.action_form_class()\n        return context\n\n    def post(self, *args, **kwargs):\n        form = self.action_form_class(self.request.POST)\n        if form.is_valid():\n            form.save()\n            messages.info(self.request, self.action_message, extra_tags='request')\n            return HttpResponseRedirect(reverse('orders:request'))\n        else:\n            # re-render the detail page; dispatch() passed the request\n            # as the first positional argument, so forward it as-is\n            return self.get(*args, **kwargs)\n\n\nclass RequestReject(RequestApprove):\n    template_name = 'orders/request/reject.html'\n    action_form_class = RejectRequestOrderForm\n    action_message = 'Request has been rejected'\n\n\nclass Transfer(LoginRequiredMixin, IsApprovedMixin, TemplateView):\n    \"\"\"\n    View for Transfer in/out page\n    \"\"\"\n    template_name = \"orders/transfer/transfer.html\"\n    form_class = TransferForm\n    in_msg = _(\"Deposit successful. Balance update may take time: it is updated upon administrator approval.\")\n    out_msg = _(\"Withdraw successful. 
Balance update may take time: it is updated upon administrator approval.\")\n\n def get_form(self):\n return self.form_class(origin_user=self.request.user,\n data=self.request.POST)\n\n def get_context_data(self, **kwargs):\n form = self.get_form()\n kwargs.update({\n 'form': form\n })\n return super(Transfer, self).get_context_data(**kwargs)\n\n def get(self, request, success=False):\n accounts = Account.objects.filter(owner=request.user, is_validated=True, is_master=False)\n\n return self.render_to_response({\n 'master_account': self.request.user.master_account,\n 'accounts': accounts,\n 'form': self.form_class()\n })\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(order_type=int(request.POST.get('order_type')),\n data=request.POST)\n if form.is_valid():\n order = form.save()\n if not order.transaction:\n messages.warning(request, 'You have insufficient balance to do this transaction', extra_tags='transfer')\n else:\n if form.order_type == Order.TYPE_A:\n msg = self.in_msg\n if form.order_type == Order.TYPE_D:\n msg = self.out_msg\n messages.info(request, msg, extra_tags='transfer')\n return HttpResponseRedirect(request.path)\n\n return self.render_to_response({'form': form})\n\n\nclass Deposit(Transfer):\n template_name = \"orders/deposit/deposit.html\"\n\n\nclass Withdraw(Transfer):\n template_name = \"orders/withdraw/withdraw.html\"\n\n\nclass NewAccount(LoginRequiredMixin, IsApprovedMixin, TemplateView):\n \"\"\"\n View for new account creation\n \"\"\"\n template_name = \"orders/settings/new_account.html\"\n form_class = NewAccountForm\n model = Account\n\n def get_form(self):\n return self.form_class(owner=self.request.user,\n data=self.request.POST or None)\n\n def get_context_data(self, **kwargs):\n kwargs.update({'form': self.get_form()})\n return super(NewAccount, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n form.save()\n msg = _(\"Account created. 
It must be validated by administrator.\")\n messages.info(request, msg, extra_tags='new_account')\n return HttpResponseRedirect(reverse('orders:new_account'))\n return self.render_to_response({'form': form})\n\n\nclass SearchUser(LoginRequiredMixin, TemplateView):\n template_name = \"orders/search/search.html\"\n\n\nclass Settings(LoginRequiredMixin, TemplateView):\n \"\"\"View for user settings page\"\"\"\n template_name = \"orders/settings/settings.html\"\n\n def get_context_data(self, **kwargs):\n accounts = Account.objects.filter(owner=self.request.user)\n kwargs.update({\n 'accounts': accounts,\n 'change_password_form': ChangePasswordForm()\n })\n return super(Settings, self).get_context_data(**kwargs)\n\n\nclass NotificationSettings(LoginRequiredMixin, TemplateView):\n \"\"\"\n View for user notification settings page\n \"\"\"\n template_name = \"orders/settings/settings.html\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Handling links from settings page\n \"\"\"\n current_url = resolve(request.path_info).url_name\n if current_url == 'settings_email_notifications':\n self.email_notifications()\n elif current_url == 'settings_transfer_requested':\n self.transfer_requested()\n elif current_url == 'settings_transfer_completed':\n self.transfer_completed()\n elif current_url == 'settings_send_requested':\n self.send_requested()\n elif current_url == 'settings_received_resource':\n self.received_resource()\n\n user = self.request.user\n return JsonResponse({\n 'email_notifications_enabled': user.email_notifications_enabled,\n 'email_on_transfer_completed': user.email_on_transfer_completed,\n 'email_on_transfer_requested': user.email_on_transfer_requested,\n 'email_on_resource_sent': user.email_on_resource_sent,\n 'email_on_resource_received': user.email_on_resource_received\n }, safe=False)\n\n def email_notifications(self):\n \"\"\"Toggle user.email_notifications_enabled\"\"\"\n user = self.request.user\n change = not self.request.user.email_notifications_enabled\n user.email_on_transfer_requested = change\n user.email_on_transfer_completed = change\n user.email_on_resource_sent = change\n user.email_on_resource_received = change\n user.save()\n\n def transfer_requested(self):\n user = self.request.user\n user.email_on_transfer_requested = not \\\n self.request.user.email_on_transfer_requested\n user.save()\n\n def transfer_completed(self):\n user = self.request.user\n user.email_on_transfer_completed = not \\\n self.request.user.email_on_transfer_completed\n user.save()\n\n def send_requested(self):\n user = self.request.user\n user.email_on_resource_sent = not \\\n self.request.user.email_on_resource_sent\n user.save()\n\n def received_resource(self):\n user = self.request.user\n user.email_on_resource_received = not \\\n self.request.user.email_on_resource_received\n user.save()\n\n\nclass UsersAutocomplete(LoginRequiredMixin, View):\n \"\"\"\n Endpoint for user autocomplete\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n q = request.GET.get('term')\n if q:\n result_list = User.objects.filter(\n Q(is_validated=True, is_active=True) &\n (Q(first_name__icontains=q) |\n Q(last_name__icontains=q) |\n Q(phone__icontains=q) |\n Q(username__icontains=q) |\n Q(email__icontains=q)))\n\n return JsonResponse(\n [{\n 'value': user.username,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'full_name': user.get_full_name(),\n 'image': (user.thumbnail.url if user.thumbnail else \"\")\n } for user in result_list],\n safe=False)\n\n\nclass 
AccountsAutocomplete(LoginRequiredMixin, View):\n \"\"\"\n Endpoint for account autocomplete\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n q = request.GET.get('term')\n if q:\n result_list = Account.objects.filter(owner=request.user, name__icontains=q)\n\n return JsonResponse(\n [{\n 'id': account.id,\n 'name': account.name,\n 'is_master': account.is_master,\n 'is_validated': account.is_validated,\n 'username': account.owner.username,\n 'department_id': account.account_department.id,\n 'department_name': account.account_department.name\n } for account in result_list],\n safe=False)\n\n\nclass AccountDetail(LoginRequiredMixin, View):\n \"\"\"\n Endpoint for getting account by id\n \"\"\"\n\n def send_response(self, account):\n return JsonResponse({\n 'id': account.id,\n 'image': self.request.user.thumbnail.url,\n 'name': account.name,\n 'is_master': account.is_master,\n 'is_validated': account.is_validated,\n 'username': account.owner.username,\n 'department_id': account.account_department.id,\n 'department_name': account.account_department.name\n })\n\n def get(self, request, *args, **kwargs):\n account_id = request.GET.get('id')\n\n if account_id:\n try:\n account = Account.objects.get(id=account_id)\n return self.send_response(account)\n\n except Account.DoesNotExist:\n pass\n\n return JsonResponse({}, safe=False)\n\n\n\nclass AccountMaster(AccountDetail):\n \"\"\"\n Endpoint for getting master account\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n department_id = request.GET.get('department')\n\n if department_id:\n try:\n account = Account.objects.get(\n account_department__id=department_id,\n is_master=True\n )\n return self.send_response(account)\n\n except Account.DoesNotExist:\n pass\n\n return JsonResponse({}, safe=False)\n"
},
{
"alpha_fraction": 0.6897274851799011,
"alphanum_fraction": 0.6897274851799011,
"avg_line_length": 40.4782600402832,
"blob_id": "ee0a553fcd7a125d920b12bf60565804f8096394",
"content_id": "15b3af5500dcc8aace3029bb8c2b371a1641299f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 954,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 23,
"path": "/apps/osem_admin/urls.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.views.generic import TemplateView\n\nfrom .views import Users\nfrom .views import Accounts\nfrom .views import Orders\nfrom .views import OrdersDownload\nfrom .views import Transactions\nfrom .views import TransactionsDownload\n\n\napp_name = 'osem_admin'\nurlpatterns = [\n url(r'^transactions/$', Transactions.as_view(), name='home'),\n url(r'^users/$', Users.as_view(), name='users'),\n url(r'^accounts/$', Accounts.as_view(), name='accounts'),\n url(r'^orders/$', Orders.as_view(), name='orders'),\n url(r'^orders/(?P<pk>\\d+)/$', Orders.as_view(), name='order'),\n url(r'^orders/download/$', OrdersDownload.as_view(), name='orders_download'),\n url(r'^transactions/$', Transactions.as_view(), name='transactions'),\n url(r'^transactions/(?P<pk>\\d+)/$', Transactions.as_view(), name='transaction'),\n url(r'^transactions/download/$', TransactionsDownload.as_view(), name='transactions_download'),\n]\n"
},
{
"alpha_fraction": 0.5433333516120911,
"alphanum_fraction": 0.5774999856948853,
"avg_line_length": 33.28571319580078,
"blob_id": "1e5622f6ae2abb3829533aa6cf41c6f6cb4757d3",
"content_id": "e19b4a8a2055086f389649eabc8b2077c462f46a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1200,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 35,
"path": "/apps/orders/migrations/0015_auto_20160712_1252.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-07-12 12:52\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0014_auto_20160328_0821'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='completed_at',\n field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name='Completed at'),\n ),\n migrations.AlterField(\n model_name='order',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, db_index=True, null=True, verbose_name='Creation date'),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='transaction_status',\n field=models.IntegerField(choices=[(1, 'Pending'), (2, 'Approved'), (3, 'Not approved')], db_index=True, null=True),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='transaction_type',\n field=models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')], db_index=True, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5769418478012085,
"alphanum_fraction": 0.5994137525558472,
"avg_line_length": 39.939998626708984,
"blob_id": "431986f41bc08710848110072289efcfeeed062a",
"content_id": "633422ccfaf5894e30ba3c554cd5bfceb10b798d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2047,
"license_type": "no_license",
"max_line_length": 424,
"num_lines": 50,
"path": "/apps/users/migrations/0021_auto_20160712_1252.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-07-12 12:52\nfrom __future__ import unicode_literals\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0020_auto_20160609_2137'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='email',\n field=models.EmailField(blank=True, db_index=True, max_length=254, verbose_name='email address'),\n ),\n migrations.AlterField(\n model_name='user',\n name='first_name',\n field=models.CharField(blank=True, db_index=True, max_length=30, verbose_name='first name'),\n ),\n migrations.AlterField(\n model_name='user',\n name='is_approved',\n field=models.BooleanField(db_index=True, default=False, help_text='Designates whether this approved by administrator.', verbose_name='approved'),\n ),\n migrations.AlterField(\n model_name='user',\n name='last_name',\n field=models.CharField(blank=True, db_index=True, max_length=30, verbose_name='last name'),\n ),\n migrations.AlterField(\n model_name='user',\n name='phone',\n field=models.CharField(blank=True, db_index=True, max_length=255, null=True, verbose_name='Phone'),\n ),\n migrations.AlterField(\n model_name='user',\n name='username',\n field=models.CharField(db_index=True, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),\n ),\n migrations.AlterModelTable(\n name='user',\n table=None,\n ),\n ]\n"
},
{
"alpha_fraction": 0.6101694703102112,
"alphanum_fraction": 0.6110169291496277,
"avg_line_length": 31.77777862548828,
"blob_id": "ed5051443c3847797e89869b362871ee8af92865",
"content_id": "995d1b1f0f16185cc06ad1077cf337c511bc63bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2360,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 72,
"path": "/static/js/pages/deposit-withdraw.js",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "(function () {\n var form = $('form');\n\n var valueField = transferType === 'deposit' ? $('input[name=\"origin_account\"]') : $('input[name=\"receiving_account\"]');\n var masterField = transferType === 'deposit' ? $('input[name=\"receiving_account\"]') : $('input[name=\"origin_account\"]');\n\n var resultElem = $('.account-selector .account-result');\n var selectedElem = $('.account-selector .selected');\n var submitBtn = $('.btn-submit');\n var addAccount = $('.add-account');\n var masterAccount = $('.master-account');\n var results = [];\n\n function getAccountTemplate(account) {\n var template = '<img src=\"' + (account.image ? account.image : defaultImg) + '\" class=\"img-circle avatar avatar-medium\">';\n template += '<h4 class=\"name\">' + account.name + '</h4>';\n template += '<p class=\"username\">' + account.department_name + '</p>';\n\n return template;\n }\n\n function chooseAccount(account) {\n var content = getAccountTemplate(account);\n content += '<p><a href=\"#\" class=\"change-selected\">Change</a></p>';\n\n $.get(urls.getMasterAccount + '?department=' + account.department_id)\n .done(displayMasterAccount)\n\n selectedElem.html(content);\n selectedElem.show();\n resultElem.hide();\n addAccount.hide();\n valueField.val(account.id);\n }\n\n function displayMasterAccount(account) {\n if (account.id) {\n masterField.val(account.id);\n submitBtn.prop('disabled', false);\n }\n //\n // var content;\n //\n // if (account.id) {\n // content = getAccountTemplate(account);\n // submitBtn.prop('disabled', false);\n // } else {\n // content = '<p class=\"info text-red\">Master account not found</p>';\n // }\n //\n // masterAccount.html(content);\n // masterField.val(account.id);\n }\n\n $(document).on('click', '.account-result .result-item', function (e) {\n var accountId = $(this).data('id');\n\n $.get(urls.accountDetail + '?id=' + accountId).done(chooseAccount);\n });\n\n $(document).on('click', '.selected .change-selected', function (e) {\n e.preventDefault();\n\n selectedElem.hide();\n resultElem.show();\n addAccount.show();\n // masterAccount.html('<p class=\"info\">Please select ' + (transferType === 'deposit' ? 'origin' : 'destination') + ' account</p>');\n valueField.val('');\n masterField.val('');\n submitBtn.prop('disabled', true);\n });\n})();\n"
},
{
"alpha_fraction": 0.6444849371910095,
"alphanum_fraction": 0.6581586003303528,
"avg_line_length": 29.47222137451172,
"blob_id": "a88e8c40899872c4a4d6f3f49666be821decec99",
"content_id": "3aee121b1b54570f3bf4bcf43ad7f007a32a69b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1097,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 36,
"path": "/apps/orders/tests/test_models.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom django.core.exceptions import ValidationError\n\nfrom users.models import User\nfrom orders.models import Order\n\n\nclass OrdersModelsTest(TestCase):\n \"\"\"\n tests for orders:models\n \"\"\"\n\n def test_order(self):\n user = User.objects.create_user('temporary_user',\n '[email protected]', 'temporary_user', balance=100)\n reciver = User.objects.create_user('temporary_reciver',\n 'temporary_reciver')\n\n order = Order.objects.create(origin_user=user, receiving_user=reciver,\n order_type=2, amount=10)\n\n reciver.refresh_from_db()\n user.refresh_from_db()\n\n self.assertEqual(reciver.balance, 10)\n self.assertEqual(user.balance, 90)\n\n self.assertRaises(ValidationError, Order.objects.create, origin_user=user,\n receiving_user=reciver, order_type=2, amount=0)\n\n self.assertRaises(ValidationError, Order.objects.create, origin_user=user,\n receiving_user=reciver, order_type=2, amount=-10)\n\n user.delete()\n reciver.delete()\n order.delete()\n"
},
{
"alpha_fraction": 0.7364864945411682,
"alphanum_fraction": 0.7533783912658691,
"avg_line_length": 18.733333587646484,
"blob_id": "e1e17d0e20a12dc386cdadcb803ed1fcbc9deb28",
"content_id": "8e4f3fa3a96fced907a6c61c9647c1529c82d9f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 15,
"path": "/Makefile",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "all:\n\texport DJANGO_SETTINGS_MODULE=osem.settings.local && python manage.py runserver_plus\n\ncelery:\n\tcelery -A osem worker -B -l debug\n\ntest:\n\tREUSE_DB=1 python manage.py test\n\nlocust:\n\tlocust -f locustfile.py --host=http://localhost:8001\n\npush:\n\tgit push origin master\n\tgit push upstream master\n"
},
{
"alpha_fraction": 0.5695876479148865,
"alphanum_fraction": 0.6005154848098755,
"avg_line_length": 30.040000915527344,
"blob_id": "91fa9312b3d1124fad968e9cb8a1e375cdf7701b",
"content_id": "a08fd197674fa1215d553b7711299019544065eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 776,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 25,
"path": "/apps/orders/migrations/0002_auto_20160210_2236.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-10 22:36\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='order_type',\n field=models.IntegerField(blank=True, choices=[(1, 'Resource Allocation'), (2, 'Resource Transfer'), (3, 'Dept Account Reconciliation'), (4, 'Resource Spend')], db_index=True, null=True),\n ),\n migrations.AlterField(\n model_name='order',\n name='transactions',\n field=models.ManyToManyField(blank=True, to='orders.Transaction'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.49835604429244995,
"alphanum_fraction": 0.49835604429244995,
"avg_line_length": 38.425926208496094,
"blob_id": "2510902dd41a43ffc9cebfd71121a247351af5f2",
"content_id": "6ee3a99f1b8905f9d8ff136cc3caaa3fcc55e5c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2129,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 54,
"path": "/apps/users/admin.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseAdmin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import User\nfrom .models import Account\nfrom .models import Department\nfrom users.forms import UserAdminForm\n\n\nclass UserAdmin(BaseAdmin):\n fieldsets = (\n (None, {'fields': ('username', 'email', 'password')}),\n (_('Personal info'), {'fields': ('uid',\n 'first_name',\n 'last_name',\n 'phone',\n 'photo',\n 'validation_code',\n 'approving_user',\n 'balance',\n 'is_validated',\n 'user_department',\n 'is_department', )}),\n (_('Permissions'), {'fields':\n ('is_approved', 'is_active', 'is_staff',\n 'is_superuser', 'groups', 'user_permissions')}),\n (_('Important dates'), {'fields':\n ('last_login', 'date_joined', 'expired_at')}),\n (_('Notification settings'),\n {'fields':\n ('email_on_transfer_requested', 'email_on_transfer_completed',\n 'email_on_resource_sent', 'email_on_resource_received')}),\n (_('Account lockout'), {'fields':('login_attempt', 'is_locked', 'locked_until')}),\n )\n readonly_fields = ('uid', )\n form = UserAdminForm\n\n\nclass AccountAdmin(admin.ModelAdmin):\n list_display = [\n 'owner', 'name', 'account_department', 'number', 'registered_at',\n 'expired_at', 'approving_user', 'is_validated', 'is_master'\n ]\n list_filter = ['registered_at', 'expired_at', 'is_validated']\n\n\nclass DepartmentAdmin(admin.ModelAdmin):\n list_display = ['name']\n\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Account, AccountAdmin)\nadmin.site.register(Department, DepartmentAdmin)\n"
},
{
"alpha_fraction": 0.5502392053604126,
"alphanum_fraction": 0.5808612704277039,
"avg_line_length": 28.85714340209961,
"blob_id": "b0fa3a12395330057ab4bfb98e5450f4d0d30491",
"content_id": "c43c2207bddfd9f192678920eed7f425ac41fb53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1045,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 35,
"path": "/apps/users/migrations/0018_auto_20160512_1957.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-05-12 19:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0017_auto_20160505_1237'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='email_on_resource_received',\n field=models.BooleanField(db_index=True, default=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='email_on_resource_sent',\n field=models.BooleanField(db_index=True, default=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='email_on_transfer_completed',\n field=models.BooleanField(db_index=True, default=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='email_on_transfer_requested',\n field=models.BooleanField(db_index=True, default=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6247507333755493,
"alphanum_fraction": 0.6255262494087219,
"avg_line_length": 33.71538543701172,
"blob_id": "817e46e8b9ff556cbe1d3a6e4a568d82745ef02f",
"content_id": "4ecafdec33fbbc4aa3fc8cafd4b877e3b349315b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9026,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 260,
"path": "/apps/users/views.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import uuid\nimport random\nimport string\nimport pytz\nfrom datetime import datetime\n\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ugettext\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView, View\nfrom django.views.generic.edit import UpdateView\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.contrib import messages\nfrom django.contrib.auth import login as django_login\nfrom django.contrib.auth import authenticate as django_authenticate\nfrom django.contrib.auth import logout as django_logout\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom users.forms import RegisterForm\nfrom users.forms import LoginForm\nfrom users.forms import UserProfileForm\nfrom users.forms import PhotoForm\nfrom users.forms import ResetPasswordForm\nfrom users.forms import ChangePasswordForm\nfrom users.models import User\nfrom users.tasks import send_validation_code\n\n\nclass RegisterView(TemplateView):\n \"\"\"\n View for sign up page\n \"\"\"\n template_name = \"users/register.html\"\n form_class = RegisterForm\n\n def get_form(self):\n return self.form_class(data=self.request.POST or None)\n\n def get_context_data(self, **kwargs):\n kwargs.update({\n 'form': self.get_form()\n })\n return super(RegisterView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n form = RegisterForm(request.POST, request.FILES)\n if form.is_valid():\n user = form.save()\n # send_validation_code.apply_async(kwargs={'pk': user.id })\n messages.info(request, _('You were successfully registered.'\n 'Validation link was sent to you'), extra_tags='register')\n return HttpResponseRedirect(reverse('users:code_sent'))\n return self.render_to_response({'form': form})\n\n\nclass LoginView(TemplateView):\n \"\"\"\n View for sign in page\n \"\"\"\n template_name = \"users/login.html\"\n form_class = LoginForm\n\n def get_form(self):\n return self.form_class(data=self.request.POST or None)\n\n def get_context_data(self, **kwargs):\n kwargs.update({\n 'form': self.get_form()\n })\n return super(LoginView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n username = request.POST['username']\n password = request.POST['password']\n user = django_authenticate(username=username, password=password)\n if user:\n if not user.is_validated:\n msg = _('User is not validated.')\n form._errors['__all__'] = form.error_class([msg])\n elif user.is_locked:\n if datetime.now(pytz.utc) < user.locked_until:\n msg = _('Your account is locked. Please try again in an hour')\n form._errors['__all__'] = form.error_class([msg])\n else:\n user.unlock_user\n return self.login_user(user)\n else:\n return self.login_user(user)\n else:\n try:\n user = User.objects.get(username=username)\n user.add_login_attempt()\n except User.DoesNotExist:\n pass\n\n if user.is_locked:\n msg = _('Your account is locked. 
Please try again in an hour')\n form._errors['__all__'] = form.error_class([msg])\n else:\n msg = _('Invalid password.')\n form._errors['password'] = form.error_class([msg])\n return self.render_to_response({'form': form})\n\n def login_user(self, user):\n user.reset_login_attempt()\n django_login(self.request, user)\n return HttpResponseRedirect(reverse('orders:send'))\n\n\nclass UserValidationView(View):\n \"\"\"\n Validation user by id and code\n \"\"\"\n def get(self, request, *args, **kwargs):\n pk = kwargs.get('pk')\n code = kwargs.get('code')\n try:\n user = User.objects.get(pk=pk, validation_code=code)\n user.is_validated = True\n user.save()\n messages.info(request, _('Congratulations!'\\\n 'Your account was successfully validated.'), extra_tags='validate_user')\n except User.DoesNotExist:\n messages.warning(request, _('Wrong validation link!'), extra_tags='validate_user')\n return HttpResponseRedirect(reverse('home'))\n\n\nclass UserProfileView(LoginRequiredMixin, UpdateView):\n \"\"\"\n view for user profile page\n \"\"\"\n template_name = \"users/profile.html\"\n form_class = UserProfileForm\n model = User\n success_url = reverse_lazy('users:profile')\n login_url = reverse_lazy('users:login')\n\n def get_object(self, queryset=None):\n return self.request.user\n\n def form_valid(self, form):\n instance = form.instance\n instance.save()\n messages.info(self.request, _('Profile updated!'), extra_tags='update_profile')\n return super(UserProfileView, self).form_valid(form)\n\n\nclass ProfilePhotoAPI(LoginRequiredMixin, View):\n \"\"\"\n Endpoint to handle photo upload\n \"\"\"\n\n def get_form(self):\n return PhotoForm(data=self.request.POST or None,\n files=self.request.FILES or None,\n instance=self.request.user)\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n form.save()\n # That is 200 OK success\n messages.info(self.request, _('Photo successfully changed'), extra_tags='update_profile')\n return JsonResponse({})\n error = {'errors': form.errors}\n return JsonResponse(error, status=400)\n\n\nclass LogoutView(View):\n \"\"\"\n Logout action\n \"\"\"\n def get(self, request, *args, **kwargs):\n django_logout(request)\n return HttpResponseRedirect(reverse('home'))\n\n\nclass ResetPasswordView(TemplateView):\n \"\"\"\n view for reset password\n \"\"\"\n template_name = \"users/reset_password.html\"\n form_class = ResetPasswordForm\n\n def get_form(self):\n return self.form_class(data=self.request.POST or None)\n\n def get_context_data(self, **kwargs):\n kwargs.update({\n 'form': self.get_form()\n })\n return super(ResetPasswordView, self).get_context_data(**kwargs)\n\n def generate_password(self):\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))\n\n def send_email(self, user, new_password):\n context = {'user': user, 'new_password': new_password}\n\n text_content = render_to_string('users/emails/new_password.html', context)\n html_content = render_to_string('users/emails/new_password.html', context)\n\n msg = EmailMultiAlternatives(\n ugettext('OSEM Reset Password'), # subject\n text_content, # message\n settings.DEFAULT_FROM_EMAIL, # from\n [user.email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send(fail_silently=False)\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n user = User.objects.get(email=request.POST['email'])\n new_password = self.generate_password()\n user.set_password(new_password)\n user.save()\n self.send_email(user, 
new_password)\n return HttpResponseRedirect(reverse('users:reset_password_success'))\n return self.render_to_response({'form': form})\n\n\nclass ResetPasswordSuccessView(TemplateView):\n \"\"\"\n view for reset password success\n \"\"\"\n template_name = \"users/reset_password_success.html\"\n\n\nclass ChangePasswordView(LoginRequiredMixin, UpdateView):\n \"\"\"\n view for change password\n \"\"\"\n template_name = \"users/change_password.html\"\n form_class = ChangePasswordForm\n model = User\n success_url = reverse_lazy('users:change_password')\n login_url = reverse_lazy('users:login')\n\n def get_object(self, queryset=None):\n return self.request.user\n\n def form_valid(self, form):\n instance = form.instance\n instance.save()\n # update_session_auth_hash(self.request, instance)\n messages.info(self.request,\n _('Password successfully changed. Please log in with your new password.'),\n extra_tags='change_password')\n return super(ChangePasswordView, self).form_valid(form)\n"
},
{
"alpha_fraction": 0.6647350788116455,
"alphanum_fraction": 0.6647350788116455,
"avg_line_length": 32.55555725097656,
"blob_id": "fcda8d7a3b66d3087de5125354cdf919868e9c5a",
"content_id": "6480305b757f8591ab26337384f7b1b5280d2f8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1208,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 36,
"path": "/osem/urls.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import patterns\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.views.decorators.cache import cache_page\n\nfrom orders.views import Home\n\nurlpatterns = [\n url(r'^$', Home.as_view(),\n name='home'),\n url(r'^access-denied/$', TemplateView.as_view(\n template_name='orders/transfer/not_approved.html'),\n name='not_approved'),\n url(r'^does-not-exist/$', TemplateView.as_view(\n template_name='orders/transfer/does_not_exist.html'),\n name='does_not_exist'),\n\n url(r'^user/', include('users.urls')),\n url(r'^order/', include('orders.urls')),\n url(r'^oauth/', include('social.apps.django_app.urls', namespace='social')),\n url(r'^osem_admin/', include('osem_admin.urls')),\n url(r'^admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += patterns('',\n url(r'^__debug__/', include(debug_toolbar.urls)),\n )\n"
},
{
"alpha_fraction": 0.5572206974029541,
"alphanum_fraction": 0.5912806391716003,
"avg_line_length": 27.230770111083984,
"blob_id": "e22e888f97f0f3e0ccac5d96a3e402da3bb4626e",
"content_id": "f8351e1c52c7bab4067c3577bab112e2f7dd7408",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 26,
"path": "/apps/orders/migrations/0017_auto_20161230_1134.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-12-30 11:34\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0016_requestorder'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='requestorder',\n name='status',\n field=models.IntegerField(choices=[(1, 'Pending'), (2, 'Approved'), (3, 'Not approved')], db_index=True, default=1, null=True),\n ),\n migrations.AddField(\n model_name='requestorder',\n name='uid',\n field=models.UUIDField(default=uuid.uuid4, editable=False, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5501273274421692,
"alphanum_fraction": 0.5638128519058228,
"avg_line_length": 27.694063186645508,
"blob_id": "1f6bcbdee3be0a9f3824dc8222e6c86d4177d8af",
"content_id": "6f9d35f69dc5d419dc029331ee922d7fcdfbf4ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6284,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 219,
"path": "/apps/orders/tests/test_views.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom users.models import User\nfrom users.models import Account\nfrom users.models import Department\nfrom orders.models import Transaction\n\n\nclass OrdersViewsTest(TestCase):\n \"\"\"\n tests for orders:views\n including forms and models\n \"\"\"\n def setUp(self):\n user = User.objects.create_user(\n 'temporary',\n '[email protected]',\n 'temporary')\n user.is_validated = True\n user.is_approved = True\n user.is_active = True\n user.save()\n user = User.objects.create_user(\n 'temporary_reciver',\n '[email protected]',\n 'temporary_reciver')\n user.is_validated = True\n user.is_approved = True\n user.is_active = True\n user.save()\n\n def test_home(self):\n # login required\n response = self.client.get(reverse('orders:home'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:home'))\n self.assertEqual(response.status_code, 200)\n\n def test_history(self):\n # login required\n response = self.client.get(reverse('orders:history'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:history'))\n self.assertEqual(response.status_code, 200)\n\n def test_send(self):\n # login required\n response = self.client.get(reverse('orders:send'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:send'))\n self.assertEqual(response.status_code, 200)\n\n user = User.objects.get(username='temporary')\n reciver = User.objects.get(username='temporary_reciver')\n\n user.balance = 100\n user.save()\n\n response = self.client.post(reverse('orders:send'),{\n 'username': '[email protected]',\n 'amount': '10'\n }\n )\n\n self.assertRedirects(\n response = response,\n expected_url = reverse('orders:send_success')\n )\n\n # check no balance fail page\n\n response = self.client.post(reverse('orders:send'),{\n 'username': '[email protected]',\n 'amount': '9000'\n }\n )\n\n self.assertRedirects(\n response = response,\n expected_url = reverse('orders:send_fail')\n )\n\n # check negative amount value\n\n response = self.client.post(reverse('orders:send'),{\n 'username': '[email protected]',\n 'amount': '-20'\n }\n )\n\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'amount',\n errors = [\"You can't send order with negative amount\"]\n )\n\n # check zero amount value\n\n response = self.client.post(reverse('orders:send'),{\n 'username': '[email protected]',\n 'amount': '0'\n }\n )\n\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'amount',\n errors = [\"You can't send order \" \\\n 'with zero amount']\n )\n\n # check sending resources to yourself\n\n response = self.client.post(reverse('orders:send'),{\n 'username': 'temporary',\n 'amount': '10'\n }\n )\n\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'username',\n errors = [\"You can't send order \" \\\n 'to yourself']\n )\n\n # check required fields\n\n response = self.client.post(reverse('orders:send'), {\n 'username': '',\n 'amount': ''\n }\n )\n\n self.assertFormError(\n response = response,\n form = 'form',\n field = 'username',\n errors = [\"This field is required.\"]\n )\n\n self.assertFormError(\n response = response,\n form = 'form',\n field = 
'amount',\n errors = [\"This field is required.\"]\n )\n\n\n def test_transfer(self):\n # login required\n response = self.client.get(reverse('orders:transfer'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:transfer'))\n self.assertEqual(response.status_code, 200)\n\n def test_new_account(self):\n # login required\n response = self.client.get(reverse('orders:new_account'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:new_account'))\n self.assertEqual(response.status_code, 200)\n\n def test_view_accounts(self):\n # login required\n response = self.client.get(reverse('orders:view_accounts'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:view_accounts'))\n self.assertEqual(response.status_code, 200)\n\n def test_users_search(self):\n # login required\n response = self.client.get(reverse('orders:users'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:users'))\n self.assertEqual(response.status_code, 200)\n\n def test_settings(self):\n # login required\n response = self.client.get(reverse('orders:settings'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='temporary', password='temporary')\n\n # 200\n response = self.client.get(reverse('orders:settings'))\n self.assertEqual(response.status_code, 200)\n"
},
{
"alpha_fraction": 0.529748260974884,
"alphanum_fraction": 0.5675057172775269,
"avg_line_length": 26.3125,
"blob_id": "7e7b908fa9a87bdcb3e8ab3122170650fb584d25",
"content_id": "1aa7c3affd9c797b0dfe14d76df9ea2e0c304a8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 874,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 32,
"path": "/apps/users/migrations/0022_auto_20160924_0803.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-09-24 08:03\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport users.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0021_auto_20160712_1252'),\n ]\n\n operations = [\n migrations.AlterModelManagers(\n name='user',\n managers=[\n ('objects', users.models.CachedUserManager()),\n ],\n ),\n migrations.AddField(\n model_name='user',\n name='login_attempt',\n field=models.IntegerField(default=0),\n ),\n migrations.AlterField(\n model_name='user',\n name='photo',\n field=models.ImageField(blank=True, null=True, upload_to=users.models.PathAndRename('photo'), verbose_name='Photo'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5899903774261475,
"alphanum_fraction": 0.6207892298698425,
"avg_line_length": 38.96154022216797,
"blob_id": "5ae6e307d34be9ebbb2df094b0f92771bf608115",
"content_id": "57b1673b22f72694704caa90b3646334183e671c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1039,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 26,
"path": "/apps/orders/migrations/0004_ordersorders.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-11 12:15\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0003_auto_20160211_1207'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='OrdersOrders',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('remarks', models.TextField(blank=True, null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Created at')),\n ('order_from', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_from', to='orders.Order')),\n ('order_to', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_to', to='orders.Order')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.5929622054100037,
"alphanum_fraction": 0.6150209903717041,
"avg_line_length": 39.51063919067383,
"blob_id": "8bd2d436df8964b355164f8b178fe7e40dae568b",
"content_id": "606dd38c09baef551be5ac982d9e3875a6714527",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1904,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 47,
"path": "/apps/orders/migrations/0003_auto_20160211_1207.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-11 12:07\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0002_auto_20160210_2236'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='order_type',\n field=models.IntegerField(choices=[(1, 'Resource Allocation'), (2, 'Resource Transfer'), (3, 'Dept Account Reconciliation'), (4, 'Resource Spend')], db_index=True, null=True),\n ),\n migrations.AlterField(\n model_name='order',\n name='origin_user',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='origin_user', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='order',\n name='receiving_user',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='receiving_user', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='origin_account',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='origin_account', to='users.Account'),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='receiving_account',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='receiving_account', to='users.Account'),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='transaction_type',\n field=models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')], null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.716312050819397,
"alphanum_fraction": 0.716312050819397,
"avg_line_length": 24.636363983154297,
"blob_id": "84254962f3fc0da383d00b8c48171cb68243d28d",
"content_id": "d0ed9e1c162d3047563b8f083353f374d8306105",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 11,
"path": "/apps/users/templatetags/users_filters.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django import template\nregister = template.Library()\n\n\[email protected](name='add_class')\ndef add_class(field, css):\n return field.as_widget(attrs={\"class\":css})\n\[email protected](name='hidden')\ndef hidden(field):\n return field.as_widget(attrs={\"style\":\"display: none;\"})\n"
},
{
"alpha_fraction": 0.6979807019233704,
"alphanum_fraction": 0.6979807019233704,
"avg_line_length": 53.238094329833984,
"blob_id": "2d58c33146ff545fb63658c460fb3f9d617991e5",
"content_id": "64b4dc2bd78cf91050f171525ce8fac3feef3a2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 21,
"path": "/apps/users/urls.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.views.generic import TemplateView\nfrom .views import RegisterView, LoginView, LogoutView, UserValidationView\nfrom .views import UserProfileView, ProfilePhotoAPI\nfrom .views import ResetPasswordView, ResetPasswordSuccessView, ChangePasswordView\n\napp_name = 'users'\nurlpatterns = [\n url(r'^register/$', RegisterView.as_view(), name='register'),\n url(r'^login/$', LoginView.as_view(), name='login'),\n url(r'^logout/$', LogoutView.as_view(), name='logout'),\n url(r'^profile/$', UserProfileView.as_view(), name='profile'),\n url(r'^profile/photo/$', ProfilePhotoAPI.as_view(), name='profile_photo'),\n url(r'^reset_password/$', ResetPasswordView.as_view(), name='reset_password'),\n url(r'^reset_password_success/$', ResetPasswordSuccessView.as_view(), name='reset_password_success'),\n url(r'^change_password/$', ChangePasswordView.as_view(), name='change_password'),\n url(r'^verify/$', TemplateView.as_view(template_name='users/verify.html'),\n name='code_sent'),\n url(r'^verify/(?P<pk>\\d+)/(?P<code>.+)/$', UserValidationView.as_view(),\n name='verify'),\n]\n"
},
{
"alpha_fraction": 0.5243697762489319,
"alphanum_fraction": 0.5815126299858093,
"avg_line_length": 24.869565963745117,
"blob_id": "29b865e226861b11ef4986303f5913e398927098",
"content_id": "a16011781429350c8bfaab44527dd1e1c8453c8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 23,
"path": "/apps/users/migrations/0004_auto_20160211_1150.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-11 11:50\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0003_auto_20160210_2138'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='department',\n field=models.IntegerField(blank=True, choices=[(1, 'A'), (2, 'B')], null=True, verbose_name='Department'),\n ),\n migrations.DeleteModel(\n name='Department',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7653429508209229,
"alphanum_fraction": 0.7653429508209229,
"avg_line_length": 26.649999618530273,
"blob_id": "4ea4de124bd4a918e4983f3dc156490b4996b4d7",
"content_id": "aa3016b16a251eaf08c87a916fe1ce2de3713638",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 20,
"path": "/osem/celery.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nimport os\nfrom celery import Celery\nfrom django.conf import settings\nfrom raven import Client\nfrom raven.contrib.celery import register_signal\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'osem.settings.local')\n\n\nif hasattr(settings, 'RAVEN_CONFIG'):\n client = Client(dsn=settings.RAVEN_CONFIG['dsn'])\n register_signal(client)\n\n\napp = Celery('osem')\napp.config_from_object('django.conf:settings')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\nTASK_SERIALIZER = 'json'\nACCEPT_CONTENT = ['json'] \n"
},
{
"alpha_fraction": 0.6342412233352661,
"alphanum_fraction": 0.6420233249664307,
"avg_line_length": 22.363636016845703,
"blob_id": "bde3947196858429d84f6aa89116910651df7483",
"content_id": "bf4a342ec8060cd3eebc7a48b182d6da1805ad1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 11,
"path": "/apps/orders/context_processors.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from .models import RequestOrder\n\ndef requests_to_me(request):\n count = 0\n user = request.user\n if user.id:\n count = RequestOrder.objects.filter(origin_user=request.user, status=1).count()\n\n return {\n 'requests_to_me': count\n }\n"
},
{
"alpha_fraction": 0.5509259104728699,
"alphanum_fraction": 0.5617284178733826,
"avg_line_length": 22.14285659790039,
"blob_id": "84a6715318cd53fb76b2d7757c9f16fb70ece91d",
"content_id": "e9dc0af1009e2445d7437a87414b7a14f52ecbc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 648,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 28,
"path": "/static/js/common.js",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "(function () {\n if (fullPath !== '/' && $(window).width() < 768 ) {\n setTimeout(function () {\n $('.sidebar-toggle').trigger('click');\n }, 100);\n }\n\n $('input.amount').numeric();\n $('input.datepicker').datepicker({\n todayHighlight: true,\n endDate: '0d'\n });\n\n $('.sidebar-toggle').click(function (e) {\n e.preventDefault();\n var body = $('body');\n body.removeClass('initial userbar-in');\n body.toggleClass('sidebar-in');\n });\n\n $('.userbar-toggle').click(function (e) {\n e.preventDefault();\n var body = $('body');\n body.removeClass('initial sidebar-in');\n body.toggleClass('userbar-in');\n });\n\n})();\n"
},
{
"alpha_fraction": 0.7565789222717285,
"alphanum_fraction": 0.7565789222717285,
"avg_line_length": 20.714284896850586,
"blob_id": "d0f1b3ad0393fe7cd736d3c19fc4991bb2b29c2a",
"content_id": "db2454c32bfc13d7322f87f44d5bca4e95f03774",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 7,
"path": "/apps/users/tasks.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from osem.celery import app\nfrom users.models import User\n\n\[email protected]\ndef send_validation_code(pk):\n User.objects.get(pk=pk).email_validation_code()\n"
},
{
"alpha_fraction": 0.5782997608184814,
"alphanum_fraction": 0.5805369019508362,
"avg_line_length": 32.525001525878906,
"blob_id": "1717fed334aeb9bddc5de343af953338a716b502",
"content_id": "bda822183945935bdf1e0e9cb97b2b9210a96239",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2682,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 80,
"path": "/apps/osem_admin/tables.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.utils.html import escape\nfrom django.utils.safestring import mark_safe\n\nimport django_tables2 as tables\nfrom django_tables2.utils import A # alias for Accessor\n\nfrom orders.models import Order\nfrom orders.models import Transaction\nfrom users.models import User\nfrom users.models import Account\n\n\nclass ImageColumn(tables.Column):\n def render(self, value):\n return mark_safe('<a href=\"/media/%s\"><img src=\"/media/%s\" height=\"50\" width=\"50\" /></a>'\n % (escape(value), escape(value)))\n\n\nclass UsersTable(tables.Table):\n selection = tables.CheckBoxColumn(\n accessor='id',\n attrs={\"th__input\": {\"onclick\": \"toggle(this)\"}},\n orderable=False)\n photo = ImageColumn('photo')\n\n class Meta:\n model = User\n fields = ['selection', 'uid', 'username', 'first_name', 'last_name',\n 'phone', 'photo', 'date_joined', 'expired_at',\n 'approving_user', 'is_validated', 'user_department',\n 'is_department']\n attrs = {\"class\": \"paleblue\"}\n\n\nclass AccountsTable(tables.Table):\n selection = tables.CheckBoxColumn(\n accessor='id',\n attrs={\"th__input\": {\"onclick\": \"toggle(this)\"}},\n orderable=False)\n\n class Meta:\n model = Account\n fields = ['selection', 'uid', 'name', 'owner', 'account_department',\n 'number', 'registered_at', 'expired_at', 'approving_user',\n 'is_validated']\n attrs = {\"class\": \"paleblue\"}\n\n\nclass OrdersTable(tables.Table):\n transaction = tables.LinkColumn('osem_admin:transaction', args=[A('transaction.pk')])\n\n selection = tables.CheckBoxColumn(\n accessor='id',\n attrs={\"th__input\": {\"onclick\": \"toggle(this)\"}},\n orderable=False)\n\n class Meta:\n model = Order\n fields = ['selection', 'uid', 'order_type', 'origin_user',\n 'receiving_user', 'amount', 'remarks', 'created_at',\n 'completed_at', 'transaction']\n attrs = {\"class\": \"paleblue\"}\n\n\nclass TransactionsTable(tables.Table):\n order = tables.LinkColumn('osem_admin:order',\n verbose_name=\"order\",\n args=[A('order.pk')])\n\n selection = tables.CheckBoxColumn(\n accessor='id',\n attrs={\"th__input\": {\"onclick\": \"toggle(this)\"}},\n orderable=False)\n\n class Meta:\n model = Transaction\n fields = ['selection', 'uid', 'transaction_type', 'transaction_status',\n 'amount', 'created_at', 'completed_at', 'origin_account',\n 'receiving_account', 'approving_user', 'order']\n attrs = {\"class\": \"paleblue\"}\n"
},
{
"alpha_fraction": 0.556851327419281,
"alphanum_fraction": 0.5762876868247986,
"avg_line_length": 28.399999618530273,
"blob_id": "695d587bb87b616d9d51d435cbe94c0b119f23fd",
"content_id": "1553b258bc7a5f20e02df47ed7c7d1ca72d9562d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1029,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 35,
"path": "/apps/users/migrations/0017_auto_20160505_1237.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-05-05 12:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0016_user_is_approved'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='user',\n old_name='email_notifications_enabled',\n new_name='email_on_resource_received',\n ),\n migrations.AddField(\n model_name='user',\n name='email_on_resource_sent',\n field=models.BooleanField(db_index=True, default=False),\n ),\n migrations.AddField(\n model_name='user',\n name='email_on_transfer_completed',\n field=models.BooleanField(db_index=True, default=False),\n ),\n migrations.AddField(\n model_name='user',\n name='email_on_transfer_requested',\n field=models.BooleanField(db_index=True, default=False),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5986769795417786,
"alphanum_fraction": 0.5986769795417786,
"avg_line_length": 34.568626403808594,
"blob_id": "a5646bd5471b40259ebe2c3cc2b20868d10e3f25",
"content_id": "ca6dc932b6f9c1b2135d52b84102815909f5db2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1814,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 51,
"path": "/templates/users/includes/_change-password-form.html",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "<form action=\"{% url 'users:change_password' %}\" method=\"post\">\n\n {% csrf_token %}\n <input type=\"hidden\" name=\"username\" value=\"{{ user.username }}\">\n\n {% if form.errors %}\n <div class=\"alert alert-danger\">\n {% for message in form.non_field_errors %}\n {{ message }}\n {% endfor %}\n </div>\n {% endif %}\n\n <div class=\"form-group {% if form.old_password.errors %}has-error{% endif %}\">\n <label for=\"old_password\" class=\"control-label\">Old Password</label>\n <input type=\"password\" name=\"old_password\" class=\"form-control\" placeholder=\"Enter old password\">\n {% if form.old_password.errors %}\n <p class=\"help-block\">\n {% for message in form.old_password.errors %}\n {{ message }}\n {% endfor %}\n </p>\n {% endif %}\n </div>\n\n <div class=\"form-group {% if form.new_password.errors %}has-error{% endif %}\">\n <label for=\"new_password\" class=\"control-label\">New Password</label>\n <input type=\"password\" name=\"new_password\" class=\"form-control\" placeholder=\"Enter new password\">\n {% if form.new_password.errors %}\n <p class=\"help-block\">\n {% for message in form.new_password.errors %}\n {{ message }}\n {% endfor %}\n </p>\n {% endif %}\n </div>\n\n <div class=\"form-group {% if form.new_password_again.errors %}has-error{% endif %}\">\n <label for=\"new_password_again\" class=\"control-label\">Confirm New Password</label>\n <input type=\"password\" name=\"new_password_again\" class=\"form-control\" placeholder=\"Enter new password again\">\n {% if form.new_password_again.errors %}\n <p class=\"help-block\">\n {% for message in form.new_password_again.errors %}\n {{ message }}\n {% endfor %}\n </p>\n {% endif %}\n </div>\n\n <button class=\"btn btn-green\" type=\"submit\">Submit</button>\n</form>\n"
},
{
"alpha_fraction": 0.6675062775611877,
"alphanum_fraction": 0.732997477054596,
"avg_line_length": 19.894737243652344,
"blob_id": "fbf18567937ce6bf5770977dd94f5781a9de7686",
"content_id": "77593c0873be876356ea3c946bba08725da1749b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 19,
"path": "/README.md",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# osem\n[](https://circleci.com/gh/shalakhin/osem)\nOSEM for Milonil\n\n## Requirements\n\n- Python 3.4+\n\n## How to run tests\n\n- `pip install -r requirements/dev.txt`\n- `ALTER USER osem CREATEDB`\n- `make test`\n\n## Installation\n\n```bash\nexport SITE_DOMAIN=https://osem.domain.com\n```\n"
},
{
"alpha_fraction": 0.6719104647636414,
"alphanum_fraction": 0.6758432984352112,
"avg_line_length": 47.610294342041016,
"blob_id": "b426003f18cbe058552804eb099ddfbde854587e",
"content_id": "d4c5cdace1c457ee72c982ccf8ee341948961c26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6611,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 136,
"path": "/locustfile.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from locust.core import HttpLocust, TaskSet, task\n\ndef index(l):\n l.client.get(\"http://osem.shalakh.in/\")\n\ndef profile(l):\n l.client.get(\"http://osem.shalakh.in/user/profile/\")\n\ndef home(l):\n l.client.get(\"http://osem.shalakh.in/order/home/\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=uid\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=order_type\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=origin_user\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=receiving_user\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=amount\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=remarks\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=created_at\")\n l.client.get(\"http://osem.shalakh.in/order/home/?sort=completed_at\")\n\ndef history(l):\n l.client.get(\"http://osem.shalakh.in/order/history/\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=uid\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=order_type\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=origin_user\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=receiving_user\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=amount\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=remarks\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=created_at\")\n l.client.get(\"http://osem.shalakh.in/order/history/?sort=completed_at\")\n\ndef send(l):\n response = l.client.get(\"http://osem.shalakh.in/order/send/\")\n csrftoken = response.cookies['csrftoken']\n\n l.client.post(\"http://osem.shalakh.in/order/send/\", {\n \"csrfmiddlewaretoken\": csrftoken,\n \"username\": \"Nobody\",\n \"amount\":\"1\",\n \"remarks\":\"locust\"})\n\ndef transfer_out(l):\n response = l.client.get(\"http://osem.shalakh.in/order/transfer/\")\n csrftoken = response.cookies['csrftoken']\n\n l.client.post(\"http://osem.shalakh.in/order/transfer/\", {\n \"csrfmiddlewaretoken\": csrftoken,\n \"order_type\": 4,\n \"amount\": 1,\n \"receiving_account\": 9,\n \"remarks\":\"locust\"})\n\ndef transfer_in(l):\n response = l.client.get(\"http://osem.shalakh.in/order/transfer/\")\n csrftoken = response.cookies['csrftoken']\n\n l.client.post(\"http://osem.shalakh.in/order/transfer/\", {\n \"csrfmiddlewaretoken\": csrftoken,\n \"order_type\": 1,\n \"amount\": 1,\n \"origin_account\": 9,\n \"remarks\":\"locust\"})\n\ndef transactions(l):\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=uid\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=transaction_type\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=transaction_status\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=amount\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=created_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=completed_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=origin_account\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=receiving_account\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=approving_user\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/transactions/?sort=order\")\n\ndef orders(l):\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=uid\")\n 
l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=order_type\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=origin_user\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=receiving_user\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=amount\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=remarks\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=created_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=completed_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/orders/?sort=transaction\")\n\ndef accounts(l):\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=uid\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=name\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=owner\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=account_department\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=number\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=registered_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=expired_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=approving_user\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/accounts/?sort=is_validated\")\n\ndef users(l):\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=uid\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=username\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=first_name\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=last_name\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=phone\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=photo\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=date_joined\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=expired_at\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=is_validated\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=user_department\")\n l.client.get(\"http://osem.shalakh.in/osem_admin/users/?sort=is_department\")\n\n\nclass UserBehavior(TaskSet):\n tasks = {index:1, profile:1, home:1, history:1, send:5, transfer_out:5,\n transfer_in:5, transactions:1, orders:1, accounts:1, users:1}\n\n @task\n def login(self):\n response = self.client.get(\"http://osem.shalakh.in/user/login/\")\n csrftoken = response.cookies['csrftoken']\n\n self.client.post(\"http://osem.shalakh.in/user/login/\", {\n \"username\":\"admin\",\n \"password\":\"1234qwer\"},\n headers={\"X-CSRFToken\": csrftoken})\n\n def on_start(self):\n self.login()\n\n\nclass WebsiteUser(HttpLocust):\n task_set = UserBehavior\n min_wait=5000\n max_wait=9000\n"
},
{
"alpha_fraction": 0.5397447347640991,
"alphanum_fraction": 0.5427182912826538,
"avg_line_length": 32.87715148925781,
"blob_id": "e59127a5ee28e841457afce1c4964fd363a774d1",
"content_id": "25d3778699064302e8f62535b5632e6dcde9e55a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13788,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 407,
"path": "/apps/users/models.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "import uuid\nimport os\nimport io\nfrom time import time\nfrom datetime import datetime, timedelta\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.base_user import AbstractBaseUser\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.auth.models import UserManager\nfrom django.core import validators\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ugettext\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils import timezone\nfrom django.utils.deconstruct import deconstructible\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom PIL import Image\nfrom caching.base import CachingManager, CachingMixin\n\n@deconstructible\nclass PathAndRename(object):\n\n def __init__(self, sub_path):\n self.path = sub_path\n\n def __call__(self, instance, filename):\n filename = '{}-{}.png'.format(instance.username, str(time()).replace('.', '_'))\n return os.path.join(self.path, filename)\n\npath_and_rename = PathAndRename('photo')\n\n\nclass CachedUserManager(CachingManager, UserManager):\n pass\n\n\n@python_2_unicode_compatible\nclass ValidationCodeMixin(CachingMixin, models.Model):\n \"\"\"\n Validation code field and generation\n \"\"\"\n validation_code = models.CharField(\n _('Validation code'),\n max_length=32,\n null=True)\n\n objects = CachingManager()\n\n def save(self, *args, **kwargs):\n if not self.validation_code:\n self.validation_code = uuid.uuid4().hex\n super(ValidationCodeMixin, self).save(*args, **kwargs)\n\n class Meta:\n abstract = True\n\n\nclass Department(CachingMixin, models.Model):\n \"\"\"\n Data about department is stored using that model\n \"\"\"\n name = models.CharField(_('Name'), max_length=255, blank=False, null=False)\n is_internal = models.BooleanField(_('internal'), default=False)\n created_at = models.DateTimeField(\n auto_now_add=True,\n blank=True,\n null=True)\n expired_at = models.DateTimeField(blank=True, null=True)\n\n objects = CachingManager()\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'departments'\n\n\nclass User(ValidationCodeMixin, AbstractBaseUser, CachingMixin, PermissionsMixin):\n \"\"\"\n Data about user is stored using that model\n \"\"\"\n username = models.CharField(\n _('username'),\n max_length=30,\n unique=True,\n db_index=True,\n help_text=_('Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.'),\n validators=[\n validators.RegexValidator(\n r'^[\\w.@+-]+$',\n _('Enter a valid username. This value may contain only '\n 'letters, numbers ' 'and @/./+/-/_ characters.')\n ),\n ],\n error_messages={\n 'unique': _(\"A user with that username already exists.\"),\n },\n )\n first_name = models.CharField(_('first name'),\n db_index=True,\n max_length=30,\n blank=True)\n last_name = models.CharField(_('last name'),\n db_index=True,\n max_length=30,\n blank=True)\n email = models.EmailField(_('email address'),\n db_index=True,\n blank=True)\n is_staff = models.BooleanField(\n _('staff status'),\n default=False,\n help_text=_('Designates whether the user can log into this admin site.'),\n )\n is_active = models.BooleanField(\n _('active'),\n default=True,\n help_text=_(\n 'Designates whether this user should be treated as active. 
'\n 'Unselect this instead of deleting accounts.'\n ),\n )\n date_joined = models.DateTimeField(_('date joined'), default=timezone.now)\n\n objects = CachedUserManager()\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['email']\n\n uid = models.UUIDField(default=uuid.uuid4, editable=False, null=True)\n phone = models.CharField(\n _('Phone'), db_index=True, max_length=255, blank=True, null=True)\n photo = models.ImageField(\n _('Photo'),\n upload_to=path_and_rename,\n blank=True,\n null=True)\n thumbnail = models.ImageField(\n _('thumbnail'),\n upload_to='photo',\n blank=True,\n null=True)\n expired_at = models.DateTimeField(blank=True, null=True)\n is_approved = models.BooleanField(\n _('approved'),\n default=False,\n db_index=True,\n help_text=_('Designates whether this approved by administrator.'), )\n\n approving_user = models.ForeignKey('User',\n blank=True,\n null=True,\n db_index=True,\n related_name='user_approved')\n is_validated = models.BooleanField(blank=True,\n default=False,\n db_index=True)\n balance = models.DecimalField(max_digits=12, decimal_places=1, default=0)\n user_department = models.ForeignKey(Department,\n verbose_name=_('Department'),\n blank=True,\n db_index=True,\n null=True)\n is_department = models.BooleanField(blank=True,\n default=False,\n db_index=True)\n email_on_transfer_requested = models.BooleanField(blank=True,\n default=True,\n db_index=True)\n email_on_transfer_completed = models.BooleanField(blank=True,\n default=True,\n db_index=True)\n email_on_resource_sent = models.BooleanField(blank=True,\n default=True,\n db_index=True)\n email_on_resource_received = models.BooleanField(blank=True,\n default=True,\n db_index=True)\n login_attempt = models.IntegerField(default=0)\n is_locked = models.BooleanField(blank=True,\n default=False)\n locked_until = models.DateTimeField(blank=True, null=True)\n\n @property\n def master_account(self):\n try:\n return Account.objects.get(is_master=True,\n account_department__id=self.user_department.id)\n except Account.DoesNotExist:\n return Account.objects.none()\n\n @property\n def email_notifications_enabled(self):\n return (self.email_on_transfer_requested and\n self.email_on_transfer_completed and\n self.email_on_resource_sent and\n self.email_on_resource_received)\n\n def __str__(self):\n if self.first_name or self.last_name:\n return '%s %s' % (self.first_name, self.last_name)\n else:\n return self.username\n\n def get_validation_url(self):\n return settings.SITE_DOMAIN + reverse('users:verify',\n args=(self.id,\n self.validation_code, ))\n\n def email_validation_code(self):\n \"\"\"\n Send user email with validation code\n \"\"\"\n text_content = render_to_string('users/emails/verification_code.html',\n {'user': self})\n html_content = render_to_string('users/emails/verification_code.html',\n {'user': self})\n msg = EmailMultiAlternatives(\n ugettext('OSEM Validation Code'), # subject\n text_content, # message\n settings.DEFAULT_FROM_EMAIL, # from\n [self.email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send(fail_silently=False)\n\n def update_balance(self, new_balance):\n self.balance = new_balance\n self.save()\n\n def approve(self, user):\n self.approving_user = user\n self.is_validated = True\n self.is_active = True\n self.is_approved = True\n self.save()\n\n def expire(self, user):\n self.approving_user = user\n self.is_validated = False\n self.is_active = False\n self.is_approved = False\n self.expired_at = timezone.now()\n self.save()\n\n def resize_photo(self):\n if not self.photo:\n 
return\n\n image = Image.open(io.BytesIO(self.photo.read()))\n image.thumbnail(settings.PHOTO_SIZE, Image.ANTIALIAS)\n\n temp_handle = io.BytesIO()\n image.save(temp_handle, 'png')\n temp_handle.seek(0)\n\n suf = SimpleUploadedFile(\n os.path.split(self.photo.name)[-1],\n temp_handle.read(),\n content_type='image/png')\n\n self.photo.delete(save=False)\n self.photo.save(suf.name,\n suf,\n save=True)\n\n\n def create_thumbnail(self):\n if not self.photo:\n return\n\n # DJANGO_TYPE = self.photo.file.content_type\n #\n # if DJANGO_TYPE == 'image/jpeg':\n # PIL_TYPE = 'jpeg'\n # FILE_EXTENSION = 'jpg'\n # elif DJANGO_TYPE == 'image/png':\n # PIL_TYPE = 'png'\n # FILE_EXTENSION = 'png'\n\n image = Image.open(io.BytesIO(self.photo.read()))\n image.thumbnail(settings.THUMBNAIL_SIZE, Image.ANTIALIAS)\n\n temp_handle = io.BytesIO()\n image.save(temp_handle, 'png')\n temp_handle.seek(0)\n\n suf = SimpleUploadedFile(\n os.path.split(self.photo.name)[-1],\n temp_handle.read(),\n content_type='image/png')\n self.thumbnail.save('%s_thumbnail.%s' %\n (os.path.splitext(suf.name)[0], 'png'),\n suf,\n save=True)\n\n def delete(self, *args, **kwargs):\n if self.photo:\n photo_storage = self.photo.storage\n photo_path = self.photo.path\n thumbnail_storage = self.thumbnail.storage\n thumbnail_path = self.thumbnail.path\n photo_storage.delete(photo_path)\n thumbnail_storage.delete(thumbnail_path)\n super(User, self).delete(*args, **kwargs)\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_short_name(self):\n \"Returns the short name for the user.\"\n return self.first_name\n\n def lock_user(self):\n self.is_locked = True\n self.locked_until = datetime.now() + timedelta(hours=1)\n self.save()\n\n def unlock_user(self):\n self.is_locked = False\n self.save()\n\n def add_login_attempt(self):\n if self.is_locked:\n return\n\n self.login_attempt = self.login_attempt + 1\n if self.login_attempt == 3:\n self.lock_user()\n self.save()\n\n def reset_login_attempt(self):\n self.login_attempt = 0\n self.save()\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n\nclass Account(ValidationCodeMixin, CachingMixin, models.Model):\n \"\"\"\n Data about accounts is stored using that model\n \"\"\"\n uid = models.UUIDField(default=uuid.uuid4, editable=False, null=True)\n name = models.CharField(_('Name'), max_length=255, blank=True, null=True)\n owner = models.ForeignKey(User, verbose_name=_('Owner'), null=True)\n account_department = models.ForeignKey(Department,\n verbose_name=_('Department'),\n null=True)\n number = models.CharField(\n _('Account number'),\n max_length=255,\n unique=True,\n blank=True,\n null=True)\n registered_at = models.DateTimeField(\n _('Registration date'),\n auto_now_add=True,\n blank=True,\n null=True)\n expired_at = models.DateTimeField(\n _('Expiration date'),\n blank=True,\n null=True)\n approving_user = models.ForeignKey(User,\n blank=True,\n null=True,\n db_index=True,\n related_name='account_approved')\n is_validated = models.BooleanField(blank=True,\n default=False,\n db_index=True)\n\n is_master = models.BooleanField(\n help_text=_(\"Designates if it is master account\"),\n blank=True,\n default=False,\n db_index=True)\n\n objects = CachingManager()\n\n def __str__(self):\n return self.name\n\n def approve(self, user):\n self.approving_user = user\n self.is_validated = True\n self.save()\n\n def 
expire(self, user):\n self.approving_user = user\n self.is_validated = False\n self.expired_at = timezone.now()\n self.save()\n\n class Meta:\n db_table = 'accounts'\n"
},
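The User model in this entry locks an account for one hour after three failed logins via add_login_attempt()/lock_user(). A minimal sketch of an authentication backend that would drive that counter — the backend class itself is an assumption, the repo does not ship one (signature matches the Django 1.9 vintage used here):

from django.contrib.auth.backends import ModelBackend
from django.utils import timezone

from users.models import User

class LockoutBackend(ModelBackend):
    # Hypothetical backend exercising the model's lockout helpers;
    # not part of the meso repo.
    def authenticate(self, username=None, password=None, **kwargs):
        try:
            candidate = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        if candidate.is_locked and candidate.locked_until \
                and candidate.locked_until > timezone.now():
            return None  # still inside the one-hour lock window
        user = super(LockoutBackend, self).authenticate(
            username=username, password=password, **kwargs)
        if user is None:
            candidate.add_login_attempt()  # locks on the third failure
        else:
            candidate.reset_login_attempt()
        return user

Note that lock_user() above stores a naive datetime.now() while a timezone-aware deployment (USE_TZ) would compare against timezone.now(); mixing the two raises or misbehaves, so one or the other should be used consistently.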
{
"alpha_fraction": 0.6780354976654053,
"alphanum_fraction": 0.6780354976654053,
"avg_line_length": 34.75609588623047,
"blob_id": "083a42f96a707155fd2669ae0f076e42e567997c",
"content_id": "cefb1c1bd61cba04a85c815761c01c09900789b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2932,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 82,
"path": "/apps/orders/tasks.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ugettext\n\nfrom osem.celery import app\n\nfrom users.models import User\n\n\ndef send(email, template, kwargs):\n html_content = render_to_string(template, kwargs)\n msg = EmailMultiAlternatives(\n ugettext('OSEM notification'), # subject\n \"\", # message\n settings.DEFAULT_FROM_EMAIL, # from\n [email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send(fail_silently=False)\n\n\[email protected]\ndef notify_on_requested(transaction, user):\n if user.email_on_transfer_requested:\n send(user.email, 'users/emails/transfer_requested.html',\n {'user': user,\n 'transaction': transaction})\n\n\[email protected]\ndef notify_on_status_change(transaction, user):\n if user.email_on_transfer_completed:\n send(user.email, 'users/emails/transaction_status_changed.html',\n {'user': user,\n 'transaction': transaction})\n\n\[email protected]\ndef notify_on_send(order):\n # Sending notification to origin user\n if order.origin_user.email_on_resource_sent:\n send(order.origin_user.email, 'users/emails/resource_sent.html',\n {'origin_user': order.origin_user,\n 'receiving_user': order.receiving_user,\n 'order': order})\n\n # Sending notification to receiving user\n if order.receiving_user.email_on_resource_received:\n send(order.receiving_user.email, 'users/emails/resource_received.html',\n {'origin_user': order.origin_user,\n 'receiving_user': order.receiving_user,\n 'order': order})\n\[email protected]\ndef notify_on_request_order(request_order):\n send(request_order.origin_user.email, 'users/emails/request_received.html',\n {'origin_user': request_order.origin_user,\n 'receiving_user': request_order.receiving_user,\n 'request_order': request_order})\n\n send(request_order.receiving_user.email, 'users/emails/request_sent.html',\n {'origin_user': request_order.origin_user,\n 'receiving_user': request_order.receiving_user,\n 'request_order': request_order})\n\n\[email protected]\ndef notify_on_request_approved(request_order):\n send(request_order.receiving_user.email, 'users/emails/request_approved.html',\n {'origin_user': request_order.origin_user,\n 'receiving_user': request_order.receiving_user,\n 'request_order': request_order})\n\n\[email protected]\ndef notify_on_request_rejected(request_order):\n send(request_order.receiving_user.email, 'users/emails/request_rejected.html',\n {'origin_user': request_order.origin_user,\n 'receiving_user': request_order.receiving_user,\n 'request_order': request_order})\n"
},
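Each notifier in this file is a Celery @app.task, so callers enqueue it rather than invoke it inline; a hedged sketch of the calling side (the helper and its call site are assumptions):

from django.utils import timezone

from orders import tasks

def complete_order(order):
    # Hypothetical call site: persist the state change, then hand the
    # email work to a Celery worker instead of blocking the request on SMTP.
    order.completed_at = timezone.now()
    order.save()
    tasks.notify_on_send.delay(order)

Passing the model instance through .delay() leans on the task serializer being able to carry it; sending order.pk and re-fetching inside the task is the more defensive convention.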
{
"alpha_fraction": 0.5900110006332397,
"alphanum_fraction": 0.5933040380477905,
"avg_line_length": 30.96491241455078,
"blob_id": "ec0801c40ce93e0fc6944fe05df885926e861246",
"content_id": "4083277e592c0283e36049bfff8b9f228412c126",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1822,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 57,
"path": "/apps/orders/tables.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.utils.html import escape\nfrom django.utils.safestring import mark_safe\n\nimport django_tables2 as tables\nfrom django_tables2.utils import A # alias for Accessor\n\nfrom orders.models import Order\nfrom orders.models import Transaction\nfrom users.models import User\nfrom users.models import Account\n\n\nclass ImageColumn(tables.Column):\n def render(self, value):\n return mark_safe('<a href=\"/media/%s\"><img src=\"/media/%s\" height=\"50\" width=\"50\" /></a>'\n % (escape(value), escape(value)))\n\n\nclass UsersTable(tables.Table):\n photo = ImageColumn('photo')\n\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'phone', 'photo',\n 'expired_at', 'approving_user', 'is_validated',\n 'user_department', 'is_department']\n attrs = {\"class\": \"paleblue\"}\n\n\nclass AccountsTable(tables.Table):\n class Meta:\n model = Account\n fields = ['name', 'owner', 'account_department', 'number']\n attrs = {\"class\": \"paleblue\"}\n\n\nclass OrdersTable(tables.Table):\n # transaction = tables.LinkColumn('orders:transaction', args=[A('pk')])\n\n class Meta:\n model = Order\n fields = ['uid', 'order_type', 'origin_user', 'receiving_user',\n 'amount', 'remarks', 'created_at', 'completed_at',]\n attrs = {\"class\": \"paleblue\"}\n\n\nclass TransactionsTable(tables.Table):\n # order = tables.LinkColumn('orders:order',\n # verbose_name=\"order\",\n # args=[A('pk')])\n\n class Meta:\n model = Transaction\n fields = ['uid', 'transaction_type', 'transaction_status', 'amount',\n 'created_at', 'completed_at', 'origin_account',\n 'receiving_account']\n attrs = {\"class\": \"paleblue\"}\n"
},
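A minimal sketch of putting one of these django_tables2 tables on screen — the view and template names are assumptions:

from django.shortcuts import render
from django_tables2 import RequestConfig

from orders.tables import UsersTable
from users.models import User

def users_list(request):
    table = UsersTable(User.objects.all())
    # RequestConfig wires ?sort= and ?page= query parameters into the table.
    RequestConfig(request, paginate={'per_page': 25}).configure(table)
    return render(request, 'users/list.html', {'table': table})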
{
"alpha_fraction": 0.4926495850086212,
"alphanum_fraction": 0.49606838822364807,
"avg_line_length": 37.48684310913086,
"blob_id": "5e2b884537c5538b7c2402864464a71c98e13bc8",
"content_id": "2cf6cb1c3e76e121498398b89fd9666ff3ab6a7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2925,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 76,
"path": "/templates/orders/send/send.html",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "{% extends '_base.html' %}\n\n{% load staticfiles %}\n\n{% block title %}Send{% endblock %}\n\n{% block bodyclass %}page-order{% endblock %}\n\n{% block content %}\n <div class=\"content-container\">\n <div class=\"title-bar\">\n <h1><img src=\"{% static \"img/Button-Send-Black-NoText.svg\" %}\" class=\"title-icon\"> Send</h1>\n </div>\n\n <div class=\"row\">\n <div class=\"col-lg-9\">\n <div class=\"content-panel\">\n {% if form.non_field_errors %}\n <div class=\"alert alert-danger\">\n {% for message in form.non_field_errors %}\n {{ message }}\n {% endfor %}\n </div>\n {% endif %}\n\n <div class=\"row\">\n <form action=\"\" method=\"post\">\n {% csrf_token %}\n <div class=\"col-sm-4 origin-container\">\n <p class=\"note-origin\">Origin user: </p>\n {% include \"_small-profile.html\" with user=user %}\n </div>\n\n <div class=\"col-sm-5 col-sm-push-3 destination-container\">\n <p class=\"note-destination\">Receiving User:</p>\n\n <div class=\"user-selector\">\n <input type=\"text\" class=\"form-control query-field\" placeholder=\"Enter username\" {% if destination %}style=\"display:none\"{% endif %}>\n <input type=\"hidden\" class=\"user-field\" name=\"username\" value=\"{{ destination.username | default:'' }}\">\n\n <div class=\"row user-result\"></div>\n\n <div class=\"selected small-profile dark\" {% if not destination %}style=\"display:none\"{% endif %}>\n {% if destination %}\n <img src=\"{% static \"img/placeholder-img.png\" %}\" class=\"img-circle avatar avatar-medium\">\n <h4 class=\"name\">{{ destination.get_full_name }}</h4>\n <p class=\"username\">@{{ destination.username }}</p>\n <p><a href=\"#\" class=\"change-selected\">Change</a></p>\n {% endif %}\n </div>\n </div>\n </div>\n\n <div class=\"col-sm-3 col-sm-pull-5 amount-container\">\n <div class=\"form-group amount-field\">\n <label class=\"control-label sr-only\">Amount</label>\n <input type=\"text\" class=\"form-control amount\" placeholder=\"Enter Amount\" name=\"amount\" required>\n </div>\n\n <div class=\"form-submit\">\n <button class=\"btn btn-green btn-block btn-with-icon btn-submit\" type=\"submit\" {% if not destination %}disabled{% endif %}>\n <img src=\"{% static \"img/Button-Send-White.svg\" %}\">\n </button>\n </div>\n </div>\n </form>\n </div>\n </div>\n </div>\n </div>\n </div>\n{% endblock %}\n\n{% block scripts %}\n <script src=\"{% static 'js/pages/send-request.js'%}\"></script>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.5402985215187073,
"alphanum_fraction": 0.5910447835922241,
"avg_line_length": 24.769229888916016,
"blob_id": "854255480d29d3638eb22525fcf20270e1f2b390",
"content_id": "3db1cb35c6bb073216176f70fb48738fa1dbaafc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 26,
"path": "/apps/users/migrations/0009_auto_20160303_2344.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-03-03 23:44\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0008_auto_20160222_2318'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='uid',\n field=models.UUIDField(default=uuid.uuid4, editable=False, null=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='uid',\n field=models.UUIDField(default=uuid.uuid4, editable=False, null=True),\n ),\n ]\n"
},
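Migration 0009 gives uid a uuid4 default, but rows created before it keep uid = NULL; a sketch of the backfill data migration the schema would still need (not present in the repo):

import uuid

from django.db import migrations

def backfill_uids(apps, schema_editor):
    # Use the historical model captured by the migration framework,
    # never a live import of users.models.User.
    User = apps.get_model('users', 'User')
    for user in User.objects.filter(uid__isnull=True):
        user.uid = uuid.uuid4()
        user.save(update_fields=['uid'])

class Migration(migrations.Migration):
    dependencies = [('users', '0009_auto_20160303_2344')]
    operations = [
        migrations.RunPython(backfill_uids, migrations.RunPython.noop),
    ]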
{
"alpha_fraction": 0.581360936164856,
"alphanum_fraction": 0.610946774482727,
"avg_line_length": 26.040000915527344,
"blob_id": "c74101e39661885bbc93b405439f1ace6258d224",
"content_id": "acc3fba1a56aced770742e97553c593b4d392319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 676,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 25,
"path": "/apps/orders/migrations/0011_auto_20160326_1340.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-03-26 13:40\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0010_order_remarks'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='order',\n name='transactions',\n ),\n migrations.AddField(\n model_name='order',\n name='transaction',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order', to='orders.Transaction'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5470588207244873,
"alphanum_fraction": 0.5823529362678528,
"avg_line_length": 26.200000762939453,
"blob_id": "430a590446e653e9f9523654b42e6d285236081b",
"content_id": "be4bc689d104e59725b554dacefea76c3c7bafe2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 25,
"path": "/apps/users/migrations/0007_auto_20160222_2005.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-22 20:05\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0006_user_validation_code'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='account',\n name='uid',\n field=models.CharField(max_length=32, null=True, unique=True, verbose_name='UID'),\n ),\n migrations.AddField(\n model_name='user',\n name='uid',\n field=models.CharField(max_length=32, null=True, unique=True, verbose_name='UID'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5987346172332764,
"alphanum_fraction": 0.6147186160087585,
"avg_line_length": 53.599998474121094,
"blob_id": "c2ef0ea9fdcf3778af8bad676bcdda7c0c820e2e",
"content_id": "5b246339530712fab740a8b89daffa93999f0318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3003,
"license_type": "no_license",
"max_line_length": 225,
"num_lines": 55,
"path": "/apps/orders/migrations/0001_initial.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-10 22:30\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('users', '0003_auto_20160210_2138'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('completed_at', models.DateTimeField(blank=True, null=True, verbose_name='Completed at')),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Creation date')),\n ('order_type', models.IntegerField(blank=True, choices=[(1, 'A, Resource Allocation'), (2, 'B, Resource Transfer'), (3, 'C, Dept Account Reconciliation'), (3, 'D, Resource Spend')], db_index=True, null=True)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=12, null=True)),\n ('origin_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='origin_user', to=settings.AUTH_USER_MODEL)),\n ('receiving_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='receiving_user', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'db_table': 'orders',\n },\n ),\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('transaction_type', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3')], null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Created at')),\n ('amount', models.DecimalField(decimal_places=2, max_digits=12, null=True)),\n ('approving_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transaction_approved', to=settings.AUTH_USER_MODEL)),\n ('origin_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='origin_account', to='users.Account')),\n ('receiving_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='receiving_account', to='users.Account')),\n ],\n options={\n 'db_table': 'transactions',\n },\n ),\n migrations.AddField(\n model_name='order',\n name='transactions',\n field=models.ManyToManyField(null=True, to='orders.Transaction'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6398963928222656,
"alphanum_fraction": 0.6658031344413757,
"avg_line_length": 18.299999237060547,
"blob_id": "120d57abdbb2d3971acaa92d68bbf5ee7e797af6",
"content_id": "c1a06798bc69d416342a7858fa7d20be5d5970a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 40,
"path": "/osem/settings/local.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from ._base import *\nfrom django.utils.translation import ugettext_lazy as _\n\n\nDEBUG = True\n\n\nINSTALLED_APPS += (\n 'debug_toolbar',\n 'django_nose',\n)\n\n\nMIDDLEWARE_CLASSES += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\n\nNOSE_ARGS = [\n '--with-coverage',\n '--cover-html',\n '--cover-erase',\n '--cover-package=order,users',\n]\n\n\nLANGUAGES = (\n ('en', _('English')),\n)\n\n\nEMAIL_HOST = os.environ.get('EMAIL_HOST', 'mailtrap.io')\nEMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER','1665e6379817b4fa')\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD','2f1a896c9f2e6f')\nEMAIL_PORT = os.environ.get('EMAIL_PORT','2525')\n\nSITE_DOMAIN = os.environ.get('SITE_DOMAIN', 'http://localhost:8000')\n"
},
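These settings silently fall back to hard-coded Mailtrap credentials when the environment variables are unset; a hedged sketch of a fail-fast guard for non-development runs (the guard is an assumption, not in the repo):

import os

REQUIRED_ENV = ('EMAIL_HOST', 'EMAIL_HOST_USER',
                'EMAIL_HOST_PASSWORD', 'SITE_DOMAIN')

def assert_env_configured():
    # Hypothetical startup check: refuse to boot on the development
    # fallbacks baked into local.py.
    missing = [name for name in REQUIRED_ENV if not os.environ.get(name)]
    if missing:
        raise RuntimeError('Unset settings: %s' % ', '.join(missing))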
{
"alpha_fraction": 0.5773447155952454,
"alphanum_fraction": 0.6187576055526733,
"avg_line_length": 29.407407760620117,
"blob_id": "3ddc6a949989877c54438759361cf71907a813d8",
"content_id": "b56783583d50800b1fa0f9631a3bb20d38f1fd32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 821,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 27,
"path": "/apps/users/migrations/0005_auto_20160211_1154.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-11 11:54\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0004_auto_20160211_1150'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='department',\n field=models.IntegerField(choices=[(1, 'A'), (2, 'B')], null=True, verbose_name='Department'),\n ),\n migrations.AlterField(\n model_name='account',\n name='owner',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6528428196907043,
"alphanum_fraction": 0.6528428196907043,
"avg_line_length": 34.595237731933594,
"blob_id": "4c27ef6f49e3f3eb995c6adb32d6e61cdaffa701",
"content_id": "27891a73510e5d8f9d1db81f552e1a13a9870898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2990,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 84,
"path": "/apps/orders/urls.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.views.generic import TemplateView\n\nfrom .views import Home\nfrom .views import Send\nfrom .views import OrderDetail\nfrom .views import Request, RequestDetail, RequestApprove, RequestReject\nfrom .views import Deposit\nfrom .views import Withdraw\nfrom .views import SearchUser\nfrom .views import Settings\nfrom .views import NotificationSettings\nfrom .views import History\nfrom .views import NewAccount\nfrom .views import UsersAutocomplete\nfrom .views import AccountsAutocomplete\nfrom .views import AccountDetail\nfrom .views import AccountMaster\n\n\napp_name = 'orders'\nurlpatterns = [\n url(r'^$', Home.as_view(), name='home'),\n\n url(r'^order/(?P<pk>\\d+)/$', OrderDetail.as_view(), name='order'),\n\n url(r'^history/$', History.as_view(), name='history'),\n\n # Send Resource\n url(r'^send/$', Send.as_view(), name='send'),\n url(r'^send/success/$', TemplateView.as_view(\n template_name=\"orders/send/success.html\"),\n name='send_success'\n ),\n url(r'^send/fail/$', TemplateView.as_view(\n template_name=\"orders/send/fail.html\"),\n name='send_fail'\n ),\n\n # Request Resource\n url(r'^request/$', Request.as_view(),\n name='request'\n ),\n url(r'^request/(?P<pk>\\d+)/approve$', RequestApprove.as_view(),\n name='request_approve'\n ),\n url(r'^request/(?P<pk>\\d+)/reject$', RequestReject.as_view(),\n name='request_reject'\n ),\n url(r'^request/(?P<pk>\\d+)/$', RequestDetail.as_view(),\n name='request_detail'\n ),\n\n url(r'^deposit/$', Deposit.as_view(), name='deposit' ),\n url(r'^withdraw/$', Withdraw.as_view(), name='withdraw' ),\n\n # Search\n url(r'^search/$', SearchUser.as_view(), name='search'),\n\n # Setings\n url(r'^settings/$', Settings.as_view(), name='settings'),\n url(r'^settings/notification/email-notifications/$', NotificationSettings.as_view(),\n name='settings_email_notifications'),\n url(r'^settings/notification/transfer-requested/$', NotificationSettings.as_view(),\n name='settings_transfer_requested'),\n url(r'^settings/notification/transfer-completed/$', NotificationSettings.as_view(),\n name='settings_transfer_completed'),\n url(r'^settings/notification/send-requested/$', NotificationSettings.as_view(),\n name='settings_send_requested'),\n url(r'^settings/notification/received-resource/$', NotificationSettings.as_view(),\n name='settings_received_resource'),\n\n url(r'^settings/new-account/$', NewAccount.as_view(), name='new_account'),\n\n # API\n url(r'^users_autocomplete/$', UsersAutocomplete.as_view(),\n name='users_autocomplete'),\n url(r'^accounts_autocomplete/$', AccountsAutocomplete.as_view(),\n name='accounts_autocomplete'),\n url(r'^account_detail/$', AccountDetail.as_view(),\n name='account_detail'),\n url(r'^account_master/$', AccountMaster.as_view(),\n name='account_master'),\n]\n"
},
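Every pattern above is named inside the orders namespace, so templates and code resolve them with reverse(); a small illustration, assuming the app is mounted at the site root (the import path matches the Django 1.9 vintage used elsewhere in this repo):

from django.core.urlresolvers import reverse

reverse('orders:home')                       # -> '/'
reverse('orders:order', args=[42])           # -> '/order/42/'
reverse('orders:request_approve', args=[7])  # -> '/request/7/approve'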
{
"alpha_fraction": 0.6661080718040466,
"alphanum_fraction": 0.6661080718040466,
"avg_line_length": 33.099998474121094,
"blob_id": "8e6c67f47d5ca8ffbebfb8f37a3fc076d4be2974",
"content_id": "583ab3fc285a72b07acaa8c7ccb3f49dd088ffd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2387,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 70,
"path": "/apps/orders/admin.py",
"repo_name": "amacasieb/meso",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.core.urlresolvers import reverse\n\nfrom .models import Order\nfrom .models import Transaction\nfrom .models import RequestOrder\n\ndef approve_transaction(modeladmin, request, queryset):\n for transaction in queryset:\n transaction.approve(request.user)\napprove_transaction.short_description = 'Approve selected transactions'\n\ndef reject_transaction(modeladmin, request, queryset):\n for transaction in queryset:\n transaction.cancel(request.user)\nreject_transaction.short_description = 'Reject selected transactions'\n\n\nclass OrderAdmin(admin.ModelAdmin):\n list_display = [\n 'uid', 'origin_user', 'receiving_user', 'completed_at',\n 'created_at', 'order_type', 'amount', 'transaction_link'\n ]\n filter_display = ['created_at', 'order_type']\n\n def transaction_link(self, obj):\n if obj.transaction:\n url = reverse('admin:orders_transaction_change', args=[obj.transaction.id] )\n return u'<a href=\"%s\">%s</a>' %(url, obj.transaction.uid)\n return None\n transaction_link.allow_tags = True\n transaction_link.short_description = 'Transaction'\n\n\nclass TransactionAdmin(admin.ModelAdmin):\n list_display = [\n 'uid', 'origin_account', 'receiving_account', 'approving_user',\n 'transaction_type', 'transaction_status', 'amount',\n 'created_at', 'completed_at', 'order_link'\n ]\n filter_display = [\n 'transaction_type', 'status'\n ]\n actions = [approve_transaction, reject_transaction]\n\n def order_link(self, obj):\n url = reverse('admin:orders_order_change', args=[obj.order.id] )\n return u'<a href=\"%s\">%s</a>' %(url, obj.order.uid)\n order_link.allow_tags = True\n order_link.short_description = 'Order'\n\n\nclass RequestOrderAdmin(admin.ModelAdmin):\n list_display = [\n 'uid', 'origin_user', 'receiving_user', 'amount',\n 'created_at', 'status', 'order_link'\n ]\n\n def order_link(self, obj):\n if obj.order:\n url = reverse('admin:orders_order_change', args=[obj.order.id] )\n return u'<a href=\"%s\">%s</a>' %(url, obj.order.uid)\n return ''\n order_link.allow_tags = True\n order_link.short_description = 'Order'\n\n\nadmin.site.register(Order, OrderAdmin)\nadmin.site.register(Transaction, TransactionAdmin)\nadmin.site.register(RequestOrder, RequestOrderAdmin)\n"
}
],
"num_files": 61
"repo_name": "ProgrammerBaldy/bunkers_backend",
"repo_url": "https://github.com/ProgrammerBaldy/bunkers_backend",
"snapshot_id": "bf9ca170c18c94da5b5daf0f94b45e1d0bd3085e",
"revision_id": "d9dcfa34f59bf2349e2880ede7aa9e5c5b9c8706",
"directory_id": "c3a351afef234e7a4a69e348ab1dc90dd6455f1d",
"branch_name": "refs/heads/main",
"visit_date": "2023-03-10T07:00:12.555231",
"revision_date": "2021-03-02T14:14:13",
"committer_date": "2021-03-02T14:14:13",
"github_id": 337581522,
"star_events_count": 0,
"fork_events_count": 0,
"gha_license_id": null,
"gha_created_at": null,
"gha_updated_at": null,
"gha_pushed_at": null,
"gha_language": null,
"files":
[
{
"alpha_fraction": 0.5536487102508545,
"alphanum_fraction": 0.557777464389801,
"avg_line_length": 50.155216217041016,
"blob_id": "45b270488f60110afbd68ed1d08399fbfe2b71e5",
"content_id": "f5d64608375e5a0a5d8600d5e9e1470fd5953c74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20113,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 393,
"path": "/supplies_control/views.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from rest_framework.response import Response\nfrom rest_framework import generics\nfrom . models import Supply, Subproduct, Subproduct_supplies, Product, Product_supplies, Product_subproducts\nfrom . serializers import SupplySerializer, SubproductSerializer, Subproduct_suppliesSerializer, ProductSerializer, Product_suppliesSerializer, Product_subproductsSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.http import JsonResponse\nfrom django.core import serializers\nimport json\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status\n\n\nclass SupplyView(generics.RetrieveAPIView):\n permission_classes = (IsAuthenticated,)\n\n def get (self, request, *args, **kwargs):\n header = ['Insumo', 'Unidade de Medida', 'Quantidade em Estoque', 'Custo Médio R$', 'Ação']\n header_keys = ['name', 'measure_unit', 'stock', 'average_cost', 'id']\n raw_supplies = Supply.objects.raw(\"SELECT name, measure_unit, stock, average_cost, id FROM supplies_control_supply\")\n supply_list = []\n supply_list.append(header)\n for s in raw_supplies:\n dummy = []\n dummy.append(s.name)\n dummy.append(s.measure_unit)\n dummy.append(s.stock)\n dummy.append(s.average_cost)\n dummy.append(s.id)\n supply_list.append(dummy)\n return JsonResponse({'raw_data' : supply_list, 'keys' : header_keys})\n\n def post (self, request, *args, **kwargs):\n payload = json.loads(request.body)\n try:\n supply = Supply.objects.create(\n name = payload[\"name\"],\n measure_unit = payload[\"measure_unit\"],\n average_cost = payload[\"average_cost\"],\n stock = payload[\"stock\"]\n )\n serializer = SupplySerializer(supply)\n return JsonResponse({'supply': serializer.data}, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def patch (self, request, *args, **kwargs):\n payload = json.loads(request.body)\n try:\n old_supply = Supply.objects.filter(id = payload[\"id\"])\n old_supply.update(**payload)\n supply = Supply.objects.get(id = payload[\"id\"])\n serializer = SupplySerializer(supply)\n return JsonResponse({'supply': serializer.data}, safe=False, status=status.HTTP_200_OK)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n return JsonResponse({'error': 'Something terrible went wrong : ' + str(e)}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def delete (self, request, *args, **kwargs):\n payload = json.loads(request.body)\n try:\n supply = Supply.objects.filter(id = payload[\"id\"])\n supply.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n \n\nclass SubproductView(generics.RetrieveAPIView):\n queryset = Subproduct.objects.all()\n \n def delete (self, request, *args, **kwargs):\n subproductid = self.kwargs.get('subproductid')\n try:\n subproduct = Subproduct.objects.filter(id = subproductid)\n subproduct.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': 
str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def patch (self, request, *args, **kwargs):\n subproductid = self.kwargs.get('subproductid')\n payload = json.loads(request.body)\n try:\n subproduct = Subproduct.objects.get(pk=subproductid)\n \n subproduct.name = payload[\"name\"]\n subproduct.measure_unit = payload[\"measure_unit\"]\n subproduct.production_cost = payload[\"average_cost\"]\n \n subproduct.stock = payload[\"stock\"]\n \n subproduct.recipe_final_weight = payload[\"recipe_final_weight\"]\n \n subproduct.save()\n #insert supplies\n if (Subproduct_supplies.objects.filter(subproductid_id=subproductid).exists()):\n subproduct_supplies = Subproduct_supplies.objects.all().filter(subproductid_id=subproductid)\n for item in subproduct_supplies:\n item.delete()\n for item in payload[\"supplies\"]:\n subproduct_supplies = Subproduct_supplies.objects.create(\n subproductid = subproduct,\n supplyid = Supply.objects.get(id = item[\"supplyid\"]),\n quantity = float(item[\"quantity\"])\n )\n\n serializer = SubproductSerializer(subproduct)\n return JsonResponse({'subproduct': 'ok'}, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n return JsonResponse({'error': 'Something terrible went wrong: '+str(e)}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def get(self, request, *args, **kwargs):\n subproductid = self.kwargs.get('subproductid')\n raw_supplies = Subproduct.objects.raw('''SELECT sp.id, s.name, s.measure_unit, ss.quantity, s.average_cost AS total_cost, s.id AS supply_id\n FROM supplies_control_subproduct sp\n LEFT JOIN supplies_control_subproduct_supplies ss ON ss.subproductid_id = sp.id\n LEFT JOIN supplies_control_supply s ON s.id = ss.supplyid_id\n WHERE sp.id = '''+str(subproductid))\n supply_list = []\n for s in raw_supplies:\n dummy = []\n dummy.append(s.name)\n dummy.append(s.measure_unit)\n dummy.append(s.quantity)\n dummy.append(s.total_cost)\n dummy.append(s.supply_id)\n supply_list.append(dummy)\n raw_subproduct = Subproduct.objects.raw('''SELECT sp.id, sp.name, sp.measure_unit, sp.stock, sp.recipe_final_weight\n FROM supplies_control_subproduct sp\n WHERE sp.id = '''+str(subproductid))\n subproduct = []\n for s in raw_subproduct:\n dummy = []\n dummy.append(s.name)\n dummy.append(s.measure_unit)\n dummy.append(s.stock)\n dummy.append(s.id)\n dummy.append(s.recipe_final_weight)\n subproduct.append(dummy)\n \n return JsonResponse({'supplies' : supply_list, 'subproduct' : subproduct})\n\n def post (self, request, *args, **kwargs):\n payload = json.loads(request.body)\n try:\n subproduct = Subproduct.objects.create(\n name = payload[\"name\"],\n measure_unit = payload[\"measure_unit\"],\n production_cost = payload[\"average_cost\"],\n stock = payload[\"stock\"],\n recipe_final_weight = payload[\"recipe_final_weight\"]\n )\n #insert supplies\n for item in payload[\"supplies\"]:\n subproduct_supplies = Subproduct_supplies.objects.create(\n subproductid = subproduct,\n supplyid = Supply.objects.get(id = item[\"supplyid\"]),\n quantity = item[\"quantity\"]\n )\n\n serializer = SubproductSerializer(subproduct)\n return JsonResponse({'subproduct': 'ok'}, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, 
safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n return JsonResponse({'error': 'Something terrible went wrong: '+str(e)}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass Subproduct_suppliesView(generics.RetrieveAPIView):\n queryset = Subproduct_supplies.objects.all()\n\n def get(self, request, *args, **kwargs):\n header = ['Nome', 'Unidade de Medida', 'Peso Pronto', 'Quantidade em Estoque', 'Custo Médio R$', 'Ação']\n header_keys = ['name', 'measure_unit', 'recipe_final_weight', 'stock', 'total_cost', 'id']\n raw_subproducts = Subproduct.objects.raw('''SELECT sp.name, sp.measure_unit, sp.stock, SUM(ss.quantity * s.average_cost) AS total_cost, sp.recipe_final_weight, sp.id\n FROM supplies_control_subproduct sp\n LEFT JOIN supplies_control_subproduct_supplies ss ON ss.subproductid_id = sp.id\n LEFT JOIN supplies_control_supply s ON s.id = ss.supplyid_id\n GROUP BY sp.id''')\n subproducts_list = []\n subproducts_list.append(header)\n for s in raw_subproducts:\n dummy = []\n dummy.append(s.name)\n dummy.append(s.measure_unit)\n dummy.append(s.recipe_final_weight)\n dummy.append(s.stock)\n dummy.append(s.total_cost)\n dummy.append(s.id)\n subproducts_list.append(dummy)\n return JsonResponse({'raw_data' : subproducts_list, 'keys' : header_keys})\n\n\n\n\nclass ProductView(generics.RetrieveAPIView):\n queryset = Product.objects.all()\n\n def delete (self, request, *args, **kwargs):\n productid = self.kwargs.get('productid')\n try:\n product = Product.objects.filter(id = productid)\n product.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def patch (self, request, *args, **kwargs):\n productid = self.kwargs.get('productid')\n payload = json.loads(request.body)\n try:\n product = Product.objects.get(pk=productid)\n product.name = payload[\"name\"]\n product.measure_unit = payload[\"measure_unit\"]\n product.production_cost = 0\n product.stock = payload[\"stock\"]\n product.selling_price = payload[\"selling_price\"]\n\n product.save()\n print(Product_supplies.objects.filter(productid_id=productid).exists())\n if (Product_supplies.objects.filter(productid_id=productid).exists()):\n product_supplies = Product_supplies.objects.all().filter(productid_id=productid)\n print(product_supplies)\n for item in product_supplies:\n print(item.pk)\n item.delete()\n\n #insert supplies\n for item in payload[\"supplies\"]:\n print (item)\n product_supplies = Product_supplies.objects.create(\n productid = product,\n supplyid = Supply.objects.get(id = item[\"supplyid\"]),\n quantity = item[\"quantity\"]\n )\n\n\n if (Product_subproducts.objects.filter(productid_id=productid).exists()):\n product_subproducts = Product_subproducts.objects.all().filter(productid_id=productid)\n for item in product_subproducts:\n item.delete()\n #insert subproducts\n for item in payload[\"subproducts\"]:\n product_subproducts = Product_subproducts.objects.create(\n productid = product,\n subproductid = Subproduct.objects.get(id = item[\"subproductid\"]),\n quantity = item[\"quantity\"]\n )\n\n serializer = SubproductSerializer(product)\n return JsonResponse({'product': 'ok'}, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, 
status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n return JsonResponse({'error': 'Something terrible went wrong: '+str(e)}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def get(self, request, *args, **kwargs):\n productid = self.kwargs.get('productid')\n raw_supplies = Product.objects.raw('''SELECT p.id, s.name, s.measure_unit, ps.quantity, s.id AS supply_id\n FROM supplies_control_product p\n LEFT JOIN supplies_control_product_supplies ps ON ps.productid_id = p.id\n LEFT JOIN supplies_control_supply s ON s.id = ps.supplyid_id\n WHERE p.id = '''+str(productid))\n supply_list = []\n for s in raw_supplies:\n dummy = []\n dummy.append(s.name)\n dummy.append(s.measure_unit)\n dummy.append(s.quantity)\n dummy.append(s.supply_id)\n supply_list.append(dummy)\n\n\n raw_subproducts = Product.objects.raw('''SELECT p.id, sp.name, sp.measure_unit, psp.quantity, sp.id AS supply_id\n FROM supplies_control_product p\n LEFT JOIN supplies_control_product_subproducts psp ON psp.productid_id = p.id\n LEFT JOIN supplies_control_subproduct sp ON sp.id = psp.subproductid_id\n WHERE p.id = '''+str(productid))\n subproducts = []\n for sp in raw_subproducts:\n dummy = []\n dummy.append(sp.name)\n dummy.append(sp.measure_unit)\n dummy.append(sp.quantity)\n dummy.append(sp.supply_id)\n subproducts.append(dummy)\n\n raw_products = Product.objects.raw('''SELECT p.id, p.name, p.measure_unit, p.stock, p.selling_price\n FROM supplies_control_product p\n WHERE p.id = '''+str(productid))\n product = []\n for p in raw_products:\n dummy = []\n dummy.append(p.name)\n dummy.append(p.measure_unit)\n dummy.append(p.stock)\n dummy.append(p.selling_price)\n dummy.append(p.id)\n product.append(dummy)\n \n return JsonResponse({'supplies' : supply_list, 'subproducts' : subproducts, 'product' : product})\n\n def post (self, request, *args, **kwargs):\n payload = json.loads(request.body)\n try:\n product = Product.objects.create(\n name = payload[\"name\"],\n measure_unit = payload[\"measure_unit\"],\n production_cost = 0,\n stock = payload[\"stock\"],\n selling_price = payload[\"selling_price\"]\n )\n #insert supplies\n for item in payload[\"supplies\"]:\n print (item)\n product_supplies = Product_supplies.objects.create(\n productid = product,\n supplyid = Supply.objects.get(id = item[\"supplyid\"]),\n quantity = item[\"quantity\"]\n )\n #insert subproducts\n for item in payload[\"subproducts\"]:\n print (item)\n product_subproducts = Product_subproducts.objects.create(\n productid = product,\n subproductid = Subproduct.objects.get(id = item[\"subproductid\"]),\n quantity = item[\"quantity\"]\n )\n\n serializer = SubproductSerializer(product)\n return JsonResponse({'product': 'ok'}, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n return JsonResponse({'error': 'Something terrible went wrong: '+str(e)}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass Product_suppliesView(generics.RetrieveAPIView):\n queryset = Product_supplies.objects.all()\n\n def get(self, request, *args, **kwargs):\n header = ['Nome', 'Preço de Venda', 'Custo Médio R$', 'Ação']\n header_keys = ['name', 'selling_price', 'cost', 'id']\n raw_products = Product.objects.raw('''SELECT dummy.name, dummy.selling_price, SUM(dummy.cost) AS cost, dummy.id\n FROM (\n (SELECT p.id, p.name, p.measure_unit, p.stock, p.selling_price AS selling_price,\n SUM(s.average_cost * ps.quantity) AS 
cost\n FROM supplies_control_product p\n LEFT JOIN supplies_control_product_supplies ps ON ps.productid_id = p.id\n LEFT JOIN supplies_control_supply s ON s.id = ps.supplyid_id\n GROUP BY p.id)\n\n UNION ALL \n\n (SELECT p.id, p.name, p.measure_unit, p.stock, p.selling_price AS selling_price,\n SUM(s.average_cost * ss.quantity) AS cost\n FROM supplies_control_product p\n INNER JOIN supplies_control_product_subproducts psp ON p.id = psp.productid_id\n LEFT JOIN supplies_control_subproduct sp ON sp.id = psp.subproductid_id\n LEFT JOIN supplies_control_subproduct_supplies ss ON ss.subproductid_id = sp.id\n LEFT JOIN supplies_control_supply s ON s.id = ss.supplyid_id\n GROUP BY p.id)\n ) dummy\n GROUP BY dummy.id, dummy.name, dummy.measure_unit, dummy.stock, dummy.selling_price''')\n products_list = []\n products_list.append(header)\n for p in raw_products:\n dummy = []\n dummy.append(p.name)\n dummy.append(p.selling_price)\n dummy.append(p.cost)\n dummy.append(p.id)\n products_list.append(dummy)\n return JsonResponse({'raw_data' : products_list, 'keys' : header_keys})\n\nclass Product_subproductsView(generics.RetrieveAPIView):\n queryset = Product_subproducts.objects.all()\n\n def get(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = Product_subproductsSerializer(queryset, many=True)\n return Response(serializer.data)"
},
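The view above splices subproductid/productid into its raw SQL by string concatenation; the values come from <int:...> URL captures, so injection is contained here, but Django's raw() also accepts bound parameters, which is the safer habit. The same subproduct query rewritten that way:

from supplies_control.models import Subproduct

def subproduct_supplies(subproductid):
    # %s placeholders are bound by the database driver, so the value is
    # never spliced into the SQL text itself.
    return Subproduct.objects.raw(
        '''SELECT sp.id, s.name, s.measure_unit, ss.quantity,
                  s.average_cost AS total_cost, s.id AS supply_id
           FROM supplies_control_subproduct sp
           LEFT JOIN supplies_control_subproduct_supplies ss
                  ON ss.subproductid_id = sp.id
           LEFT JOIN supplies_control_supply s ON s.id = ss.supplyid_id
           WHERE sp.id = %s''', [subproductid])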
{
"alpha_fraction": 0.7692307829856873,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 30.5,
"blob_id": "47c70bd1547eddd4c2402497941e0d4989fdd6e9",
"content_id": "72d89d18ee44fdf5c71ecf2b441934e6e0752c6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 12,
"path": "/bunkers_backend/views.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from rest_framework.response import Response\nfrom rest_framework import generics\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.views import APIView\n\n\nclass TestConn(generics.RetrieveAPIView):\n permission_classes = (IsAuthenticated,)\n queryset = True\n\n def get(self, request, *args, **kwargs):\n return Response(data={'message':True})"
},
{
"alpha_fraction": 0.7830188870429993,
"alphanum_fraction": 0.7830188870429993,
"avg_line_length": 20.200000762939453,
"blob_id": "49bb85e3fe066d80658d7ebc11a30ad8567b5cd0",
"content_id": "da45811050ae7c6c2d0332a7d83d84f4abb18ddc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 5,
"path": "/supplies_control/apps.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass SuppliesControlConfig(AppConfig):\n name = 'supplies_control'\n"
},
{
"alpha_fraction": 0.8297872543334961,
"alphanum_fraction": 0.8297872543334961,
"avg_line_length": 22.5,
"blob_id": "aa8a84af98eb61b15d6c85a1743e5df39aaa163b",
"content_id": "1c07fbda110cb46d7b6b8316626f08613c855376",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "# bunkers_backend\nbackend for Bunkers Pizzeria\n"
},
{
"alpha_fraction": 0.7247312068939209,
"alphanum_fraction": 0.7247312068939209,
"avg_line_length": 37.75,
"blob_id": "6d18d2cf43be0be96a8374bdc3efb7c870dfa637",
"content_id": "d2d65817b11979313fde8e10152d2123a4e15aff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 12,
"path": "/bunkers_backend/urls.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import (TokenObtainPairView, TokenRefreshView)\nfrom . views import TestConn\n\nurlpatterns = [\n path('api-token/', TokenObtainPairView.as_view()),\n path('api-token-refresh/', TokenRefreshView.as_view()),\n path('admin/', admin.site.urls),\n path('testconn/', TestConn.as_view(), name=\"testconn_view\"),\n path('',include('supplies_control.urls'))\n]\n"
},
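Putting the simplejwt token endpoints and the IsAuthenticated TestConn view together, a minimal end-to-end check with DRF's test client (the username and password are assumptions):

from rest_framework.test import APIClient

client = APIClient()
# Obtain a JWT pair from the token endpoint wired above.
resp = client.post('/api-token/', {'username': 'alice', 'password': 'secret'})
client.credentials(HTTP_AUTHORIZATION='Bearer ' + resp.json()['access'])
# TestConn answers only for authenticated callers.
assert client.get('/testconn/').json() == {'message': True}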
{
"alpha_fraction": 0.73384028673172,
"alphanum_fraction": 0.73384028673172,
"avg_line_length": 59.69230651855469,
"blob_id": "740c5ed78ee4720ec92c3f480c60fd237be66835",
"content_id": "3b43acce5c6c517973ab3b16b98b34a1a6380809",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 13,
"path": "/supplies_control/urls.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . views import SupplyView, SubproductView, Subproduct_suppliesView, ProductView, Product_suppliesView, Product_subproductsView\nfrom . import views\n\nurlpatterns = [\n path('supplies/', SupplyView.as_view(), name=\"supplies_view\"),\n path('subproducts/', SubproductView.as_view(), name=\"subproducts_view\"),\n path('subproducts/<int:subproductid>', SubproductView.as_view(), name=\"subproducts_view\"),\n path('subproducts_supplies/', Subproduct_suppliesView.as_view(), name=\"subproducts_supplies_view\"),\n path('products/<int:productid>', ProductView.as_view(), name=\"products_view\"),\n path('products/', ProductView.as_view(), name=\"products_view\"),\n path('products_supplies/', Product_suppliesView.as_view(), name=\"products_supplies_view\"),\n]\n"
},
{
"alpha_fraction": 0.8376068472862244,
"alphanum_fraction": 0.8376068472862244,
"avg_line_length": 38.11111068725586,
"blob_id": "7d60445c3f9ffb0c91389066ac71691b070ef671",
"content_id": "269d53a99c801442009ea07ea3fe620405226836",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 9,
"path": "/supplies_control/admin.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom . models import Supply, Subproduct, Subproduct_supplies, Product, Product_supplies, Product_subproducts\n\nadmin.site.register(Supply)\nadmin.site.register(Subproduct)\nadmin.site.register(Subproduct_supplies)\nadmin.site.register(Product)\nadmin.site.register(Product_supplies)\nadmin.site.register(Product_subproducts)"
},
{
"alpha_fraction": 0.6125907897949219,
"alphanum_fraction": 0.6240919828414917,
"avg_line_length": 36.54545593261719,
"blob_id": "cc0b8d94cccad2dd364474f10807659ff0bc0995",
"content_id": "17f0cdee3667a2ed714e6fe4a61a57cb8fb66d5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1652,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 44,
"path": "/supplies_control/migrations/0002_auto_20210210_1100.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.6 on 2021-02-10 14:00\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('supplies_control', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='product_subproducts',\n name='productid',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplies_control.product'),\n ),\n migrations.AlterField(\n model_name='product_subproducts',\n name='subproductid',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplies_control.subproduct'),\n ),\n migrations.AlterField(\n model_name='product_supplies',\n name='productid',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplies_control.product'),\n ),\n migrations.AlterField(\n model_name='product_supplies',\n name='supplyid',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplies_control.supply'),\n ),\n migrations.AlterField(\n model_name='subproduct_supplies',\n name='subproductid',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplies_control.subproduct'),\n ),\n migrations.AlterField(\n model_name='subproduct_supplies',\n name='supplyid',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplies_control.supply'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7144140005111694,
"alphanum_fraction": 0.7359676957130432,
"avg_line_length": 48.511112213134766,
"blob_id": "8a22522f3a22d65c00b067504219142e6da8d367",
"content_id": "c38cd21894d34edee3fee9ba0682f34426d5a03d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2227,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 45,
"path": "/supplies_control/models.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Supply (models.Model):\n name = models.CharField(max_length=255)\n measure_unit = models.CharField(max_length=20)\n stock = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n average_cost = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n \n def __str__(self):\n return self.name\n\nclass Subproduct (models.Model):\n name = models.CharField(max_length=255)\n measure_unit = models.CharField(max_length=20)\n recipe_final_weight = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n stock = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n production_cost = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n\n def __str__(self):\n return self.name\n\nclass Subproduct_supplies (models.Model):\n subproductid = models.ForeignKey(Subproduct, on_delete=models.CASCADE, null=False)\n supplyid = models.ForeignKey(Supply, on_delete=models.CASCADE, null=False)\n quantity = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n\nclass Product (models.Model):\n name = models.CharField(max_length=255)\n measure_unit = models.CharField(max_length=20)\n stock = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n production_cost = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n selling_price = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n\n def __str__(self):\n return self.name\n\nclass Product_supplies (models.Model):\n productid = models.ForeignKey(Product, on_delete=models.CASCADE, null=False)\n supplyid = models.ForeignKey(Supply, on_delete=models.CASCADE, null=False)\n quantity = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)\n\nclass Product_subproducts (models.Model):\n productid = models.ForeignKey(Product, on_delete=models.CASCADE, null=False)\n subproductid = models.ForeignKey(Subproduct, on_delete=models.CASCADE, null=False)\n quantity = models.DecimalField(max_digits=10, decimal_places=4, default=None, null=True)"
},
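The join tables above (Product_supplies, Subproduct_supplies, Product_subproducts) each carry a quantity alongside two foreign keys, which is enough to roll costs up the product tree. A minimal sketch of such a roll-up follows; the helper name compute_subproduct_cost is hypothetical, not part of this repo, and assumes a configured Django environment where supplies_control.models is importable.

```python
# Hypothetical helper (not in the repo): roll a subproduct's production cost
# up from its supply rows, assuming Django settings are already configured.
from decimal import Decimal

from supplies_control.models import Subproduct, Subproduct_supplies


def compute_subproduct_cost(subproduct: Subproduct) -> Decimal:
    """Sum quantity * average_cost over the subproduct's Subproduct_supplies rows."""
    total = Decimal("0")
    rows = Subproduct_supplies.objects.filter(subproductid=subproduct)
    for row in rows.select_related("supplyid"):
        if row.quantity is not None and row.supplyid.average_cost is not None:
            total += row.quantity * row.supplyid.average_cost
    return total
```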
{
"alpha_fraction": 0.6912539601325989,
"alphanum_fraction": 0.6912539601325989,
"avg_line_length": 28.6875,
"blob_id": "645824e2dc9ed4d15b109bce7efcce19938106c3",
"content_id": "097acd03a1a50862a56ccee55d98a57bcf03f470",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 949,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 32,
"path": "/supplies_control/serializers.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom . models import Supply, Subproduct, Subproduct_supplies, Product, Product_supplies, Product_subproducts\n\nclass SupplySerializer(serializers.ModelSerializer):\n class Meta:\n model = Supply\n fields = '__all__'\n\nclass SubproductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Subproduct\n fields = '__all__'\n\nclass Subproduct_suppliesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Subproduct_supplies\n fields = '__all__'\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = '__all__'\n\nclass Product_suppliesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product_supplies\n fields = '__all__'\n\nclass Product_subproductsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product_subproducts\n fields = '__all__'"
},
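Each serializer above exposes all model fields. Since DRF serializers also work on unsaved instances, they can be exercised without a database; a minimal sketch, assuming the project's Django settings are loaded:

```python
# Minimal sketch (assumes DJANGO_SETTINGS_MODULE is set and django.setup() ran).
from supplies_control.models import Supply
from supplies_control.serializers import SupplySerializer

supply = Supply(name="Flour", measure_unit="kg", stock=10, average_cost=2)
print(SupplySerializer(supply).data)
# e.g. {'id': None, 'name': 'Flour', 'measure_unit': 'kg',
#       'stock': '10.0000', 'average_cost': '2.0000'}
```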
{
"alpha_fraction": 0.5576649308204651,
"alphanum_fraction": 0.5851922035217285,
"avg_line_length": 35.32758712768555,
"blob_id": "deb5d8ba262cf8bb0ddbc2d6c3975e324e9a032b",
"content_id": "da215e44e178a89aa865cf28d448590d163c9151",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2107,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 58,
"path": "/supplies_control/migrations/0003_auto_20210218_1110.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.6 on 2021-02-18 14:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('supplies_control', '0002_auto_20210210_1100'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='production_cost',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='product',\n name='selling_price',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='product',\n name='stock',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='product_subproducts',\n name='quantity',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='product_supplies',\n name='quantity',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='subproduct',\n name='production_cost',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='subproduct',\n name='recipe_final_weight',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='subproduct',\n name='stock',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n ),\n migrations.AddField(\n model_name='subproduct_supplies',\n name='quantity',\n field=models.DecimalField(decimal_places=4, default=None, max_digits=10, null=True),\n )\n ]\n"
},
{
"alpha_fraction": 0.5138097405433655,
"alphanum_fraction": 0.5269618630409241,
"avg_line_length": 35.790321350097656,
"blob_id": "5452fbedf431f88f306ddea67f5dd66c3d794a2a",
"content_id": "9a23f860bd46da6d8fd5fa0b86db036810603cec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2281,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 62,
"path": "/supplies_control/migrations/0001_initial.py",
"repo_name": "ProgrammerBaldy/bunkers_backend",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.6 on 2021-02-10 13:08\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('measure_unit', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='Product_subproducts',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('productid', models.IntegerField()),\n ('subproductid', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Product_supplies',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('productid', models.IntegerField()),\n ('supplyid', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Subproduct',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('measure_unit', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='Subproduct_supplies',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('subproductid', models.IntegerField()),\n ('supplyid', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Supply',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('measure_unit', models.CharField(max_length=20)),\n ],\n ),\n ]\n"
}
] | 12 |
makc2299/Optimization-methods
|
https://github.com/makc2299/Optimization-methods
|
6160e421b5482e66758912e6d6f684d75e022071
|
2e55a2a04afee67d0580a72ebededd47e749c98c
|
32bfb4172d8a2dc4d8730443d477872db4022806
|
refs/heads/master
| 2022-10-29T10:34:38.176631 | 2020-06-17T09:23:38 | 2020-06-17T09:23:38 | 272,929,406 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6496374607086182,
"alphanum_fraction": 0.6845748424530029,
"avg_line_length": 47.935482025146484,
"blob_id": "3e68dae6876e57a03c9a8e1d40d9c66219131718",
"content_id": "26effec56970f7f51273e5d9af61b30145d3a6e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3034,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 62,
"path": "/README.md",
"repo_name": "makc2299/Optimization-methods",
"src_encoding": "UTF-8",
"text": "This is a module containing numerical methods of unconditional multidimensional minimization such as:\n\noptimize_module.CoordinateDescent()\n\noptimize_module.NelderMead()\n\noptimize_module.GradientDescent()\n\noptimize_module.ConjugateGradients()\n\noptimize_module.Newton()\n\noptimize_module.Marquardt()\n\nZero order methods\n\n\t1. Coordinate descent method\n\t\tcall example: optimize_module.CoordinateDescent(f,x,eps),\n\t\tf - is function object that you declared above such as def f(x): return 4.*(x[0]-5)**2.+(x[1]-6)**2\n\t\tx - point coordinate exmple: x = np.array([1., -1.])\n\t\teps - up to this number, the algorithm will work\n\t2. Nelder-Mead method\n\t\tcall example: optimize_module.NelderMead(f,x1, x2, x3, eps),\n\t\tf - is function object that you declared above such as def f(x): return 4.*(x[0]-5)**2.+(x[1]-6)**2\n\t\tx1,x2,x3 - point coordinates exmple: x1=np.array([9. , 2.]), x2=np.array([3., 5.]), x3=np.array([4. , 10.])\nFirst order methods\n\n\t1. Method of the fastest gradient descent\n\t\tcall example: optimize_module.GradientDescent(f,grad,x,eps),\n\t\tf - is function object that you declared above such as def f(x): return 4.*(x[0]-5)**2.+(x[1]-6)**2\n\t\tgrad - this is a function object that returns a gradient from f function\n\t\t\texample: def grad(x): return np.array([8.*(x[0]-5), 2.*(x[1]-6)])\n\t\tx - point coordinate exmple: x = np.array([1., -1.])\n\t\teps - up to this number, the algorithm will work\n\t2. Method of conjugate gradients\n\t\tcall example: optimize_module.ConjugateGradients(f,grad,x,eps),\n\t\tf - is function object that you declared above such as def f(x): return 4.*(x[0]-5)**2.+(x[1]-6)**2\n\t\tgrad - this is a function object that returns a gradient from f function\n\t\t\texample: def grad(x): return np.array([8.*(x[0]-5), 2.*(x[1]-6)])\n\t\tx - point coordinate exmple: x = np.array([1., -1.])\n\t\teps - up to this number, the algorithm will work\n\nSecond order methods\n\n\t1. Newton's method\n\t\tcall example: optimize_module.Newton(f,grad,hesse,x,eps),\n\t\tf - is function object that you declared above such as def f(x): return 4*(x[0]-5)**2 + (x[1]-6)**2\n\t\tgrad - this is a function object that returns a gradient from f function\n\t\t\texample: def grad(x): return np.array([8*(x[0]-5), 2*(x[1]-6)])\n\t\thesse - is function object that returns the matrix of partial derivatives of grad\n\t\t\texample: def hesse(x): return np.array([[8., 0.], [0., 2.]])\n\t\tx - point coordinate exmple: x = np.array([1., -1.])\n\t\teps - up to this number, the algorithm will work\n\t2. The McWard method\n\t\tcall example: optimize_module.Marquardt(f,grad,hesse,x,eps),\n\t\tf - is function object that you declared above such as def f(x): return 4*(x[0]-5)**2 + (x[1]-6)**2\n\t\tgrad - this is a function object that returns a gradient from f function\n\t\t\texample: def grad(x): return np.array([8*(x[0]-5), 2*(x[1]-6)])\n\t\thesse - is function object that returns the matrix of partial derivatives of grad\n\t\t\texample: def hesse(x): return np.array([[8., 0.], [0., 2.]])\n\t\tx - point coordinate exmple: x = np.array([1., -1.])\n\t\teps - up to this number, the algorithm will work\n"
},
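One detail the README does not mention: the module's internal Dichotomy/Dichotomy1 line-search helpers resolve f and grad as optimize_module globals rather than taking them as parameters, so those names must be injected into the module before calling CoordinateDescent, GradientDescent or ConjugateGradients from outside. A usage sketch of the documented first-order call, with that workaround:

```python
import numpy as np
import optimize_module

def f(x):
    return 4. * (x[0] - 5) ** 2. + (x[1] - 6) ** 2

def grad(x):
    return np.array([8. * (x[0] - 5), 2. * (x[1] - 6)])

# Workaround: the Dichotomy helpers look f/grad up at module level.
optimize_module.f = f
optimize_module.grad = grad

x, fx, k = optimize_module.GradientDescent(f, grad, np.array([1., -1.]), 1e-6)
print(x, fx, k)  # should approach [5. 6.] and 0.0 (k is capped at 5000 internally)
```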
{
"alpha_fraction": 0.35321101546287537,
"alphanum_fraction": 0.41263553500175476,
"avg_line_length": 23.984375,
"blob_id": "939802d36bd2efc177b6d3e52ba5326d7546ba59",
"content_id": "a43f1d898ea9fdab39e7994fdd5d5ee9f1011ec1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4796,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 192,
"path": "/optimize_module.py",
"repo_name": "makc2299/Optimization-methods",
"src_encoding": "UTF-8",
"text": "# Module block 2. Numerical methods of unconditional multidimensional minimization\nimport numpy as np\nfrom numpy.linalg import norm, inv, det\n\n# Zero order methods ##########################################\n# 1. Coordinate descent method\n\ndef Dichotomy(x, i, eps):\n delta = eps/10.\n x_left = x.copy()\n x_right = x.copy()\n a = -10.0\n b = 10.0\n while abs(b-a) > eps:\n x_left[i] = (a + b - delta)/2.\n x_right[i] = (a + b + delta)/2.\n if f(x_left) < f(x_right):\n b = x_right[i]\n else:\n a = x_left[i]\n return (a+b)/2\n\ndef CoordinateDescent(f, x0, eps):\n n = len(x0)\n x1 = np.zeros(n, dtype = np.float)\n for i in range(0,n):\n x1[i] = Dichotomy(x0, i, eps)\n k = 1\n while norm(x1 - x0, 1) > eps and k < 5000:\n x0 = x1.copy()\n for i in range(0, n):\n x1[i] = Dichotomy(x0, i, eps)\n k += 1\n return [x1, f(x1), k]\n\n# 2. Nelder-Mead method\n\ndef NelderMead(f, x1, x2, x3, eps):\n alpha = 1.0\n beta = 0.5\n gamma = 2.0\n \n lst = sorted([[f(x1), x1], [f(x2), x2], [f(x3), x3]])\n \n xl = np.array(lst[0][1])\n xs = np.array(lst[1][1])\n xh = np.array(lst[2][1])\n\n x4 = (xl + xs) / 2\n\n sigma = np.sqrt(1./3 * ((f(x1) - f(x4))**2 + (f(x2) - f(x4))**2 + (f(x3) - f(x4))**2))\n k = 0\n\n while (sigma > eps) & (k <= 250):\n \n flag = True\n x5 = x4 + alpha * (x4 - xh)\n if f(x5) <= f(xl): \n x6 = x4 + gamma*(x5 - x4) \n if f(x6) < f(xl):\n xh = x6\n else:\n xh = x5\n elif f(xs) < f(x5) and f(x5) <= f(xh): \n x7 = x4 + beta*(xh - x4) \n xh = x7\n elif f(xl) < f(x5) and f(x5) <= f(xs): \n xh = x5\n else: \n x1 = xl + 0.5 * (x1 - xl)\n x2 = xl + 0.5 * (x2 - xl) \n x3 = xl + 0.5 * (x3 - xl) \n flag = False\n\n if flag == True:\n x1 = xl\n x2 = xs\n x3 = xh\n\n lst = sorted([[f(x1), x1], [f(x2), x2], [f(x3), x3]])\n \n xl = np.array(lst[0][1])\n xs = np.array(lst[1][1])\n xh = np.array(lst[2][1])\n\n x4 = (xl + xs) / 2\n\n sigma = np.sqrt(1./3 * ((f(x1) - f(x4))**2 + (f(x2) - f(x4))**2 + (f(x3) - f(x4))**2))\n k += 1\n\n return [xl, f(xl), k]\n###############################################################\n\n# First order methods #########################################\n#1. The method of the fastest gradient descent\n\ndef Dichotomy1(x0, eps):\n delta = eps/10.\n a = -2.0\n b = 2.5\n while np.abs(b-a) > eps:\n alpha1 = (a + b - delta)/2.\n alpha2 = (a + b + delta)/2.\n f1 = f(x0 - alpha1*grad(x0))\n f2 = f(x0 - alpha2*grad(x0))\n if f1 < f2:\n b = alpha2\n else:\n a = alpha1\n return (a + b)/2.\n\ndef GradientDescent(f, grad, x0, eps):\n alpha = Dichotomy1(x0, eps)\n x1 = x0 - alpha*grad(x0) \n k = 1\n while norm((x1-x0), 1) > eps and k < 5000:\n x0 = x1\n alpha = Dichotomy1(x0, eps)\n x1 = x0 - alpha*grad(x0) \n k = k + 1\n return [x1, f(x1), k]\n\n# 2. The method of conjugate gradients\ndef ConjugateGradients(f, grad, x0, eps):\n p = -grad(x0)\n alpha = Dichotomy1(x0, eps)\n x1 = x0 + alpha*p\n k = 1\n while norm(x1-x0, 1) > eps and k < 5000:\n b = (norm(grad(x1), 1))**2/(norm(grad(x0), 1))**2 \n p = -grad(x1) + b*p \n x0 = x1\n alpha = Dichotomy1(x0, eps)\n x1 = x0 + alpha*p\n k = k + 1\n\n return [x1, f(x1), k]\n###############################################################\n\n# Second order methods ########################################\n\n# 1. Newton's method\ndef Newton(f, grad, hesse, x0, eps):\n k = 0\n gr = grad(x0)\n while norm(gr, 1) > eps and k < 50:\n hs = inv(hesse(x0))\n dt1 = hs[0][0]\n dt2 = det(hs)\n if dt1 > 0 and dt2 > 0:\n p = -np.dot(hs , gr) \n else:\n p = -gr \n x1 = x0 + p\n k = k + 1\n x0 = x1\n gr = grad(x0)\n\n return [x1, f(x1), k]\n\n# 2. 
The Marquardt method\ndef Marquardt(f, grad, hesse, x0, eps):\n    k = 0\n    E = np.eye(2)\n    my = 10**3\n    gr = grad(x0)\n    while norm(gr, 1) > eps and k < 50:\n        hs = hesse(x0)\n        dod = np.dot(inv(hs + np.dot(my, E)), gr)\n        x1 = x0 - dod\n        if f(x1) < f(x0):\n            my = my/2\n        else:\n            my = 2*my\n        x0 = x1\n        k = k + 1\n        gr = grad(x0)\n\n    return [x1, f(x1), k]\n\n# # Test functions\n\n# def f(x):\n#     return 4*(x[0]-5)**2 + (x[1]-6)**2\n\n# def grad(x):\n#     return np.array([8*(x[0]-5), 2*(x[1]-6)])\n\n# def hesse(x):\n#     return np.array([[8., 0.], [0., 2.]])\n\n# print(Marquardt(f, grad, hesse, np.array([-2., 2.]), 1e-6))"
}
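The test block at the bottom of optimize_module.py is commented out; an equivalent standalone driver for the second-order methods might look like the sketch below. Newton and Marquardt take f, grad and hesse explicitly, so no global injection is needed for them (note Marquardt hardcodes a 2x2 identity, so it is 2D-only as written).

```python
import numpy as np
import optimize_module

def f(x):
    return 4 * (x[0] - 5) ** 2 + (x[1] - 6) ** 2

def grad(x):
    return np.array([8 * (x[0] - 5), 2 * (x[1] - 6)])

def hesse(x):
    return np.array([[8., 0.], [0., 2.]])

x0 = np.array([-2., 2.])
for method in (optimize_module.Newton, optimize_module.Marquardt):
    x, fx, k = method(f, grad, hesse, x0, 1e-6)
    print(method.__name__, x, fx, k)  # both should land on [5. 6.] with fx ~ 0
```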
] | 2 |
mikepatel/Temporary-Happiness
|
https://github.com/mikepatel/Temporary-Happiness
|
266f13ad021336f66fd446831bc62226c5762cc6
|
42415666df0ee70ebfd02baed91ca2b7201a0e1b
|
a63104bdcd2181a7b5757b8fc07f7e01ff4e958d
|
refs/heads/master
| 2022-12-30T23:51:17.718736 | 2020-10-05T21:59:57 | 2020-10-05T21:59:57 | 298,754,440 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7669903039932251,
"alphanum_fraction": 0.77130526304245,
"avg_line_length": 34.653846740722656,
"blob_id": "046f0ce1faea200aa10c4bc5027fb144afdcf6ac",
"content_id": "d839da5d5c1d03c2fff6345d4d51591f184db121",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 927,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 26,
"path": "/README.md",
"repo_name": "mikepatel/Temporary-Happiness",
"src_encoding": "UTF-8",
"text": "# Temporary-Happiness\n## Overview\nA simple text classifier\n\n## Dataset\n* dataset courtesy of [t-davidson](https://github.com/t-davidson/hate-speech-and-offensive-language/tree/master/data)\n* **Choice of dataset is temporary and will be revisited!**\n\n## Environment\n* Python 3.7\n* Anaconda environment\n * TensorFlow 2.1\n\n## File descriptions\n* [model.py](https://github.com/mikepatel/Temporary-Happiness/blob/master/model.py) for model definition\n* [parameters.py](https://github.com/mikepatel/Temporary-Happiness/blob/master/parameters.py) for model parameters\n* [train.py](https://github.com/mikepatel/Temporary-Happiness/blob/master/train.py) for model training and prediction\n\n## Instructions\nTemporarily using train.py to train AND to also perform inference (will save trained model in the future)\n```\npython train.py\n```\n\n## Results\n\n"
},
{
"alpha_fraction": 0.4532085657119751,
"alphanum_fraction": 0.4745989441871643,
"avg_line_length": 27.769229888916016,
"blob_id": "f1b96d7dddd95b5240671781cc34c97a149bfb27",
"content_id": "95427e278e22082620e0918874a9ba3a395ffa40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 748,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 26,
"path": "/parameters.py",
"repo_name": "mikepatel/Temporary-Happiness",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\"\"\"\n################################################################################\n# Imports\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\n################################################################################\n# Filepaths\nDATA_DIR = os.path.join(os.getcwd(), \"data\")\nCSV_FILEPATH = os.path.join(DATA_DIR, \"data.csv\")\nPLOT_FILEPATH = os.path.join(os.getcwd(), \"training_accuracy\")\nSAVED_MODEL_FILEPATH = os.path.join(os.getcwd(), \"saved_model\")\n\n\n################################################################################\nNUM_EPOCHS = 5\nBATCH_SIZE = 16\nNUM_RNN_UNITS = 128\nMAX_WORDS = 5000 # limit data to top x words\nMAX_SEQ_LENGTH = 100 #\nEMBEDDING_DIM = 100 #\n"
},
{
"alpha_fraction": 0.568855345249176,
"alphanum_fraction": 0.5729227066040039,
"avg_line_length": 24.124088287353516,
"blob_id": "1a1307ef9acb828d029b2f29dffec0c5f0863a2e",
"content_id": "2b58e42a986063ffe12d2596efdfd48eb7e657d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3442,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 137,
"path": "/train.py",
"repo_name": "mikepatel/Temporary-Happiness",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\"\"\"\n################################################################################\n# Imports\nfrom parameters import *\nfrom model import build_model\n\n\n################################################################################\n# Main\nif __name__ == \"__main__\":\n # print out TF version\n print(f'TF version: {tf.__version__}')\n\n # ----- ETL ----- #\n # ETL = Extraction, Transformation, Load\n text = []\n labels = []\n\n # read in csv\n df = pd.read_csv(CSV_FILEPATH)\n\n # Clean dataset\n # SKIP FOR NOW - will change from using Twitter dataset in the future\n\n # get text sequences and category labels\n text = list(df[\"text\"])\n labels = list(df[\"class\"])\n\n # will make labels binary for now\n # 0 = not offenseive\n # 1 = offensive\n for i in range(len(labels)):\n if labels[i] == 0:\n labels[i] = 1\n if labels[i] == 2:\n labels[i] = 0\n\n # map class categories to integers\n # arbitrary class names\n class_names = [\n \"not offensive\",\n \"offensive\"\n ]\n\n num_classes = len(class_names)\n\n class2int = {class_names[i]: i for i in range(len(class_names))}\n\n # map integers to class categories\n int2class = {v: k for k, v in class2int.items()}\n\n # split data set into training, validation, test\n # SKIP FOR NOW\n # training\n train_text = text\n train_labels = np.array(labels)\n\n # validation\n\n # test\n\n #print(f'Number of training text examples: {len(train_text)}')\n #print(f'Number of training labels: {len(train_labels)}')\n\n # Tokenization\n tokenizer = tf.keras.preprocessing.text.Tokenizer(\n num_words=MAX_WORDS\n )\n\n # tokenize on training set\n tokenizer.fit_on_texts(train_text)\n\n word2int = tokenizer.word_index # unique tokens\n vocab_size = len(word2int)\n\n # Vectorization\n train_text = tokenizer.texts_to_matrix(train_text)\n train_text = np.array(train_text)\n\n # one-hot encode labels\n train_labels = tf.keras.utils.to_categorical(train_labels, num_classes)\n\n # print shape\n print(f'Shape of train text: {train_text.shape}')\n print(f'Shape of train labels: {train_labels.shape}')\n\n # ----- MODEL ----- #\n # build model\n model = build_model(num_categories=num_classes)\n model.summary()\n\n # loss function, optimizer\n model.compile(\n loss=tf.keras.losses.categorical_crossentropy,\n optimizer=tf.keras.optimizers.Adam(),\n metrics=[\"accuracy\"]\n )\n\n # ----- TRAIN ----- #\n # train model\n history = model.fit(\n x=train_text,\n y=train_labels,\n epochs=NUM_EPOCHS,\n batch_size=BATCH_SIZE\n )\n\n # save model\n # SKIP FOR NOW\n\n # plot accuracy\n plt.scatter(range(1, NUM_EPOCHS+1), history.history[\"accuracy\"], label=\"accuracy\")\n plt.title(\"Training Accuracy\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.grid()\n plt.legend(loc=\"lower right\")\n plt.savefig(PLOT_FILEPATH)\n\n # ----- PREDICT ----- #\n test_text = [\n \"Miami vs Lebron, here we go\",\n \"What the fuck is this shit?\"\n ]\n\n # tokenize\n test_tokens = tokenizer.texts_to_matrix(test_text)\n test_tokens = np.array(test_tokens)\n\n # make predictions\n predictions = model.predict(test_tokens)\n\n for i in range(len(test_text)):\n print()\n print(f'Text: {test_text[i]}')\n print(f'Prediction: {int2class[np.argmax(predictions[i])]}')\n"
},
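The vectorization step above is worth calling out: texts_to_matrix does not produce padded token sequences but one fixed-length multi-hot row of size MAX_WORDS per input string, which is why build_model can start with a plain Dense layer. A tiny standalone demonstration, independent of the dataset:

```python
# Standalone sketch of the texts_to_matrix vectorization used in train.py.
import numpy as np
import tensorflow as tf

tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=10)
tokenizer.fit_on_texts(["miami vs lebron", "here we go miami"])

matrix = tokenizer.texts_to_matrix(["miami here"])
print(matrix.shape)        # (1, 10): one multi-hot row per input text
print(np.nonzero(matrix))  # column indices of the words seen in the text
```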
{
"alpha_fraction": 0.4922206401824951,
"alphanum_fraction": 0.49929279088974,
"avg_line_length": 25.185184478759766,
"blob_id": "da74885000c6a429a8d56391c37fcce1212519e5",
"content_id": "883e3d024e3021a4041dbca865a5d0f9cf8822ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 27,
"path": "/model.py",
"repo_name": "mikepatel/Temporary-Happiness",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\"\"\"\n################################################################################\n# Imports\nfrom parameters import *\n\n\n################################################################################\n# keeping model architecture simple for now with fully connected (dense) layers\n# Fully Connected\ndef build_model(num_categories):\n model = tf.keras.Sequential()\n\n # layer 1\n model.add(tf.keras.layers.Dense(\n units=512,\n input_shape=(MAX_WORDS, ), # (batch, MAX_WORDS)\n activation=tf.keras.activations.relu\n ))\n\n # layer 2\n model.add(tf.keras.layers.Dense(\n units=num_categories,\n activation=tf.keras.activations.softmax\n ))\n\n return model\n"
}
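Because build_model only depends on the category count (plus MAX_WORDS from parameters.py), it can be smoke-tested in isolation before a full training run. A minimal sketch, assuming the repo's modules are on the path:

```python
# Smoke test: instantiate the classifier without any data.
from model import build_model

model = build_model(num_categories=2)
model.summary()  # expect (None, 5000) -> Dense(512, relu) -> Dense(2, softmax)
```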
] | 4 |
Edinburgh-iGEM2016/Modelling
|
https://github.com/Edinburgh-iGEM2016/Modelling
|
9a3289bdebc91716af0aab6af05de1af8ba915a8
|
d2cdfab4ebcdfbffafa30362970740abd1f2e928
|
7d6866f4ea711919cd4e1a80f9e8d3d311091d09
|
refs/heads/master
| 2020-04-21T01:15:30.269873 | 2016-08-31T13:43:05 | 2016-08-31T13:43:05 | 67,036,705 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8050633072853088,
"alphanum_fraction": 0.8050633072853088,
"avg_line_length": 130.6666717529297,
"blob_id": "fb54bb0287708e08372dc509af424b4f0e7379eb",
"content_id": "637940cc5a78f26733a973c436cd80359ccbebc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 381,
"num_lines": 3,
"path": "/README.md",
"repo_name": "Edinburgh-iGEM2016/Modelling",
"src_encoding": "UTF-8",
"text": "# Modelling\n\nA simple model to work out how many sequencing reactions we would have to do to have a certain % certainty of having at least one babblebrick from a gblock of n babblebricks. The results of this eventually caused us to order BabbleBricks in the form of primers rather than as gBlocks. This allowed a significant cost reduction as we did not have to do as many sequencing reactions.\n"
},
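The enumeration in sequencingCountSim.py has a closed-form counterpart: by inclusion–exclusion, the probability that k reads drawn uniformly from n BabbleBricks cover every brick is sum over j = 0..n of (-1)^j * C(n, j) * ((n - j) / n)^k. A quick cross-check of that formula (my derivation, not part of the repo; math.comb needs Python 3.8+):

```python
from math import comb

def p_all_seen(n: int, k: int) -> float:
    """P(k uniform draws over n bricks hit every brick), via inclusion-exclusion."""
    return sum((-1) ** j * comb(n, j) * ((n - j) / n) ** k for j in range(n + 1))

print(p_all_seen(2, 5))  # 0.9375, matching simMain(2, 5) from the script
```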
{
"alpha_fraction": 0.6695652008056641,
"alphanum_fraction": 0.6929765939712524,
"avg_line_length": 45.71875,
"blob_id": "3a3551e4d3adf29e5a0b502da90d7563ae8dc422",
"content_id": "6452a632ec84b7107f892f6904e0f65296716ee7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1495,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 32,
"path": "/sequencingCountSim.py",
"repo_name": "Edinburgh-iGEM2016/Modelling",
"src_encoding": "UTF-8",
"text": "# a simple model to work out how many sequencing reactions we would have to do to have a certain % certainty \n# of having at least one babblebrick from a gblock of n babblebricks\n# babbled - edinburgh ug igem 2016\n#freddie starkey\n\nfrom itertools import product\nimport matplotlib.pyplot as plt\n\ndef plotOut(noReactions):\n plt.plot(range(1, noReactions + 1), [simMain(2, reactionCount) for reactionCount in xrange(1, noReactions + 1)], 'b-', label=\"2 words per gBlock\")\n plt.plot(range(1, noReactions + 1), [simMain(3, reactionCount) for reactionCount in xrange(1, noReactions + 1)], 'r-', label=\"3 words per gBlock\")\n plt.plot(range(1, noReactions + 1), [simMain(4, reactionCount) for reactionCount in xrange(1, noReactions + 1)], 'g-', label=\"4 words per gBlock\")\n plt.plot(range(1, noReactions + 1), [simMain(5, reactionCount) for reactionCount in xrange(1, noReactions + 1)], 'k-', label=\"5 words per gBlock\")\n plt.xlabel(\"number of sequencing reactions\")\n plt.ylabel(\"accuracy (%)\")\n plt.legend()\n plt.show()\n\ndef simMain(noInGBlock, noReactions):\n miss = 0.0\n table = list(product(range(0, noInGBlock), repeat=noReactions))\n total = float(len(table))\n for eachPerm in table:\n if checkAllPresent(eachPerm, noInGBlock) == False:\n miss = miss + 1\n return 1.0 - miss/total\n\ndef checkAllPresent(eachPerm, noInGBlock):\n for eachWord in range(0, noInGBlock):\n if eachWord not in eachPerm:\n return False\n return True\n"
}
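Note that the script targets Python 2 (xrange, no __main__ guard), and that simMain materializes all noInGBlock**noReactions outcomes, so its cost grows exponentially with the reaction count. A Python 3 rendering of the same simulation, for reference:

```python
# Python 3 sketch of simMain/checkAllPresent (xrange -> range is the only
# change the original plotOut would also need).
from itertools import product

def sim_main(n_in_gblock: int, n_reactions: int) -> float:
    outcomes = list(product(range(n_in_gblock), repeat=n_reactions))
    hits = sum(1 for perm in outcomes
               if all(word in perm for word in range(n_in_gblock)))
    return hits / len(outcomes)

print(sim_main(3, 6))  # fraction of 3**6 read-sequences that cover all 3 bricks
```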
] | 2 |
everypolitician-scrapers/uk-ynmp-2015-winners
|
https://github.com/everypolitician-scrapers/uk-ynmp-2015-winners
|
0e96b7f8d9b1e46ebff8812bfb9d8911dc73534d
|
e27ee9d0a9dcd3bae58438c32c4c9e9fef073bbe
|
ecc8fa07e024e88692dbafc9a0ac19fb5668c518
|
refs/heads/master
| 2021-06-06T03:39:22.862270 | 2016-09-15T15:39:07 | 2016-09-15T15:43:35 | 68,300,186 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5801253318786621,
"alphanum_fraction": 0.5881826281547546,
"avg_line_length": 29.189189910888672,
"blob_id": "501f4517a9f30722d96d2f536c8b34c7410820df",
"content_id": "fd95d938db54eacf46df99e0f01cff9056309c7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 37,
"path": "/scraper.py",
"repo_name": "everypolitician-scrapers/uk-ynmp-2015-winners",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport csv\nimport re\nfrom urlparse import urlsplit\n\nimport requests\nimport scraperwiki\n\n'''This \"scraper\" just changes the columns in the YourNextMP elected\ncandidates data from the UK 2015 general election'''\n\nurl = 'https://candidates.democracyclub.org.uk/media/candidates-elected-2015.csv'\n\nr = requests.get(url, stream=True)\n\nfor row in csv.DictReader(r.raw):\n parlparse_person_id = re.sub(r'^.*/(\\d+)$', r'\\1', row['parlparse_id'])\n wikiname = ''\n if row['wikipedia_url']:\n split = urlsplit(row['wikipedia_url'])\n wikiname = split.path[len('/wiki/'):]\n wikiname = wikiname.replace('_', ' ')\n scraperwiki.sqlite.save(\n unique_keys=['id'],\n data={\n 'id': parlparse_person_id,\n 'name': row['name'],\n 'twitter': row['twitter_username'],\n 'facebook': row['facebook_page_url'],\n 'wikipedia': row['wikipedia_url'],\n 'wikiname': wikiname,\n 'birth_date': row['birth_date'],\n 'linkedin': row['linkedin_url'],\n 'image': row['image_url'],\n }\n )\n"
}
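The two less obvious transformations in the scraper are the parlparse id regex and the wikiname derivation; both are shown standalone below with hypothetical example values (the scraper itself runs on Python 2, hence urlparse rather than urllib.parse).

```python
# Python 3 sketch of the two string transformations (example values are made up).
import re
from urllib.parse import urlsplit

parlparse_person_id = re.sub(r'^.*/(\d+)$', r'\1',
                             'uk.org.publicwhip/person/10001')
path = urlsplit('https://en.wikipedia.org/wiki/Example_MP').path
wikiname = path[len('/wiki/'):].replace('_', ' ')
print(parlparse_person_id, '|', wikiname)  # 10001 | Example MP
```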
] | 1 |
nothke/b3d-id-bake
|
https://github.com/nothke/b3d-id-bake
|
1608dcbe24a026796cb0b7cc1b24b91eee381caf
|
300489e64ff7721e0cd4a82514a813997a66ec44
|
72ba39cd7f7efd4acc2ba72bc376abdc03210d5a
|
refs/heads/master
| 2023-01-06T04:14:56.357388 | 2020-11-05T01:18:08 | 2020-11-05T01:18:08 | 310,097,771 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7673147320747375,
"alphanum_fraction": 0.7770352363586426,
"avg_line_length": 46.05714416503906,
"blob_id": "65c9d3a05b7ec74045cbd016bbd7cbe22a594fb9",
"content_id": "1c00af36dbd44c88d00e4102b9f4ca92a30d5c71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1646,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 35,
"path": "/README.md",
"repo_name": "nothke/b3d-id-bake",
"src_encoding": "UTF-8",
"text": "Single command id map baker for Blender. Bakes color of all selected object's materials to a single texture.\n\nBlender 2.8's baking workflow overhaul has made baking id maps require much more clicks than before. This is intended to simplify this process by providing a single command.\n\n### Usage\n\n1. Meshes with assigned materials as ids (for example, but you could also use color textures as the source) should be prepared for baking: Make sure they are UV unwrapped and that the UV islands don't overlap, as you would with standard baking.\n\n2. Select all objects you wish a signle id map to be baked for.\n\n3. Search \"Bake id map\" in commands search bar and click on it.\n\n\n\nAnd that's it! The texture will be created in the same folder as your blend file, with the name of ACTIVEOBJECTNAME_id.png.\n\n\n\nYou can additionally set a custom texture size, which will cause a rebake. 512 is default.\n\n### What does it actually do?\n\n1. It duplicates the source objects to use them as baking target objects,\n2. Removes all materials from duplicates,\n3. Applies modifiers and joins all duplicates which becomes the baking target mesh,\n4. Adds a temporary baking material and assigns a baking texture node,\n5. Creates the target texture,\n6. Bakes the color from source objects to joined target mesh\n7. Cleans up, deleting duplicated mesh and temporary materials and textures\n\nAs you can see, it does a bunch of stuff with a single command!\n\n### Installation\n\nInstall like any other blender plugin: Edit > Preferences > Add-Ons > Install.. find bake_id_map.py. Then tick it to enable it."
},
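Steps 4–6 of the list above are the heart of the operator: Cycles bakes into whichever image texture node is active in the target's material. A stripped-down sketch of just that core (omitting the duplicate/join/cleanup steps; it assumes Cycles is the engine, a target object is active with a node-based material, and the source objects are selected) is below. Note also that bake_id_map.py reads context.scene.samplesColor, a custom property the file never registers, so you may need to substitute a plain sample count when adapting it.

```python
import bpy

# Core of the bake (sketch; see bake_id_map.py for the full duplicate/join flow).
img = bpy.data.images.new("BakeImage", width=512, height=512)
mat = bpy.context.active_object.data.materials[0]
node = mat.node_tree.nodes.new("ShaderNodeTexImage")
node.image = img
mat.node_tree.nodes.active = node  # the bake writes into the active image node

bpy.context.scene.render.bake.use_pass_direct = False
bpy.context.scene.render.bake.use_pass_indirect = False
bpy.context.scene.render.bake.use_pass_color = True
bpy.ops.object.bake(type='DIFFUSE', use_clear=True, use_selected_to_active=True)

img.filepath_raw = bpy.path.abspath("//baked_id.png")
img.file_format = 'PNG'
img.save()
```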
{
"alpha_fraction": 0.6127384305000305,
"alphanum_fraction": 0.616314709186554,
"avg_line_length": 31.269229888916016,
"blob_id": "a2a47ce96082ab04c3178c1c3ef9e18608c62584",
"content_id": "db3e715981e7f17c867caee6d709a28354b8d8e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5872,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 182,
"path": "/bake_id_map.py",
"repo_name": "nothke/b3d-id-bake",
"src_encoding": "UTF-8",
"text": "# By Nothke\n\nbl_info = {\n \"name\": \"Bake id map\",\n \"description\": \"Bakes id map to texture with a single command.\",\n \"author\": \"Nothke\",\n \"category\": \"Object\",\n \"blender\": (2, 80, 0),\n# \"location\": \"Object > Apply > Unity Rotation Fix\",\n}\n\nimport bpy\nfrom math import pi\nfrom mathutils import Vector\nfrom mathutils import Matrix\n\nclass NOTHKE_OT_bake_id_map(bpy.types.Operator):\n \"\"\"Bakes id map\"\"\"\n bl_idname = \"object.bake_id_map\"\n bl_label = \"Bake id map\"\n bl_options = {'REGISTER', 'UNDO'}\n\n tex_size: bpy.props.IntProperty(name=\"Texture Size\", default=512)\n ray_distance: bpy.props.FloatProperty(name=\"Ray Distance\", default=0.1)\n\n def execute(self, context):\n\n original_active_object = context.view_layer.objects.active\n name = original_active_object.name\n filename = name + \"_id.png\"\n\n layer = context.view_layer\n \n selected_objects = []\n \n for obj in context.selected_objects:\n selected_objects.append(obj)\n\n bpy.ops.object.select_all(action='DESELECT')\n \n layer.objects.active = None\n duplicated_objects = []\n\n # duplicate each object individually\n for obj in selected_objects:\n layer.objects.active = obj\n obj.select_set(True)\n bpy.ops.object.duplicate()\n dobj = context.object\n duplicated_objects.append(dobj)\n\n #print(dobj.name)\n\n # Deselect this object\n layer.objects.active = None\n bpy.ops.object.select_all(action='DESELECT')\n\n bakeSize = self.tex_size\n\n #5 remember render engine and switch to CYCLES for baking\n orig_renderer = bpy.data.scenes[bpy.context.scene.name].render.engine\n bpy.data.scenes[bpy.context.scene.name].render.engine = \"CYCLES\"\n\n #6 create temporary bake image and material\n bakeimage = bpy.data.images.new(\"BakeImage\", width=bakeSize, height=bakeSize)\n bakemat = bpy.data.materials.new(name=\"bakemat\")\n bakemat.use_nodes = True\n\n # Set materials\n for obj in duplicated_objects:\n layer.objects.active = obj\n\n # remove material slots\n for x in obj.material_slots:\n obj.active_material_index = 0 \n bpy.ops.object.material_slot_remove()\n\n # add and assign material\n bpy.ops.object.material_slot_add()\n obj.data.materials[0] = bakemat\n\n # Select duplicated objects\n for obj in duplicated_objects:\n obj.select_set(True)\n layer.objects.active = obj\n\n # apply modifiers, will work for all selected meshes\n bpy.ops.object.convert(target='MESH')\n \n # join\n bpy.ops.object.join()\n joined_obj = layer.objects.active\n\n #8 select lowpoly target\n #bpy.context.view_layer.objects.active = original_active_object #context.scene.lowpoly\n\n #9 select lowpoly material and create temporary render target\n #orig_mat = bpy.context.active_object.data.materials[0]\n bpy.context.active_object.data.materials[0] = bakemat\n node_tree = bakemat.node_tree\n node = node_tree.nodes.new(\"ShaderNodeTexImage\")\n node.select = True\n node_tree.nodes.active = node\n node.image = bakeimage\n\n # setup id baking\n bpy.context.scene.cycles.samples = context.scene.samplesColor\n bpy.context.scene.render.bake.use_pass_direct = False\n bpy.context.scene.render.bake.use_pass_indirect = False\n bpy.context.scene.render.bake.use_pass_color = True\n\n # select this\n context.view_layer.objects.active.select_set(True)\n\n # Select all original\n for obj in selected_objects:\n obj.select_set(True)\n\n\n # BAKE!\n bpy.ops.object.bake(\n type = 'DIFFUSE', \n use_clear = True, \n use_selected_to_active = True,\n cage_extrusion = self.ray_distance) # changed in 2.9 to max_ray_distance?\n\n folder_path 
= bpy.path.abspath(\"//\")\n map_path = folder_path + filename\n bakeimage.filepath_raw = map_path # context.scene.bakeFolder+context.scene.bakePrefix+\"_color.tga\"\n bakeimage.file_format = 'PNG'\n bakeimage.save()\n \n print(\"Saved at \" + map_path)\n\n # Reselect and reactivate previously selected objects\n for obj in selected_objects:\n obj.select_set(True)\n\n context.view_layer.objects.active = original_active_object\n \n print(\"Baked\")\n\n # Cleanup\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.images.remove(bakeimage)\n bakemat.node_tree.nodes.remove(node)\n bpy.data.materials.remove(bakemat)\n bpy.data.scenes[bpy.context.scene.name].render.engine = orig_renderer\n\n # Delete joined object\n layer.objects.active = joined_obj\n joined_obj.select_set(True)\n bpy.ops.object.delete(use_global=False)\n\n # Reselect old objects and make active\n for obj in selected_objects:\n obj.select_set(True)\n\n layer.objects.active = original_active_object\n\n self.report({'INFO'}, \"Baked id successfully and saved to \" + map_path)\n\n return {'FINISHED'}\n\n def invoke(self, context, event):\n wm = context.window_manager\n return wm.invoke_props_dialog(self)\n\ndef menu_draw(self, context):\n layout = self.layout\n layout.operator(\"object.bake_id_map\", text=\"Bake id map\")\n\ndef register():\n bpy.utils.register_class(NOTHKE_OT_bake_id_map)\n bpy.types.VIEW3D_MT_object_apply.append(menu_draw)\n\ndef unregister():\n bpy.utils.unregister_class(NOTHKE_OT_bake_id_map)\n bpy.types.VIEW3D_MT_object_apply.remove(menu_draw)\n\nif __name__ == \"__main__\":\n register()"
}
] | 2 |
users-tree/alex
|
https://github.com/users-tree/alex
|
b529270155c32ab38eba429b1a70589c60997f31
|
140545e42fdd34142fffbbb4eb804cfb8d622971
|
42b07606c5a038b789491fd6f802b380b005d5f6
|
refs/heads/master
| 2021-01-10T02:04:21.276960 | 2016-02-13T01:23:03 | 2016-02-13T01:23:03 | 51,626,276 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5199999809265137,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 24,
"blob_id": "d47ad55b3a38945a75525f41fccd6412687c7bb6",
"content_id": "f33b705dcbeae5958014d4a751277ad6436db7b6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 25,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 1,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib/common_test/vsn.mk",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "COMMON_TEST_VSN = 1.11.1\n"
},
{
"alpha_fraction": 0.6388140320777893,
"alphanum_fraction": 0.671159029006958,
"avg_line_length": 22.125,
"blob_id": "02895ba3eaf18ae3596b8b4841474a97bf02a8dd",
"content_id": "ebe50888b2d4b9bd6e63f97cfe38b590b0ebe522",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 371,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 16,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/erl_pbifs.c",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/*\n * Warning: Do not edit this file. It was automatically\n * generated by 'make_tables' on Thu Dec 31 02:09:59 2015.\n */\n\n#ifdef HAVE_CONFIG_H\n# include \"config.h\"\n#endif /* HAVE_CONFIG_H */\n#include \"export.h\"\n#include \"sys.h\"\n#include \"erl_vm.h\"\n#include \"global.h\"\n#include \"erl_process.h\"\n#include \"bif.h\"\n#include \"erl_bif_table.h\"\n#include \"erl_atom_table.h\"\n\n"
},
{
"alpha_fraction": 0.6031594276428223,
"alphanum_fraction": 0.6093824505805969,
"avg_line_length": 30.41353416442871,
"blob_id": "b14e0874da66d0fcf02ba3ed59abfb7f65bd8ec4",
"content_id": "e1c64ce68e02a44057ee6466adb09c4ff08dbfc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4178,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 133,
"path": "/bin/backup",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Shell script (BASH) to backup the selected directory on server and upload to\n# another ftp server securely. This script uses the gpg command to\n# encrypt the .tar.gz file before upload take place.\n#\n# In order to run this script you must have following tools installed:\n# - /usr/bin/ncftpput\n# - /bin/tar\n# - /usr/bin/mail\n# - /usr/bin/gpg\n#\n# Script also mails back the ftp operation failed or not\n#\n# Installation:\n# Customize the script according to your need. You need to setup ftp\n# server, password etc. Next, you need to setup gpg user name and\n# import public key so that you can encrypt the files. Usually following two\n# commands needed for gpg:\n# gpg --import userkey\n# gpg --edit-key KEY_ID|USER_ID\n# Command>trust\n#\n# --------------------------------------------------------------------\n# This is a free shell script under GNU GPL version 2.0 or above\n# Copyright (C) 2005 nixCraft project.\n# Feedback/comment/suggestions : http://cyberciti.biz/fb/\n# -------------------------------------------------------------------------\n# This script is part of nixCraft shell script collection (NSSC)\n# Visit http://bash.cyberciti.biz/ for more information.\n# -------------------------------------------------------------------------\n \n# Dirs to backup, Separate multiple directories using space\n# for example /home /www /data2\nBACKUP=\"/Users/aldavis\"\n \n# Remote ftp server\nFTPH=\"foodhip.com\"\n \n# Remote ftp user name\nFTPU=\"foodhipc\"\n \n# Remote ftp user password\nFTPP=\"PASSWORD\"\n \n# Local gpg user_id\nGPGU=\"Alex Davis\"\n \n# Remote directory, blank for default remote dir\n# If dir does not exist it will be created automatically by ncftpput :)\nFTPD=\"backup/\"\n \n# Temporary directory to store tar.gz file and process it\nTMPD=\"/tmp\"\n \n# Mail message\n# Admin email [email protected] or [email protected]\nMTO=\"[email protected]\"\n# Mail subject\nMSUB=\"Backup $(hostname) report\"\n# Admin info, URL email id; change it according to your need :)\nADMIN_INFO=\"For support visit http://cyberciti.biz/fb/ or write an email to [email protected]\"\n \n# Only change if your UNIX stores bin in diffrent location\nNCFTP=\"/usr/bin/ncftpput\"\nTAR=\"/bin/tar\" # must be gnu tar\nMAILC=\"/usr/bin/mail\"\nGPG=\"/usr/bin/gpg\"\n \n#######################################################################\n# Do not change anything below\n#######################################################################\nFILE=\"$(hostname).$(date +\"%d-%m-%Y\").tar.gz\"\nOUT=\"$TMPD/$FILE\"\nFOUT=\"$OUT.gpg\"\nMFILE=\"/tmp/ftpout.$$.txt\"\nMESS=\"\"\n \nif [ ! -x $TAR ]; then\n echo \"$TAR command not found, contact $ADMIN_INFO\"\n exit 1\nfi\n \nif [ ! -x $NCFTP ]; then\n echo \"$NCFTP command not found, contact $ADMIN_INFO\"\n exit 1\nfi\n \nif [ ! -x $GPG ] ; then\n echo \"$GPG command not found, contact $ADMIN_INFO\"\n exit 1\nfi\n \n$TAR -zcf $OUT $BACKUP\nif [ $? -ne 0 ];\nthen\n MESS=\"$TAR failed to create backup. 
Nothing uploaded to remote FTP $FTPH server\"\nelse\n # Encrypt the .tar.gz file before upload\n $GPG -e -r $GPGU -o $FOUT $OUT\n $NCFTP -m -u \"$FTPU\" -p \"$FTPP\" \"$FTPH\" \"$FTPD\" \"$FOUT\"\n OSTAT=\"$?\"\n case $OSTAT in\n\t0) MESS=\"Success.\";;\n\t1) MESS=\"Could not connect to remote host $FTPH.\";;\n 2) MESS=\"Could not connect to remote host $FTPH - timed out.\";;\n 3) MESS=\"Transfer failed.\";;\n 4) MESS=\"Transfer failed - timed out.\";;\n 5) MESS=\"Directory change failed.\";;\n 6) MESS=\"Directory change failed - timed out.\";;\n 7) MESS=\"Malformed URL.\";;\n 8) MESS=\"Usage error. May be your version of ncftpput ($NCFTP) is old\";;\n 9) MESS=\"Error in login configuration file.\";;\n 10)MESS=\"Library initialization failed.\";;\n 11) MESS=\"Session initialization failed.\";;\n\t*) MESS=\"Unknown error, contact admin $ADMIN_INFO\";;\n esac\nfi\n \n>$MFILE\necho \"Backup status for $(hostname) as on $(date):\" >>$MFILE\necho \"\" >>$MFILE\necho \"Backup File : $FOUT\" >>$MFILE\necho \"Backup ftp server : $FTPH\" >>$MFILE\necho \"Backup status message : $MESS\" >>$MFILE\necho \"\" >>$MFILE\necho \"-- Automatically generated by $(basename $0)\" >>$MFILE\n \n# send an email to admin\n$MAILC -s \"$MSUB\" $MTO <$MFILE\n# remove the files\n[ -f $MFILE ] && rm -f $MFILE || :\n[ -f $FOUT ] && rm -f $FOUT || :\n[ -f $OUT ] && rm -f $OUT || :\n"
},
{
"alpha_fraction": 0.627743661403656,
"alphanum_fraction": 0.6312555074691772,
"avg_line_length": 28.205127716064453,
"blob_id": "669c9f01b4ddf92a6a8403267e97e811b9f1c213",
"content_id": "911d8ded815d51933d161c2eebb517bc112ba68f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 39,
"path": "/.profile",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "export START=$START:profile\n\nexport CLICOLOR=1\n#export LSCOLORS=ExFxCxDxBxegedabagacad\nexport LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx\n[[ -s \"$HOME/.rvm/scripts/rvm\" ]] && source \"$HOME/.rvm/scripts/rvm\" # Load RVM into a shell session *as a function*\n[[ -s \"$HOME/.gvm/scripts/gvm\" ]] && source \"$HOME/.gvm/scripts/gvm\"\n[[ -s \"$HOME/.nvm/nvm.sh\" ]] && source \"$HOME/.nvm/nvm.sh\"\n\nexport PATH=\"$HOME/bin:$HOME/.pyenv/bin:$PATH:$HOME/.rvm/bin:$HOME/.gvm/bin:$PATH\" # Add RVM to PATH for scripting\n\neval \"$(pyenv init -)\"\neval \"$(pyenv virtualenv-init -)\"\n\n\nexport ANDROID_HOME=/usr/local/opt/android-sdk\n\nSSH_ENV=\"$HOME/.ssh/environment\"\n\nfunction start_agent {\n echo \"Initialising new SSH agent...\"\n /usr/bin/ssh-agent | sed 's/^echo/#echo/' > \"${SSH_ENV}\"\n echo succeeded\n chmod 600 \"${SSH_ENV}\"\n . \"${SSH_ENV}\" > /dev/null\n /usr/bin/ssh-add;\n}\n\n# Source SSH settings, if applicable\n\nif [ -f \"${SSH_ENV}\" ]; then\n . \"${SSH_ENV}\" > /dev/null\n #ps ${SSH_AGENT_PID} doesn't work under cywgin\n ps -ef | grep ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || {\n start_agent;\n }\nelse\n start_agent;\nfi\n"
},
{
"alpha_fraction": 0.5569853186607361,
"alphanum_fraction": 0.576286792755127,
"avg_line_length": 27.63157844543457,
"blob_id": "4122b78a6a5c8dd12467403e222f8bff005a444a",
"content_id": "af196a8a1e4976e371a3eaea382dde5ff571a472",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 2176,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 76,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/make/x86_64-apple-darwin14.5.0/ose_lm.mk",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#-*-makefile-*- ; force emacs to enter makefile-mode\n# ----------------------------------------------------\n# Template target for generating an OSE5 load module\n#\n# %CopyrightBegin%\n#\n# Copyright Ericsson AB 2013. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# %CopyrightEnd%\n#\n# Author: Petre Pircalabu\n# ----------------------------------------------------\n\n# ----------------------------------------------------\n# \tbuild-ose-load-module\n#\tCreates an OSE5 load module\n#\tparams:\n#\t\t$(1) - The output target\n#\t\t$(2) - Objects\n#\t\t$(3) - Libraries\n#\t\t$(4) - LM configuration file\n# ----------------------------------------------------\n\nifeq ($(findstring ose,$(TARGET)),ose)\nLDR1FLAGS = \nLDR2FLAGS = \nOSEROOT\t = \nLCF\t = \nBEAM_LMCONF = \nEPMD_LMCONF = \nRUN_ERL_LMCONF = \nSTRIP\t = \nLM_POST_LINK = \nLM_SET_CONF = \nLM_ELF_SIZE = \nOSE_CONFD = \nCRT0_LM = \nendif\n\ndefine build-ose-load-module\n\t@echo \" --- Linking $(1)\"\n\n\t@echo \" --- Linking $(1) (pass 1)\"\n\t$(ld_verbose)$(PURIFY) $(LD) -o $(1)_unconfigured_ro -r \\\n\t$(2) --start-group $(3) --end-group --cref --discard-none -M > $(1)_1.map\n\n\t@echo \" --- Linking $(1) (pass 2)\"\n\t$(ld_verbose)$(PURIFY) $(LD) -o $(1)_unconfigured \\\n\t$(1)_unconfigured_ro -T $(LCF) -n --emit-relocs -e crt0_lm --cref \\\n\t--discard-none -M > $(1)_2.map\n\n\t@echo \" --- Inserting configuration\"\n\t$(ld_verbose) $(LM_SET_CONF) $(1)_unconfigured < $(4)\n\n\t@echo \" --- Striping $(1)\"\n#\t$(ld_verbose) $(STRIP) $(1)_unconfigured\n\n\t@echo \" --- Postlinking $(1)\"\n\t$(ld_verbose) $(LM_POST_LINK) $(1)_unconfigured\n\n\t@echo \" --- Sizing $(1)\"\n\t$(ld_verbose) $(LM_ELF_SIZE) $(1)_unconfigured\n\tmv $(1)_unconfigured $(1)\nendef\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 36,
"blob_id": "544388cebe1d725f6a61c95fd2d779e95ee772fd",
"content_id": "e920c9ac2b06892004b10f8ee59bb296f2b34ab9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 1,
"path": "/.escaped_colors.rb",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/irb/escaped_colors.rb"
},
{
"alpha_fraction": 0.6998254656791687,
"alphanum_fraction": 0.7085514664649963,
"avg_line_length": 30.77777862548828,
"blob_id": "a2ad65d8ac42bdad47379e52ac092afc4f9f9f35",
"content_id": "5578565476926d57a260f56557e387cee380a6ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 18,
"path": "/.bashrc",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "\nexport JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_66.jdk/Contents/Home/\nexport IBM=/usr/local/ibm\n\n[[ -s \"$HOME/.gvm/scripts/gvm\" ]] && source \"$HOME/.gvm/scripts/gvm\"\n\n\nexport NVM_DIR=\"/Users/z/.nvm\"\n[ -s \"$NVM_DIR/nvm.sh\" ] && . \"$NVM_DIR/nvm.sh\" # This loads nvm\n\nexport REACT_EDITOR=/usr/local/bin/vim\nexport EDITOR=$REACT_EDITOR\n\n\nexport START=$START:bashrc\nexport PATH=\"$HOME/.rvm/bin:$HOME/.gvm/bin:$JAVA_HOME/bin:$IBM:/usr/local/bin:/usr/local/sbin:$PATH\" # Add RVM to PATH for scripting\n\nexport LT_HOME=\"/Applications/local\"\nexport BOOT_EMIT_TARGET=no\n"
},
{
"alpha_fraction": 0.8048780560493469,
"alphanum_fraction": 0.8048780560493469,
"avg_line_length": 41,
"blob_id": "617241f9057c13c19ca2bcf6b892b0948d6b5ec6",
"content_id": "06cbcc5195d7d899dc2fc28b9b1bdfa4831ffa40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 1,
"path": "/.zlogout",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/zsh/prezto/runcoms/zlogout"
},
{
"alpha_fraction": 0.6819267272949219,
"alphanum_fraction": 0.7271432876586914,
"avg_line_length": 42.56007766723633,
"blob_id": "d36978a397b50293c5205b37cb59015f1e44d54a",
"content_id": "34048bc52c85b89b53b792e2e54e6d8c202a39ce",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 67431,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 1548,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/erl_bif_table.h",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/*\n * Warning: Do not edit this file. It was automatically\n * generated by 'make_tables' on Thu Dec 31 02:09:59 2015.\n */\n\n#ifndef __ERL_BIF_TABLE_H__\n#define __ERL_BIF_TABLE_H__\ntypedef void *BifFunction;\n\ntypedef struct bif_entry {\n Eterm module;\n Eterm name;\n int arity;\n BifFunction f;\n BifFunction traced;\n} BifEntry;\n\nextern BifEntry bif_table[];\nextern Export* bif_export[];\n\n#define BIF_SIZE 508\n\n#define BIF_abs_1 0\n#define BIF_adler32_1 1\n#define BIF_adler32_2 2\n#define BIF_adler32_combine_3 3\n#define BIF_apply_3 4\n#define BIF_atom_to_list_1 5\n#define BIF_binary_to_list_1 6\n#define BIF_binary_to_list_3 7\n#define BIF_binary_to_term_1 8\n#define BIF_crc32_1 9\n#define BIF_crc32_2 10\n#define BIF_crc32_combine_3 11\n#define BIF_date_0 12\n#define BIF_delete_module_1 13\n#define BIF_display_1 14\n#define BIF_display_string_1 15\n#define BIF_display_nl_0 16\n#define BIF_element_2 17\n#define BIF_erase_0 18\n#define BIF_erase_1 19\n#define BIF_exit_1 20\n#define BIF_exit_2 21\n#define BIF_external_size_1 22\n#define BIF_external_size_2 23\n#define BIF_float_1 24\n#define BIF_float_to_list_1 25\n#define BIF_float_to_list_2 26\n#define BIF_fun_info_2 27\n#define BIF_garbage_collect_0 28\n#define BIF_get_0 29\n#define BIF_get_1 30\n#define BIF_get_keys_1 31\n#define BIF_group_leader_0 32\n#define BIF_group_leader_2 33\n#define BIF_halt_0 34\n#define BIF_halt_1 35\n#define BIF_halt_2 36\n#define BIF_phash_2 37\n#define BIF_phash2_1 38\n#define BIF_phash2_2 39\n#define BIF_hd_1 40\n#define BIF_integer_to_list_1 41\n#define BIF_is_alive_0 42\n#define BIF_length_1 43\n#define BIF_link_1 44\n#define BIF_list_to_atom_1 45\n#define BIF_list_to_binary_1 46\n#define BIF_list_to_float_1 47\n#define BIF_list_to_integer_1 48\n#define BIF_list_to_pid_1 49\n#define BIF_list_to_tuple_1 50\n#define BIF_loaded_0 51\n#define BIF_localtime_0 52\n#define BIF_localtime_to_universaltime_2 53\n#define BIF_make_ref_0 54\n#define BIF_unique_integer_0 55\n#define BIF_unique_integer_1 56\n#define BIF_md5_1 57\n#define BIF_md5_init_0 58\n#define BIF_md5_update_2 59\n#define BIF_md5_final_1 60\n#define BIF_module_loaded_1 61\n#define BIF_function_exported_3 62\n#define BIF_monitor_node_2 63\n#define BIF_monitor_node_3 64\n#define BIF_node_1 65\n#define BIF_node_0 66\n#define BIF_nodes_1 67\n#define BIF_now_0 68\n#define BIF_monotonic_time_0 69\n#define BIF_monotonic_time_1 70\n#define BIF_system_time_0 71\n#define BIF_system_time_1 72\n#define BIF_time_offset_0 73\n#define BIF_time_offset_1 74\n#define BIF_timestamp_0 75\n#define BIF_open_port_2 76\n#define BIF_pid_to_list_1 77\n#define BIF_ports_0 78\n#define BIF_pre_loaded_0 79\n#define BIF_process_flag_2 80\n#define BIF_process_flag_3 81\n#define BIF_process_info_1 82\n#define BIF_process_info_2 83\n#define BIF_processes_0 84\n#define BIF_purge_module_1 85\n#define BIF_put_2 86\n#define BIF_register_2 87\n#define BIF_registered_0 88\n#define BIF_round_1 89\n#define BIF_self_0 90\n#define BIF_setelement_3 91\n#define BIF_size_1 92\n#define BIF_spawn_3 93\n#define BIF_spawn_link_3 94\n#define BIF_split_binary_2 95\n#define BIF_statistics_1 96\n#define BIF_term_to_binary_1 97\n#define BIF_term_to_binary_2 98\n#define BIF_throw_1 99\n#define BIF_time_0 100\n#define BIF_tl_1 101\n#define BIF_trunc_1 102\n#define BIF_tuple_to_list_1 103\n#define BIF_universaltime_0 104\n#define BIF_universaltime_to_localtime_1 105\n#define BIF_unlink_1 106\n#define BIF_unregister_1 107\n#define BIF_whereis_1 108\n#define BIF_spawn_opt_1 109\n#define 
BIF_setnode_2 110\n#define BIF_setnode_3 111\n#define BIF_dist_exit_3 112\n#define BIF_erts_internal_port_info_1 113\n#define BIF_erts_internal_port_info_2 114\n#define BIF_erts_internal_port_call_3 115\n#define BIF_erts_internal_port_command_3 116\n#define BIF_erts_internal_port_control_3 117\n#define BIF_erts_internal_port_close_1 118\n#define BIF_erts_internal_port_connect_2 119\n#define BIF_erts_internal_request_system_task_3 120\n#define BIF_erts_internal_check_process_code_2 121\n#define BIF_erts_internal_map_to_tuple_keys_1 122\n#define BIF_erts_internal_map_type_1 123\n#define BIF_erts_internal_map_hashmap_children_1 124\n#define BIF_erts_internal_time_unit_0 125\n#define BIF_erts_internal_is_system_process_1 126\n#define BIF_port_set_data_2 127\n#define BIF_port_get_data_1 128\n#define BIF_trace_pattern_2 129\n#define BIF_trace_pattern_3 130\n#define BIF_trace_3 131\n#define BIF_trace_info_2 132\n#define BIF_trace_delivered_1 133\n#define BIF_seq_trace_2 134\n#define BIF_seq_trace_info_1 135\n#define BIF_seq_trace_print_1 136\n#define BIF_seq_trace_print_2 137\n#define BIF_suspend_process_2 138\n#define BIF_resume_process_1 139\n#define BIF_process_display_2 140\n#define BIF_bump_reductions_1 141\n#define BIF_math_cos_1 142\n#define BIF_math_cosh_1 143\n#define BIF_math_sin_1 144\n#define BIF_math_sinh_1 145\n#define BIF_math_tan_1 146\n#define BIF_math_tanh_1 147\n#define BIF_math_acos_1 148\n#define BIF_math_acosh_1 149\n#define BIF_math_asin_1 150\n#define BIF_math_asinh_1 151\n#define BIF_math_atan_1 152\n#define BIF_math_atanh_1 153\n#define BIF_math_erf_1 154\n#define BIF_math_erfc_1 155\n#define BIF_math_exp_1 156\n#define BIF_math_log_1 157\n#define BIF_math_log2_1 158\n#define BIF_math_log10_1 159\n#define BIF_math_sqrt_1 160\n#define BIF_math_atan2_2 161\n#define BIF_math_pow_2 162\n#define BIF_start_timer_3 163\n#define BIF_start_timer_4 164\n#define BIF_send_after_3 165\n#define BIF_send_after_4 166\n#define BIF_cancel_timer_1 167\n#define BIF_cancel_timer_2 168\n#define BIF_read_timer_1 169\n#define BIF_read_timer_2 170\n#define BIF_make_tuple_2 171\n#define BIF_append_element_2 172\n#define BIF_make_tuple_3 173\n#define BIF_system_flag_2 174\n#define BIF_system_info_1 175\n#define BIF_system_monitor_0 176\n#define BIF_system_monitor_1 177\n#define BIF_system_monitor_2 178\n#define BIF_system_profile_2 179\n#define BIF_system_profile_0 180\n#define BIF_ref_to_list_1 181\n#define BIF_port_to_list_1 182\n#define BIF_fun_to_list_1 183\n#define BIF_monitor_2 184\n#define BIF_demonitor_1 185\n#define BIF_demonitor_2 186\n#define BIF_is_process_alive_1 187\n#define BIF_error_1 188\n#define BIF_error_2 189\n#define BIF_raise_3 190\n#define BIF_get_stacktrace_0 191\n#define BIF_is_builtin_3 192\n#define BIF_and_2 193\n#define BIF_or_2 194\n#define BIF_xor_2 195\n#define BIF_not_1 196\n#define BIF_sgt_2 197\n#define BIF_sge_2 198\n#define BIF_slt_2 199\n#define BIF_sle_2 200\n#define BIF_seq_2 201\n#define BIF_seqeq_2 202\n#define BIF_sneq_2 203\n#define BIF_sneqeq_2 204\n#define BIF_splus_2 205\n#define BIF_sminus_2 206\n#define BIF_stimes_2 207\n#define BIF_div_2 208\n#define BIF_intdiv_2 209\n#define BIF_rem_2 210\n#define BIF_bor_2 211\n#define BIF_band_2 212\n#define BIF_bxor_2 213\n#define BIF_bsl_2 214\n#define BIF_bsr_2 215\n#define BIF_bnot_1 216\n#define BIF_sminus_1 217\n#define BIF_splus_1 218\n#define BIF_ebif_bang_2 219\n#define BIF_send_2 220\n#define BIF_send_3 221\n#define BIF_ebif_plusplus_2 222\n#define BIF_append_2 223\n#define BIF_ebif_minusminus_2 
224\n#define BIF_subtract_2 225\n#define BIF_is_atom_1 226\n#define BIF_is_list_1 227\n#define BIF_is_tuple_1 228\n#define BIF_is_float_1 229\n#define BIF_is_integer_1 230\n#define BIF_is_number_1 231\n#define BIF_is_pid_1 232\n#define BIF_is_port_1 233\n#define BIF_is_reference_1 234\n#define BIF_is_binary_1 235\n#define BIF_is_function_1 236\n#define BIF_is_function_2 237\n#define BIF_is_record_2 238\n#define BIF_is_record_3 239\n#define BIF_match_spec_test_3 240\n#define BIF_ets_all_0 241\n#define BIF_ets_new_2 242\n#define BIF_ets_delete_1 243\n#define BIF_ets_delete_2 244\n#define BIF_ets_delete_all_objects_1 245\n#define BIF_ets_delete_object_2 246\n#define BIF_ets_first_1 247\n#define BIF_ets_is_compiled_ms_1 248\n#define BIF_ets_lookup_2 249\n#define BIF_ets_lookup_element_3 250\n#define BIF_ets_info_1 251\n#define BIF_ets_info_2 252\n#define BIF_ets_last_1 253\n#define BIF_ets_match_1 254\n#define BIF_ets_match_2 255\n#define BIF_ets_match_3 256\n#define BIF_ets_match_object_1 257\n#define BIF_ets_match_object_2 258\n#define BIF_ets_match_object_3 259\n#define BIF_ets_member_2 260\n#define BIF_ets_next_2 261\n#define BIF_ets_prev_2 262\n#define BIF_ets_insert_2 263\n#define BIF_ets_insert_new_2 264\n#define BIF_ets_rename_2 265\n#define BIF_ets_safe_fixtable_2 266\n#define BIF_ets_slot_2 267\n#define BIF_ets_update_counter_3 268\n#define BIF_ets_select_1 269\n#define BIF_ets_select_2 270\n#define BIF_ets_select_3 271\n#define BIF_ets_select_count_2 272\n#define BIF_ets_select_reverse_1 273\n#define BIF_ets_select_reverse_2 274\n#define BIF_ets_select_reverse_3 275\n#define BIF_ets_select_delete_2 276\n#define BIF_ets_match_spec_compile_1 277\n#define BIF_ets_match_spec_run_r_3 278\n#define BIF_os_putenv_2 279\n#define BIF_os_getenv_0 280\n#define BIF_os_getenv_1 281\n#define BIF_os_getpid_0 282\n#define BIF_os_timestamp_0 283\n#define BIF_os_system_time_0 284\n#define BIF_os_system_time_1 285\n#define BIF_erl_ddll_try_load_3 286\n#define BIF_erl_ddll_try_unload_2 287\n#define BIF_erl_ddll_loaded_drivers_0 288\n#define BIF_erl_ddll_info_2 289\n#define BIF_erl_ddll_format_error_int_1 290\n#define BIF_erl_ddll_monitor_2 291\n#define BIF_erl_ddll_demonitor_1 292\n#define BIF_re_compile_1 293\n#define BIF_re_compile_2 294\n#define BIF_re_run_2 295\n#define BIF_re_run_3 296\n#define BIF_lists_member_2 297\n#define BIF_lists_reverse_2 298\n#define BIF_lists_keymember_3 299\n#define BIF_lists_keysearch_3 300\n#define BIF_lists_keyfind_3 301\n#define BIF_erts_debug_disassemble_1 302\n#define BIF_erts_debug_breakpoint_2 303\n#define BIF_erts_debug_same_2 304\n#define BIF_erts_debug_flat_size_1 305\n#define BIF_erts_debug_get_internal_state_1 306\n#define BIF_erts_debug_set_internal_state_2 307\n#define BIF_erts_debug_display_1 308\n#define BIF_erts_debug_dist_ext_to_term_2 309\n#define BIF_erts_debug_instructions_0 310\n#define BIF_erts_debug_dump_monitors_1 311\n#define BIF_erts_debug_dump_links_1 312\n#define BIF_erts_debug_lock_counters_1 313\n#define BIF_code_get_chunk_2 314\n#define BIF_code_module_md5_1 315\n#define BIF_code_make_stub_module_3 316\n#define BIF_code_is_module_native_1 317\n#define BIF_hibernate_3 318\n#define BIF_error_logger_warning_map_0 319\n#define BIF_get_module_info_1 320\n#define BIF_get_module_info_2 321\n#define BIF_is_boolean_1 322\n#define BIF_string_to_integer_1 323\n#define BIF_string_to_float_1 324\n#define BIF_make_fun_3 325\n#define BIF_iolist_size_1 326\n#define BIF_iolist_to_binary_1 327\n#define BIF_list_to_existing_atom_1 328\n#define 
BIF_is_bitstring_1 329\n#define BIF_tuple_size_1 330\n#define BIF_byte_size_1 331\n#define BIF_bit_size_1 332\n#define BIF_list_to_bitstring_1 333\n#define BIF_bitstring_to_list_1 334\n#define BIF_ets_update_element_3 335\n#define BIF_decode_packet_3 336\n#define BIF_unicode_characters_to_binary_2 337\n#define BIF_unicode_characters_to_list_2 338\n#define BIF_unicode_bin_is_7bit_1 339\n#define BIF_atom_to_binary_2 340\n#define BIF_binary_to_atom_2 341\n#define BIF_binary_to_existing_atom_2 342\n#define BIF_net_kernel_dflag_unicode_io_1 343\n#define BIF_ets_give_away_3 344\n#define BIF_ets_setopts_2 345\n#define BIF_load_nif_2 346\n#define BIF_call_on_load_function_1 347\n#define BIF_finish_after_on_load_2 348\n#define BIF_binary_to_term_2 349\n#define BIF_binary_part_2 350\n#define BIF_binary_part_3 351\n#define BIF_binary_compile_pattern_1 352\n#define BIF_binary_match_2 353\n#define BIF_binary_match_3 354\n#define BIF_binary_matches_2 355\n#define BIF_binary_matches_3 356\n#define BIF_binary_longest_common_prefix_1 357\n#define BIF_binary_longest_common_suffix_1 358\n#define BIF_binary_first_1 359\n#define BIF_binary_last_1 360\n#define BIF_binary_at_2 361\n#define BIF_binary_binary_part_2 362\n#define BIF_binary_binary_part_3 363\n#define BIF_binary_bin_to_list_1 364\n#define BIF_binary_bin_to_list_2 365\n#define BIF_binary_bin_to_list_3 366\n#define BIF_binary_list_to_bin_1 367\n#define BIF_binary_copy_1 368\n#define BIF_binary_copy_2 369\n#define BIF_binary_referenced_byte_size_1 370\n#define BIF_binary_encode_unsigned_1 371\n#define BIF_binary_encode_unsigned_2 372\n#define BIF_binary_decode_unsigned_1 373\n#define BIF_binary_decode_unsigned_2 374\n#define BIF_nif_error_1 375\n#define BIF_nif_error_2 376\n#define BIF_prim_file_internal_name2native_1 377\n#define BIF_prim_file_internal_native2name_1 378\n#define BIF_prim_file_internal_normalize_utf8_1 379\n#define BIF_prim_file_is_translatable_1 380\n#define BIF_file_native_name_encoding_0 381\n#define BIF_check_old_code_1 382\n#define BIF_universaltime_to_posixtime_1 383\n#define BIF_posixtime_to_universaltime_1 384\n#define BIF_dt_put_tag_1 385\n#define BIF_dt_get_tag_0 386\n#define BIF_dt_get_tag_data_0 387\n#define BIF_dt_spread_tag_1 388\n#define BIF_dt_restore_tag_1 389\n#define BIF_dt_prepend_vm_tag_data_1 390\n#define BIF_dt_append_vm_tag_data_1 391\n#define BIF_prepare_loading_2 392\n#define BIF_finish_loading_1 393\n#define BIF_insert_element_3 394\n#define BIF_delete_element_2 395\n#define BIF_binary_to_integer_1 396\n#define BIF_binary_to_integer_2 397\n#define BIF_integer_to_binary_1 398\n#define BIF_list_to_integer_2 399\n#define BIF_float_to_binary_1 400\n#define BIF_float_to_binary_2 401\n#define BIF_binary_to_float_1 402\n#define BIF_io_printable_range_0 403\n#define BIF_os_unsetenv_1 404\n#define BIF_re_inspect_2 405\n#define BIF_is_map_1 406\n#define BIF_map_size_1 407\n#define BIF_maps_to_list_1 408\n#define BIF_maps_find_2 409\n#define BIF_maps_get_2 410\n#define BIF_maps_from_list_1 411\n#define BIF_maps_is_key_2 412\n#define BIF_maps_keys_1 413\n#define BIF_maps_merge_2 414\n#define BIF_maps_new_0 415\n#define BIF_maps_put_3 416\n#define BIF_maps_remove_2 417\n#define BIF_maps_update_3 418\n#define BIF_maps_values_1 419\n#define BIF_erts_internal_cmp_term_2 420\n#define BIF_ets_take_2 421\n#define BIF_fun_info_mfa_1 422\n#define BIF_get_keys_0 423\n#define BIF_ets_update_counter_4 424\n#define BIF_erts_debug_map_info_1 425\n#define BIF_hash_2 426\n#define BIF_hipe_bifs_write_u8_2 427\n#define 
BIF_hipe_bifs_write_u32_2 428\n#define BIF_hipe_bifs_bytearray_2 429\n#define BIF_hipe_bifs_bytearray_sub_2 430\n#define BIF_hipe_bifs_bytearray_update_3 431\n#define BIF_hipe_bifs_bitarray_2 432\n#define BIF_hipe_bifs_bitarray_sub_2 433\n#define BIF_hipe_bifs_bitarray_update_3 434\n#define BIF_hipe_bifs_array_2 435\n#define BIF_hipe_bifs_array_length_1 436\n#define BIF_hipe_bifs_array_sub_2 437\n#define BIF_hipe_bifs_array_update_3 438\n#define BIF_hipe_bifs_ref_1 439\n#define BIF_hipe_bifs_ref_get_1 440\n#define BIF_hipe_bifs_ref_set_2 441\n#define BIF_hipe_bifs_enter_code_2 442\n#define BIF_hipe_bifs_alloc_data_2 443\n#define BIF_hipe_bifs_constants_size_0 444\n#define BIF_hipe_bifs_merge_term_1 445\n#define BIF_hipe_bifs_fun_to_address_1 446\n#define BIF_hipe_bifs_set_native_address_3 447\n#define BIF_hipe_bifs_set_funinfo_native_address_3 448\n#define BIF_hipe_bifs_invalidate_funinfo_native_addresses_1 449\n#define BIF_hipe_bifs_update_code_size_3 450\n#define BIF_hipe_bifs_code_size_1 451\n#define BIF_hipe_bifs_enter_sdesc_1 452\n#define BIF_hipe_bifs_bif_address_3 453\n#define BIF_hipe_bifs_primop_address_1 454\n#define BIF_hipe_bifs_atom_to_word_1 455\n#define BIF_hipe_bifs_term_to_word_1 456\n#define BIF_hipe_bifs_get_fe_2 457\n#define BIF_hipe_bifs_set_native_address_in_fe_2 458\n#define BIF_hipe_bifs_find_na_or_make_stub_2 459\n#define BIF_hipe_bifs_check_crc_1 460\n#define BIF_hipe_bifs_system_crc_0 461\n#define BIF_hipe_bifs_get_rts_param_1 462\n#define BIF_hipe_bifs_patch_insn_3 463\n#define BIF_hipe_bifs_patch_call_3 464\n#define BIF_hipe_bifs_add_ref_2 465\n#define BIF_hipe_bifs_mark_referred_from_1 466\n#define BIF_hipe_bifs_remove_refs_from_1 467\n#define BIF_hipe_bifs_redirect_referred_from_1 468\n#define BIF_hipe_bifs_call_count_on_1 469\n#define BIF_hipe_bifs_call_count_off_1 470\n#define BIF_hipe_bifs_call_count_get_1 471\n#define BIF_hipe_bifs_call_count_clear_1 472\n#define BIF_hipe_bifs_trap_count_get_0 473\n#define BIF_hipe_bifs_trap_count_clear_0 474\n#define BIF_hipe_bifs_process_info_0 475\n#define BIF_hipe_bifs_process_info_clear_0 476\n#define BIF_hipe_bifs_message_info_0 477\n#define BIF_hipe_bifs_message_info_clear_0 478\n#define BIF_hipe_bifs_message_sizes_0 479\n#define BIF_hipe_bifs_gc_info_0 480\n#define BIF_hipe_bifs_shared_gc_info_0 481\n#define BIF_hipe_bifs_incremental_gc_info_0 482\n#define BIF_hipe_bifs_gc_info_clear_0 483\n#define BIF_hipe_bifs_pause_times_0 484\n#define BIF_hipe_bifs_system_timer_0 485\n#define BIF_hipe_bifs_system_timer_clear_0 486\n#define BIF_hipe_bifs_send_timer_0 487\n#define BIF_hipe_bifs_send_timer_clear_0 488\n#define BIF_hipe_bifs_gc_timer_0 489\n#define BIF_hipe_bifs_shared_gc_timer_0 490\n#define BIF_hipe_bifs_gc_timer_clear_0 491\n#define BIF_hipe_bifs_misc_timer_0 492\n#define BIF_hipe_bifs_misc_timer_clear_0 493\n#define BIF_hipe_bifs_get_hrvtime_0 494\n#define BIF_hipe_bifs_stop_hrvtime_0 495\n#define BIF_hipe_bifs_show_estack_1 496\n#define BIF_hipe_bifs_show_heap_1 497\n#define BIF_hipe_bifs_show_nstack_1 498\n#define BIF_hipe_bifs_nstack_used_size_0 499\n#define BIF_hipe_bifs_show_pcb_1 500\n#define BIF_hipe_bifs_show_term_1 501\n#define BIF_hipe_bifs_in_native_0 502\n#define BIF_hipe_bifs_modeswitch_debug_on_0 503\n#define BIF_hipe_bifs_modeswitch_debug_off_0 504\n#define BIF_hipe_bifs_debug_native_called_2 505\n#define BIF_hipe_bifs_llvm_fix_pinned_regs_0 506\n#define BIF_hipe_bifs_write_u64_2 507\n\nEterm abs_1(Process*, Eterm*);\nEterm wrap_abs_1(Process*, Eterm*, UWord *I);\nEterm adler32_1(Process*, 
Eterm*);\nEterm wrap_adler32_1(Process*, Eterm*, UWord *I);\nEterm adler32_2(Process*, Eterm*);\nEterm wrap_adler32_2(Process*, Eterm*, UWord *I);\nEterm adler32_combine_3(Process*, Eterm*);\nEterm wrap_adler32_combine_3(Process*, Eterm*, UWord *I);\nEterm apply_3(Process*, Eterm*);\nEterm wrap_apply_3(Process*, Eterm*, UWord *I);\nEterm atom_to_list_1(Process*, Eterm*);\nEterm wrap_atom_to_list_1(Process*, Eterm*, UWord *I);\nEterm binary_to_list_1(Process*, Eterm*);\nEterm wrap_binary_to_list_1(Process*, Eterm*, UWord *I);\nEterm binary_to_list_3(Process*, Eterm*);\nEterm wrap_binary_to_list_3(Process*, Eterm*, UWord *I);\nEterm binary_to_term_1(Process*, Eterm*);\nEterm wrap_binary_to_term_1(Process*, Eterm*, UWord *I);\nEterm crc32_1(Process*, Eterm*);\nEterm wrap_crc32_1(Process*, Eterm*, UWord *I);\nEterm crc32_2(Process*, Eterm*);\nEterm wrap_crc32_2(Process*, Eterm*, UWord *I);\nEterm crc32_combine_3(Process*, Eterm*);\nEterm wrap_crc32_combine_3(Process*, Eterm*, UWord *I);\nEterm date_0(Process*, Eterm*);\nEterm wrap_date_0(Process*, Eterm*, UWord *I);\nEterm delete_module_1(Process*, Eterm*);\nEterm wrap_delete_module_1(Process*, Eterm*, UWord *I);\nEterm display_1(Process*, Eterm*);\nEterm wrap_display_1(Process*, Eterm*, UWord *I);\nEterm display_string_1(Process*, Eterm*);\nEterm wrap_display_string_1(Process*, Eterm*, UWord *I);\nEterm display_nl_0(Process*, Eterm*);\nEterm wrap_display_nl_0(Process*, Eterm*, UWord *I);\nEterm element_2(Process*, Eterm*);\nEterm wrap_element_2(Process*, Eterm*, UWord *I);\nEterm erase_0(Process*, Eterm*);\nEterm wrap_erase_0(Process*, Eterm*, UWord *I);\nEterm erase_1(Process*, Eterm*);\nEterm wrap_erase_1(Process*, Eterm*, UWord *I);\nEterm exit_1(Process*, Eterm*);\nEterm wrap_exit_1(Process*, Eterm*, UWord *I);\nEterm exit_2(Process*, Eterm*);\nEterm wrap_exit_2(Process*, Eterm*, UWord *I);\nEterm external_size_1(Process*, Eterm*);\nEterm wrap_external_size_1(Process*, Eterm*, UWord *I);\nEterm external_size_2(Process*, Eterm*);\nEterm wrap_external_size_2(Process*, Eterm*, UWord *I);\nEterm float_1(Process*, Eterm*);\nEterm wrap_float_1(Process*, Eterm*, UWord *I);\nEterm float_to_list_1(Process*, Eterm*);\nEterm wrap_float_to_list_1(Process*, Eterm*, UWord *I);\nEterm float_to_list_2(Process*, Eterm*);\nEterm wrap_float_to_list_2(Process*, Eterm*, UWord *I);\nEterm fun_info_2(Process*, Eterm*);\nEterm wrap_fun_info_2(Process*, Eterm*, UWord *I);\nEterm garbage_collect_0(Process*, Eterm*);\nEterm wrap_garbage_collect_0(Process*, Eterm*, UWord *I);\nEterm get_0(Process*, Eterm*);\nEterm wrap_get_0(Process*, Eterm*, UWord *I);\nEterm get_1(Process*, Eterm*);\nEterm wrap_get_1(Process*, Eterm*, UWord *I);\nEterm get_keys_1(Process*, Eterm*);\nEterm wrap_get_keys_1(Process*, Eterm*, UWord *I);\nEterm group_leader_0(Process*, Eterm*);\nEterm wrap_group_leader_0(Process*, Eterm*, UWord *I);\nEterm group_leader_2(Process*, Eterm*);\nEterm wrap_group_leader_2(Process*, Eterm*, UWord *I);\nEterm halt_0(Process*, Eterm*);\nEterm wrap_halt_0(Process*, Eterm*, UWord *I);\nEterm halt_1(Process*, Eterm*);\nEterm wrap_halt_1(Process*, Eterm*, UWord *I);\nEterm halt_2(Process*, Eterm*);\nEterm wrap_halt_2(Process*, Eterm*, UWord *I);\nEterm phash_2(Process*, Eterm*);\nEterm wrap_phash_2(Process*, Eterm*, UWord *I);\nEterm phash2_1(Process*, Eterm*);\nEterm wrap_phash2_1(Process*, Eterm*, UWord *I);\nEterm phash2_2(Process*, Eterm*);\nEterm wrap_phash2_2(Process*, Eterm*, UWord *I);\nEterm hd_1(Process*, Eterm*);\nEterm wrap_hd_1(Process*, Eterm*, UWord 
*I);\nEterm integer_to_list_1(Process*, Eterm*);\nEterm wrap_integer_to_list_1(Process*, Eterm*, UWord *I);\nEterm is_alive_0(Process*, Eterm*);\nEterm wrap_is_alive_0(Process*, Eterm*, UWord *I);\nEterm length_1(Process*, Eterm*);\nEterm wrap_length_1(Process*, Eterm*, UWord *I);\nEterm link_1(Process*, Eterm*);\nEterm wrap_link_1(Process*, Eterm*, UWord *I);\nEterm list_to_atom_1(Process*, Eterm*);\nEterm wrap_list_to_atom_1(Process*, Eterm*, UWord *I);\nEterm list_to_binary_1(Process*, Eterm*);\nEterm wrap_list_to_binary_1(Process*, Eterm*, UWord *I);\nEterm list_to_float_1(Process*, Eterm*);\nEterm wrap_list_to_float_1(Process*, Eterm*, UWord *I);\nEterm list_to_integer_1(Process*, Eterm*);\nEterm wrap_list_to_integer_1(Process*, Eterm*, UWord *I);\nEterm list_to_pid_1(Process*, Eterm*);\nEterm wrap_list_to_pid_1(Process*, Eterm*, UWord *I);\nEterm list_to_tuple_1(Process*, Eterm*);\nEterm wrap_list_to_tuple_1(Process*, Eterm*, UWord *I);\nEterm loaded_0(Process*, Eterm*);\nEterm wrap_loaded_0(Process*, Eterm*, UWord *I);\nEterm localtime_0(Process*, Eterm*);\nEterm wrap_localtime_0(Process*, Eterm*, UWord *I);\nEterm localtime_to_universaltime_2(Process*, Eterm*);\nEterm wrap_localtime_to_universaltime_2(Process*, Eterm*, UWord *I);\nEterm make_ref_0(Process*, Eterm*);\nEterm wrap_make_ref_0(Process*, Eterm*, UWord *I);\nEterm unique_integer_0(Process*, Eterm*);\nEterm wrap_unique_integer_0(Process*, Eterm*, UWord *I);\nEterm unique_integer_1(Process*, Eterm*);\nEterm wrap_unique_integer_1(Process*, Eterm*, UWord *I);\nEterm md5_1(Process*, Eterm*);\nEterm wrap_md5_1(Process*, Eterm*, UWord *I);\nEterm md5_init_0(Process*, Eterm*);\nEterm wrap_md5_init_0(Process*, Eterm*, UWord *I);\nEterm md5_update_2(Process*, Eterm*);\nEterm wrap_md5_update_2(Process*, Eterm*, UWord *I);\nEterm md5_final_1(Process*, Eterm*);\nEterm wrap_md5_final_1(Process*, Eterm*, UWord *I);\nEterm module_loaded_1(Process*, Eterm*);\nEterm wrap_module_loaded_1(Process*, Eterm*, UWord *I);\nEterm function_exported_3(Process*, Eterm*);\nEterm wrap_function_exported_3(Process*, Eterm*, UWord *I);\nEterm monitor_node_2(Process*, Eterm*);\nEterm wrap_monitor_node_2(Process*, Eterm*, UWord *I);\nEterm monitor_node_3(Process*, Eterm*);\nEterm wrap_monitor_node_3(Process*, Eterm*, UWord *I);\nEterm node_1(Process*, Eterm*);\nEterm wrap_node_1(Process*, Eterm*, UWord *I);\nEterm node_0(Process*, Eterm*);\nEterm wrap_node_0(Process*, Eterm*, UWord *I);\nEterm nodes_1(Process*, Eterm*);\nEterm wrap_nodes_1(Process*, Eterm*, UWord *I);\nEterm now_0(Process*, Eterm*);\nEterm wrap_now_0(Process*, Eterm*, UWord *I);\nEterm monotonic_time_0(Process*, Eterm*);\nEterm wrap_monotonic_time_0(Process*, Eterm*, UWord *I);\nEterm monotonic_time_1(Process*, Eterm*);\nEterm wrap_monotonic_time_1(Process*, Eterm*, UWord *I);\nEterm system_time_0(Process*, Eterm*);\nEterm wrap_system_time_0(Process*, Eterm*, UWord *I);\nEterm system_time_1(Process*, Eterm*);\nEterm wrap_system_time_1(Process*, Eterm*, UWord *I);\nEterm time_offset_0(Process*, Eterm*);\nEterm wrap_time_offset_0(Process*, Eterm*, UWord *I);\nEterm time_offset_1(Process*, Eterm*);\nEterm wrap_time_offset_1(Process*, Eterm*, UWord *I);\nEterm timestamp_0(Process*, Eterm*);\nEterm wrap_timestamp_0(Process*, Eterm*, UWord *I);\nEterm open_port_2(Process*, Eterm*);\nEterm wrap_open_port_2(Process*, Eterm*, UWord *I);\nEterm pid_to_list_1(Process*, Eterm*);\nEterm wrap_pid_to_list_1(Process*, Eterm*, UWord *I);\nEterm ports_0(Process*, Eterm*);\nEterm wrap_ports_0(Process*, 
Eterm*, UWord *I);\nEterm pre_loaded_0(Process*, Eterm*);\nEterm wrap_pre_loaded_0(Process*, Eterm*, UWord *I);\nEterm process_flag_2(Process*, Eterm*);\nEterm wrap_process_flag_2(Process*, Eterm*, UWord *I);\nEterm process_flag_3(Process*, Eterm*);\nEterm wrap_process_flag_3(Process*, Eterm*, UWord *I);\nEterm process_info_1(Process*, Eterm*);\nEterm wrap_process_info_1(Process*, Eterm*, UWord *I);\nEterm process_info_2(Process*, Eterm*);\nEterm wrap_process_info_2(Process*, Eterm*, UWord *I);\nEterm processes_0(Process*, Eterm*);\nEterm wrap_processes_0(Process*, Eterm*, UWord *I);\nEterm purge_module_1(Process*, Eterm*);\nEterm wrap_purge_module_1(Process*, Eterm*, UWord *I);\nEterm put_2(Process*, Eterm*);\nEterm wrap_put_2(Process*, Eterm*, UWord *I);\nEterm register_2(Process*, Eterm*);\nEterm wrap_register_2(Process*, Eterm*, UWord *I);\nEterm registered_0(Process*, Eterm*);\nEterm wrap_registered_0(Process*, Eterm*, UWord *I);\nEterm round_1(Process*, Eterm*);\nEterm wrap_round_1(Process*, Eterm*, UWord *I);\nEterm self_0(Process*, Eterm*);\nEterm wrap_self_0(Process*, Eterm*, UWord *I);\nEterm setelement_3(Process*, Eterm*);\nEterm wrap_setelement_3(Process*, Eterm*, UWord *I);\nEterm size_1(Process*, Eterm*);\nEterm wrap_size_1(Process*, Eterm*, UWord *I);\nEterm spawn_3(Process*, Eterm*);\nEterm wrap_spawn_3(Process*, Eterm*, UWord *I);\nEterm spawn_link_3(Process*, Eterm*);\nEterm wrap_spawn_link_3(Process*, Eterm*, UWord *I);\nEterm split_binary_2(Process*, Eterm*);\nEterm wrap_split_binary_2(Process*, Eterm*, UWord *I);\nEterm statistics_1(Process*, Eterm*);\nEterm wrap_statistics_1(Process*, Eterm*, UWord *I);\nEterm term_to_binary_1(Process*, Eterm*);\nEterm wrap_term_to_binary_1(Process*, Eterm*, UWord *I);\nEterm term_to_binary_2(Process*, Eterm*);\nEterm wrap_term_to_binary_2(Process*, Eterm*, UWord *I);\nEterm throw_1(Process*, Eterm*);\nEterm wrap_throw_1(Process*, Eterm*, UWord *I);\nEterm time_0(Process*, Eterm*);\nEterm wrap_time_0(Process*, Eterm*, UWord *I);\nEterm tl_1(Process*, Eterm*);\nEterm wrap_tl_1(Process*, Eterm*, UWord *I);\nEterm trunc_1(Process*, Eterm*);\nEterm wrap_trunc_1(Process*, Eterm*, UWord *I);\nEterm tuple_to_list_1(Process*, Eterm*);\nEterm wrap_tuple_to_list_1(Process*, Eterm*, UWord *I);\nEterm universaltime_0(Process*, Eterm*);\nEterm wrap_universaltime_0(Process*, Eterm*, UWord *I);\nEterm universaltime_to_localtime_1(Process*, Eterm*);\nEterm wrap_universaltime_to_localtime_1(Process*, Eterm*, UWord *I);\nEterm unlink_1(Process*, Eterm*);\nEterm wrap_unlink_1(Process*, Eterm*, UWord *I);\nEterm unregister_1(Process*, Eterm*);\nEterm wrap_unregister_1(Process*, Eterm*, UWord *I);\nEterm whereis_1(Process*, Eterm*);\nEterm wrap_whereis_1(Process*, Eterm*, UWord *I);\nEterm spawn_opt_1(Process*, Eterm*);\nEterm wrap_spawn_opt_1(Process*, Eterm*, UWord *I);\nEterm setnode_2(Process*, Eterm*);\nEterm wrap_setnode_2(Process*, Eterm*, UWord *I);\nEterm setnode_3(Process*, Eterm*);\nEterm wrap_setnode_3(Process*, Eterm*, UWord *I);\nEterm dist_exit_3(Process*, Eterm*);\nEterm wrap_dist_exit_3(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_info_1(Process*, Eterm*);\nEterm wrap_erts_internal_port_info_1(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_info_2(Process*, Eterm*);\nEterm wrap_erts_internal_port_info_2(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_call_3(Process*, Eterm*);\nEterm wrap_erts_internal_port_call_3(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_command_3(Process*, Eterm*);\nEterm 
wrap_erts_internal_port_command_3(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_control_3(Process*, Eterm*);\nEterm wrap_erts_internal_port_control_3(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_close_1(Process*, Eterm*);\nEterm wrap_erts_internal_port_close_1(Process*, Eterm*, UWord *I);\nEterm erts_internal_port_connect_2(Process*, Eterm*);\nEterm wrap_erts_internal_port_connect_2(Process*, Eterm*, UWord *I);\nEterm erts_internal_request_system_task_3(Process*, Eterm*);\nEterm wrap_erts_internal_request_system_task_3(Process*, Eterm*, UWord *I);\nEterm erts_internal_check_process_code_2(Process*, Eterm*);\nEterm wrap_erts_internal_check_process_code_2(Process*, Eterm*, UWord *I);\nEterm erts_internal_map_to_tuple_keys_1(Process*, Eterm*);\nEterm wrap_erts_internal_map_to_tuple_keys_1(Process*, Eterm*, UWord *I);\nEterm erts_internal_map_type_1(Process*, Eterm*);\nEterm wrap_erts_internal_map_type_1(Process*, Eterm*, UWord *I);\nEterm erts_internal_map_hashmap_children_1(Process*, Eterm*);\nEterm wrap_erts_internal_map_hashmap_children_1(Process*, Eterm*, UWord *I);\nEterm erts_internal_time_unit_0(Process*, Eterm*);\nEterm wrap_erts_internal_time_unit_0(Process*, Eterm*, UWord *I);\nEterm erts_internal_is_system_process_1(Process*, Eterm*);\nEterm wrap_erts_internal_is_system_process_1(Process*, Eterm*, UWord *I);\nEterm port_set_data_2(Process*, Eterm*);\nEterm wrap_port_set_data_2(Process*, Eterm*, UWord *I);\nEterm port_get_data_1(Process*, Eterm*);\nEterm wrap_port_get_data_1(Process*, Eterm*, UWord *I);\nEterm trace_pattern_2(Process*, Eterm*);\nEterm wrap_trace_pattern_2(Process*, Eterm*, UWord *I);\nEterm trace_pattern_3(Process*, Eterm*);\nEterm wrap_trace_pattern_3(Process*, Eterm*, UWord *I);\nEterm trace_3(Process*, Eterm*);\nEterm wrap_trace_3(Process*, Eterm*, UWord *I);\nEterm trace_info_2(Process*, Eterm*);\nEterm wrap_trace_info_2(Process*, Eterm*, UWord *I);\nEterm trace_delivered_1(Process*, Eterm*);\nEterm wrap_trace_delivered_1(Process*, Eterm*, UWord *I);\nEterm seq_trace_2(Process*, Eterm*);\nEterm wrap_seq_trace_2(Process*, Eterm*, UWord *I);\nEterm seq_trace_info_1(Process*, Eterm*);\nEterm wrap_seq_trace_info_1(Process*, Eterm*, UWord *I);\nEterm seq_trace_print_1(Process*, Eterm*);\nEterm wrap_seq_trace_print_1(Process*, Eterm*, UWord *I);\nEterm seq_trace_print_2(Process*, Eterm*);\nEterm wrap_seq_trace_print_2(Process*, Eterm*, UWord *I);\nEterm suspend_process_2(Process*, Eterm*);\nEterm wrap_suspend_process_2(Process*, Eterm*, UWord *I);\nEterm resume_process_1(Process*, Eterm*);\nEterm wrap_resume_process_1(Process*, Eterm*, UWord *I);\nEterm process_display_2(Process*, Eterm*);\nEterm wrap_process_display_2(Process*, Eterm*, UWord *I);\nEterm bump_reductions_1(Process*, Eterm*);\nEterm wrap_bump_reductions_1(Process*, Eterm*, UWord *I);\nEterm math_cos_1(Process*, Eterm*);\nEterm wrap_math_cos_1(Process*, Eterm*, UWord *I);\nEterm math_cosh_1(Process*, Eterm*);\nEterm wrap_math_cosh_1(Process*, Eterm*, UWord *I);\nEterm math_sin_1(Process*, Eterm*);\nEterm wrap_math_sin_1(Process*, Eterm*, UWord *I);\nEterm math_sinh_1(Process*, Eterm*);\nEterm wrap_math_sinh_1(Process*, Eterm*, UWord *I);\nEterm math_tan_1(Process*, Eterm*);\nEterm wrap_math_tan_1(Process*, Eterm*, UWord *I);\nEterm math_tanh_1(Process*, Eterm*);\nEterm wrap_math_tanh_1(Process*, Eterm*, UWord *I);\nEterm math_acos_1(Process*, Eterm*);\nEterm wrap_math_acos_1(Process*, Eterm*, UWord *I);\nEterm math_acosh_1(Process*, Eterm*);\nEterm wrap_math_acosh_1(Process*, Eterm*, 
UWord *I);\nEterm math_asin_1(Process*, Eterm*);\nEterm wrap_math_asin_1(Process*, Eterm*, UWord *I);\nEterm math_asinh_1(Process*, Eterm*);\nEterm wrap_math_asinh_1(Process*, Eterm*, UWord *I);\nEterm math_atan_1(Process*, Eterm*);\nEterm wrap_math_atan_1(Process*, Eterm*, UWord *I);\nEterm math_atanh_1(Process*, Eterm*);\nEterm wrap_math_atanh_1(Process*, Eterm*, UWord *I);\nEterm math_erf_1(Process*, Eterm*);\nEterm wrap_math_erf_1(Process*, Eterm*, UWord *I);\nEterm math_erfc_1(Process*, Eterm*);\nEterm wrap_math_erfc_1(Process*, Eterm*, UWord *I);\nEterm math_exp_1(Process*, Eterm*);\nEterm wrap_math_exp_1(Process*, Eterm*, UWord *I);\nEterm math_log_1(Process*, Eterm*);\nEterm wrap_math_log_1(Process*, Eterm*, UWord *I);\nEterm math_log2_1(Process*, Eterm*);\nEterm wrap_math_log2_1(Process*, Eterm*, UWord *I);\nEterm math_log10_1(Process*, Eterm*);\nEterm wrap_math_log10_1(Process*, Eterm*, UWord *I);\nEterm math_sqrt_1(Process*, Eterm*);\nEterm wrap_math_sqrt_1(Process*, Eterm*, UWord *I);\nEterm math_atan2_2(Process*, Eterm*);\nEterm wrap_math_atan2_2(Process*, Eterm*, UWord *I);\nEterm math_pow_2(Process*, Eterm*);\nEterm wrap_math_pow_2(Process*, Eterm*, UWord *I);\nEterm start_timer_3(Process*, Eterm*);\nEterm wrap_start_timer_3(Process*, Eterm*, UWord *I);\nEterm start_timer_4(Process*, Eterm*);\nEterm wrap_start_timer_4(Process*, Eterm*, UWord *I);\nEterm send_after_3(Process*, Eterm*);\nEterm wrap_send_after_3(Process*, Eterm*, UWord *I);\nEterm send_after_4(Process*, Eterm*);\nEterm wrap_send_after_4(Process*, Eterm*, UWord *I);\nEterm cancel_timer_1(Process*, Eterm*);\nEterm wrap_cancel_timer_1(Process*, Eterm*, UWord *I);\nEterm cancel_timer_2(Process*, Eterm*);\nEterm wrap_cancel_timer_2(Process*, Eterm*, UWord *I);\nEterm read_timer_1(Process*, Eterm*);\nEterm wrap_read_timer_1(Process*, Eterm*, UWord *I);\nEterm read_timer_2(Process*, Eterm*);\nEterm wrap_read_timer_2(Process*, Eterm*, UWord *I);\nEterm make_tuple_2(Process*, Eterm*);\nEterm wrap_make_tuple_2(Process*, Eterm*, UWord *I);\nEterm append_element_2(Process*, Eterm*);\nEterm wrap_append_element_2(Process*, Eterm*, UWord *I);\nEterm make_tuple_3(Process*, Eterm*);\nEterm wrap_make_tuple_3(Process*, Eterm*, UWord *I);\nEterm system_flag_2(Process*, Eterm*);\nEterm wrap_system_flag_2(Process*, Eterm*, UWord *I);\nEterm system_info_1(Process*, Eterm*);\nEterm wrap_system_info_1(Process*, Eterm*, UWord *I);\nEterm system_monitor_0(Process*, Eterm*);\nEterm wrap_system_monitor_0(Process*, Eterm*, UWord *I);\nEterm system_monitor_1(Process*, Eterm*);\nEterm wrap_system_monitor_1(Process*, Eterm*, UWord *I);\nEterm system_monitor_2(Process*, Eterm*);\nEterm wrap_system_monitor_2(Process*, Eterm*, UWord *I);\nEterm system_profile_2(Process*, Eterm*);\nEterm wrap_system_profile_2(Process*, Eterm*, UWord *I);\nEterm system_profile_0(Process*, Eterm*);\nEterm wrap_system_profile_0(Process*, Eterm*, UWord *I);\nEterm ref_to_list_1(Process*, Eterm*);\nEterm wrap_ref_to_list_1(Process*, Eterm*, UWord *I);\nEterm port_to_list_1(Process*, Eterm*);\nEterm wrap_port_to_list_1(Process*, Eterm*, UWord *I);\nEterm fun_to_list_1(Process*, Eterm*);\nEterm wrap_fun_to_list_1(Process*, Eterm*, UWord *I);\nEterm monitor_2(Process*, Eterm*);\nEterm wrap_monitor_2(Process*, Eterm*, UWord *I);\nEterm demonitor_1(Process*, Eterm*);\nEterm wrap_demonitor_1(Process*, Eterm*, UWord *I);\nEterm demonitor_2(Process*, Eterm*);\nEterm wrap_demonitor_2(Process*, Eterm*, UWord *I);\nEterm is_process_alive_1(Process*, Eterm*);\nEterm 
wrap_is_process_alive_1(Process*, Eterm*, UWord *I);\nEterm error_1(Process*, Eterm*);\nEterm wrap_error_1(Process*, Eterm*, UWord *I);\nEterm error_2(Process*, Eterm*);\nEterm wrap_error_2(Process*, Eterm*, UWord *I);\nEterm raise_3(Process*, Eterm*);\nEterm wrap_raise_3(Process*, Eterm*, UWord *I);\nEterm get_stacktrace_0(Process*, Eterm*);\nEterm wrap_get_stacktrace_0(Process*, Eterm*, UWord *I);\nEterm is_builtin_3(Process*, Eterm*);\nEterm wrap_is_builtin_3(Process*, Eterm*, UWord *I);\nEterm and_2(Process*, Eterm*);\nEterm wrap_and_2(Process*, Eterm*, UWord *I);\nEterm or_2(Process*, Eterm*);\nEterm wrap_or_2(Process*, Eterm*, UWord *I);\nEterm xor_2(Process*, Eterm*);\nEterm wrap_xor_2(Process*, Eterm*, UWord *I);\nEterm not_1(Process*, Eterm*);\nEterm wrap_not_1(Process*, Eterm*, UWord *I);\nEterm sgt_2(Process*, Eterm*);\nEterm wrap_sgt_2(Process*, Eterm*, UWord *I);\nEterm sge_2(Process*, Eterm*);\nEterm wrap_sge_2(Process*, Eterm*, UWord *I);\nEterm slt_2(Process*, Eterm*);\nEterm wrap_slt_2(Process*, Eterm*, UWord *I);\nEterm sle_2(Process*, Eterm*);\nEterm wrap_sle_2(Process*, Eterm*, UWord *I);\nEterm seq_2(Process*, Eterm*);\nEterm wrap_seq_2(Process*, Eterm*, UWord *I);\nEterm seqeq_2(Process*, Eterm*);\nEterm wrap_seqeq_2(Process*, Eterm*, UWord *I);\nEterm sneq_2(Process*, Eterm*);\nEterm wrap_sneq_2(Process*, Eterm*, UWord *I);\nEterm sneqeq_2(Process*, Eterm*);\nEterm wrap_sneqeq_2(Process*, Eterm*, UWord *I);\nEterm splus_2(Process*, Eterm*);\nEterm wrap_splus_2(Process*, Eterm*, UWord *I);\nEterm sminus_2(Process*, Eterm*);\nEterm wrap_sminus_2(Process*, Eterm*, UWord *I);\nEterm stimes_2(Process*, Eterm*);\nEterm wrap_stimes_2(Process*, Eterm*, UWord *I);\nEterm div_2(Process*, Eterm*);\nEterm wrap_div_2(Process*, Eterm*, UWord *I);\nEterm intdiv_2(Process*, Eterm*);\nEterm wrap_intdiv_2(Process*, Eterm*, UWord *I);\nEterm rem_2(Process*, Eterm*);\nEterm wrap_rem_2(Process*, Eterm*, UWord *I);\nEterm bor_2(Process*, Eterm*);\nEterm wrap_bor_2(Process*, Eterm*, UWord *I);\nEterm band_2(Process*, Eterm*);\nEterm wrap_band_2(Process*, Eterm*, UWord *I);\nEterm bxor_2(Process*, Eterm*);\nEterm wrap_bxor_2(Process*, Eterm*, UWord *I);\nEterm bsl_2(Process*, Eterm*);\nEterm wrap_bsl_2(Process*, Eterm*, UWord *I);\nEterm bsr_2(Process*, Eterm*);\nEterm wrap_bsr_2(Process*, Eterm*, UWord *I);\nEterm bnot_1(Process*, Eterm*);\nEterm wrap_bnot_1(Process*, Eterm*, UWord *I);\nEterm sminus_1(Process*, Eterm*);\nEterm wrap_sminus_1(Process*, Eterm*, UWord *I);\nEterm splus_1(Process*, Eterm*);\nEterm wrap_splus_1(Process*, Eterm*, UWord *I);\nEterm ebif_bang_2(Process*, Eterm*);\nEterm wrap_ebif_bang_2(Process*, Eterm*, UWord *I);\nEterm send_2(Process*, Eterm*);\nEterm wrap_send_2(Process*, Eterm*, UWord *I);\nEterm send_3(Process*, Eterm*);\nEterm wrap_send_3(Process*, Eterm*, UWord *I);\nEterm ebif_plusplus_2(Process*, Eterm*);\nEterm wrap_ebif_plusplus_2(Process*, Eterm*, UWord *I);\nEterm append_2(Process*, Eterm*);\nEterm wrap_append_2(Process*, Eterm*, UWord *I);\nEterm ebif_minusminus_2(Process*, Eterm*);\nEterm wrap_ebif_minusminus_2(Process*, Eterm*, UWord *I);\nEterm subtract_2(Process*, Eterm*);\nEterm wrap_subtract_2(Process*, Eterm*, UWord *I);\nEterm is_atom_1(Process*, Eterm*);\nEterm wrap_is_atom_1(Process*, Eterm*, UWord *I);\nEterm is_list_1(Process*, Eterm*);\nEterm wrap_is_list_1(Process*, Eterm*, UWord *I);\nEterm is_tuple_1(Process*, Eterm*);\nEterm wrap_is_tuple_1(Process*, Eterm*, UWord *I);\nEterm is_float_1(Process*, Eterm*);\nEterm 
wrap_is_float_1(Process*, Eterm*, UWord *I);\nEterm is_integer_1(Process*, Eterm*);\nEterm wrap_is_integer_1(Process*, Eterm*, UWord *I);\nEterm is_number_1(Process*, Eterm*);\nEterm wrap_is_number_1(Process*, Eterm*, UWord *I);\nEterm is_pid_1(Process*, Eterm*);\nEterm wrap_is_pid_1(Process*, Eterm*, UWord *I);\nEterm is_port_1(Process*, Eterm*);\nEterm wrap_is_port_1(Process*, Eterm*, UWord *I);\nEterm is_reference_1(Process*, Eterm*);\nEterm wrap_is_reference_1(Process*, Eterm*, UWord *I);\nEterm is_binary_1(Process*, Eterm*);\nEterm wrap_is_binary_1(Process*, Eterm*, UWord *I);\nEterm is_function_1(Process*, Eterm*);\nEterm wrap_is_function_1(Process*, Eterm*, UWord *I);\nEterm is_function_2(Process*, Eterm*);\nEterm wrap_is_function_2(Process*, Eterm*, UWord *I);\nEterm is_record_2(Process*, Eterm*);\nEterm wrap_is_record_2(Process*, Eterm*, UWord *I);\nEterm is_record_3(Process*, Eterm*);\nEterm wrap_is_record_3(Process*, Eterm*, UWord *I);\nEterm match_spec_test_3(Process*, Eterm*);\nEterm wrap_match_spec_test_3(Process*, Eterm*, UWord *I);\nEterm ets_all_0(Process*, Eterm*);\nEterm wrap_ets_all_0(Process*, Eterm*, UWord *I);\nEterm ets_new_2(Process*, Eterm*);\nEterm wrap_ets_new_2(Process*, Eterm*, UWord *I);\nEterm ets_delete_1(Process*, Eterm*);\nEterm wrap_ets_delete_1(Process*, Eterm*, UWord *I);\nEterm ets_delete_2(Process*, Eterm*);\nEterm wrap_ets_delete_2(Process*, Eterm*, UWord *I);\nEterm ets_delete_all_objects_1(Process*, Eterm*);\nEterm wrap_ets_delete_all_objects_1(Process*, Eterm*, UWord *I);\nEterm ets_delete_object_2(Process*, Eterm*);\nEterm wrap_ets_delete_object_2(Process*, Eterm*, UWord *I);\nEterm ets_first_1(Process*, Eterm*);\nEterm wrap_ets_first_1(Process*, Eterm*, UWord *I);\nEterm ets_is_compiled_ms_1(Process*, Eterm*);\nEterm wrap_ets_is_compiled_ms_1(Process*, Eterm*, UWord *I);\nEterm ets_lookup_2(Process*, Eterm*);\nEterm wrap_ets_lookup_2(Process*, Eterm*, UWord *I);\nEterm ets_lookup_element_3(Process*, Eterm*);\nEterm wrap_ets_lookup_element_3(Process*, Eterm*, UWord *I);\nEterm ets_info_1(Process*, Eterm*);\nEterm wrap_ets_info_1(Process*, Eterm*, UWord *I);\nEterm ets_info_2(Process*, Eterm*);\nEterm wrap_ets_info_2(Process*, Eterm*, UWord *I);\nEterm ets_last_1(Process*, Eterm*);\nEterm wrap_ets_last_1(Process*, Eterm*, UWord *I);\nEterm ets_match_1(Process*, Eterm*);\nEterm wrap_ets_match_1(Process*, Eterm*, UWord *I);\nEterm ets_match_2(Process*, Eterm*);\nEterm wrap_ets_match_2(Process*, Eterm*, UWord *I);\nEterm ets_match_3(Process*, Eterm*);\nEterm wrap_ets_match_3(Process*, Eterm*, UWord *I);\nEterm ets_match_object_1(Process*, Eterm*);\nEterm wrap_ets_match_object_1(Process*, Eterm*, UWord *I);\nEterm ets_match_object_2(Process*, Eterm*);\nEterm wrap_ets_match_object_2(Process*, Eterm*, UWord *I);\nEterm ets_match_object_3(Process*, Eterm*);\nEterm wrap_ets_match_object_3(Process*, Eterm*, UWord *I);\nEterm ets_member_2(Process*, Eterm*);\nEterm wrap_ets_member_2(Process*, Eterm*, UWord *I);\nEterm ets_next_2(Process*, Eterm*);\nEterm wrap_ets_next_2(Process*, Eterm*, UWord *I);\nEterm ets_prev_2(Process*, Eterm*);\nEterm wrap_ets_prev_2(Process*, Eterm*, UWord *I);\nEterm ets_insert_2(Process*, Eterm*);\nEterm wrap_ets_insert_2(Process*, Eterm*, UWord *I);\nEterm ets_insert_new_2(Process*, Eterm*);\nEterm wrap_ets_insert_new_2(Process*, Eterm*, UWord *I);\nEterm ets_rename_2(Process*, Eterm*);\nEterm wrap_ets_rename_2(Process*, Eterm*, UWord *I);\nEterm ets_safe_fixtable_2(Process*, Eterm*);\nEterm wrap_ets_safe_fixtable_2(Process*, 
Eterm*, UWord *I);\nEterm ets_slot_2(Process*, Eterm*);\nEterm wrap_ets_slot_2(Process*, Eterm*, UWord *I);\nEterm ets_update_counter_3(Process*, Eterm*);\nEterm wrap_ets_update_counter_3(Process*, Eterm*, UWord *I);\nEterm ets_select_1(Process*, Eterm*);\nEterm wrap_ets_select_1(Process*, Eterm*, UWord *I);\nEterm ets_select_2(Process*, Eterm*);\nEterm wrap_ets_select_2(Process*, Eterm*, UWord *I);\nEterm ets_select_3(Process*, Eterm*);\nEterm wrap_ets_select_3(Process*, Eterm*, UWord *I);\nEterm ets_select_count_2(Process*, Eterm*);\nEterm wrap_ets_select_count_2(Process*, Eterm*, UWord *I);\nEterm ets_select_reverse_1(Process*, Eterm*);\nEterm wrap_ets_select_reverse_1(Process*, Eterm*, UWord *I);\nEterm ets_select_reverse_2(Process*, Eterm*);\nEterm wrap_ets_select_reverse_2(Process*, Eterm*, UWord *I);\nEterm ets_select_reverse_3(Process*, Eterm*);\nEterm wrap_ets_select_reverse_3(Process*, Eterm*, UWord *I);\nEterm ets_select_delete_2(Process*, Eterm*);\nEterm wrap_ets_select_delete_2(Process*, Eterm*, UWord *I);\nEterm ets_match_spec_compile_1(Process*, Eterm*);\nEterm wrap_ets_match_spec_compile_1(Process*, Eterm*, UWord *I);\nEterm ets_match_spec_run_r_3(Process*, Eterm*);\nEterm wrap_ets_match_spec_run_r_3(Process*, Eterm*, UWord *I);\nEterm os_putenv_2(Process*, Eterm*);\nEterm wrap_os_putenv_2(Process*, Eterm*, UWord *I);\nEterm os_getenv_0(Process*, Eterm*);\nEterm wrap_os_getenv_0(Process*, Eterm*, UWord *I);\nEterm os_getenv_1(Process*, Eterm*);\nEterm wrap_os_getenv_1(Process*, Eterm*, UWord *I);\nEterm os_getpid_0(Process*, Eterm*);\nEterm wrap_os_getpid_0(Process*, Eterm*, UWord *I);\nEterm os_timestamp_0(Process*, Eterm*);\nEterm wrap_os_timestamp_0(Process*, Eterm*, UWord *I);\nEterm os_system_time_0(Process*, Eterm*);\nEterm wrap_os_system_time_0(Process*, Eterm*, UWord *I);\nEterm os_system_time_1(Process*, Eterm*);\nEterm wrap_os_system_time_1(Process*, Eterm*, UWord *I);\nEterm erl_ddll_try_load_3(Process*, Eterm*);\nEterm wrap_erl_ddll_try_load_3(Process*, Eterm*, UWord *I);\nEterm erl_ddll_try_unload_2(Process*, Eterm*);\nEterm wrap_erl_ddll_try_unload_2(Process*, Eterm*, UWord *I);\nEterm erl_ddll_loaded_drivers_0(Process*, Eterm*);\nEterm wrap_erl_ddll_loaded_drivers_0(Process*, Eterm*, UWord *I);\nEterm erl_ddll_info_2(Process*, Eterm*);\nEterm wrap_erl_ddll_info_2(Process*, Eterm*, UWord *I);\nEterm erl_ddll_format_error_int_1(Process*, Eterm*);\nEterm wrap_erl_ddll_format_error_int_1(Process*, Eterm*, UWord *I);\nEterm erl_ddll_monitor_2(Process*, Eterm*);\nEterm wrap_erl_ddll_monitor_2(Process*, Eterm*, UWord *I);\nEterm erl_ddll_demonitor_1(Process*, Eterm*);\nEterm wrap_erl_ddll_demonitor_1(Process*, Eterm*, UWord *I);\nEterm re_compile_1(Process*, Eterm*);\nEterm wrap_re_compile_1(Process*, Eterm*, UWord *I);\nEterm re_compile_2(Process*, Eterm*);\nEterm wrap_re_compile_2(Process*, Eterm*, UWord *I);\nEterm re_run_2(Process*, Eterm*);\nEterm wrap_re_run_2(Process*, Eterm*, UWord *I);\nEterm re_run_3(Process*, Eterm*);\nEterm wrap_re_run_3(Process*, Eterm*, UWord *I);\nEterm lists_member_2(Process*, Eterm*);\nEterm wrap_lists_member_2(Process*, Eterm*, UWord *I);\nEterm lists_reverse_2(Process*, Eterm*);\nEterm wrap_lists_reverse_2(Process*, Eterm*, UWord *I);\nEterm lists_keymember_3(Process*, Eterm*);\nEterm wrap_lists_keymember_3(Process*, Eterm*, UWord *I);\nEterm lists_keysearch_3(Process*, Eterm*);\nEterm wrap_lists_keysearch_3(Process*, Eterm*, UWord *I);\nEterm lists_keyfind_3(Process*, Eterm*);\nEterm wrap_lists_keyfind_3(Process*, Eterm*, UWord 
*I);\nEterm erts_debug_disassemble_1(Process*, Eterm*);\nEterm wrap_erts_debug_disassemble_1(Process*, Eterm*, UWord *I);\nEterm erts_debug_breakpoint_2(Process*, Eterm*);\nEterm wrap_erts_debug_breakpoint_2(Process*, Eterm*, UWord *I);\nEterm erts_debug_same_2(Process*, Eterm*);\nEterm wrap_erts_debug_same_2(Process*, Eterm*, UWord *I);\nEterm erts_debug_flat_size_1(Process*, Eterm*);\nEterm wrap_erts_debug_flat_size_1(Process*, Eterm*, UWord *I);\nEterm erts_debug_get_internal_state_1(Process*, Eterm*);\nEterm wrap_erts_debug_get_internal_state_1(Process*, Eterm*, UWord *I);\nEterm erts_debug_set_internal_state_2(Process*, Eterm*);\nEterm wrap_erts_debug_set_internal_state_2(Process*, Eterm*, UWord *I);\nEterm erts_debug_display_1(Process*, Eterm*);\nEterm wrap_erts_debug_display_1(Process*, Eterm*, UWord *I);\nEterm erts_debug_dist_ext_to_term_2(Process*, Eterm*);\nEterm wrap_erts_debug_dist_ext_to_term_2(Process*, Eterm*, UWord *I);\nEterm erts_debug_instructions_0(Process*, Eterm*);\nEterm wrap_erts_debug_instructions_0(Process*, Eterm*, UWord *I);\nEterm erts_debug_dump_monitors_1(Process*, Eterm*);\nEterm wrap_erts_debug_dump_monitors_1(Process*, Eterm*, UWord *I);\nEterm erts_debug_dump_links_1(Process*, Eterm*);\nEterm wrap_erts_debug_dump_links_1(Process*, Eterm*, UWord *I);\nEterm erts_debug_lock_counters_1(Process*, Eterm*);\nEterm wrap_erts_debug_lock_counters_1(Process*, Eterm*, UWord *I);\nEterm code_get_chunk_2(Process*, Eterm*);\nEterm wrap_code_get_chunk_2(Process*, Eterm*, UWord *I);\nEterm code_module_md5_1(Process*, Eterm*);\nEterm wrap_code_module_md5_1(Process*, Eterm*, UWord *I);\nEterm code_make_stub_module_3(Process*, Eterm*);\nEterm wrap_code_make_stub_module_3(Process*, Eterm*, UWord *I);\nEterm code_is_module_native_1(Process*, Eterm*);\nEterm wrap_code_is_module_native_1(Process*, Eterm*, UWord *I);\nEterm hibernate_3(Process*, Eterm*);\nEterm wrap_hibernate_3(Process*, Eterm*, UWord *I);\nEterm error_logger_warning_map_0(Process*, Eterm*);\nEterm wrap_error_logger_warning_map_0(Process*, Eterm*, UWord *I);\nEterm get_module_info_1(Process*, Eterm*);\nEterm wrap_get_module_info_1(Process*, Eterm*, UWord *I);\nEterm get_module_info_2(Process*, Eterm*);\nEterm wrap_get_module_info_2(Process*, Eterm*, UWord *I);\nEterm is_boolean_1(Process*, Eterm*);\nEterm wrap_is_boolean_1(Process*, Eterm*, UWord *I);\nEterm string_to_integer_1(Process*, Eterm*);\nEterm wrap_string_to_integer_1(Process*, Eterm*, UWord *I);\nEterm string_to_float_1(Process*, Eterm*);\nEterm wrap_string_to_float_1(Process*, Eterm*, UWord *I);\nEterm make_fun_3(Process*, Eterm*);\nEterm wrap_make_fun_3(Process*, Eterm*, UWord *I);\nEterm iolist_size_1(Process*, Eterm*);\nEterm wrap_iolist_size_1(Process*, Eterm*, UWord *I);\nEterm iolist_to_binary_1(Process*, Eterm*);\nEterm wrap_iolist_to_binary_1(Process*, Eterm*, UWord *I);\nEterm list_to_existing_atom_1(Process*, Eterm*);\nEterm wrap_list_to_existing_atom_1(Process*, Eterm*, UWord *I);\nEterm is_bitstring_1(Process*, Eterm*);\nEterm wrap_is_bitstring_1(Process*, Eterm*, UWord *I);\nEterm tuple_size_1(Process*, Eterm*);\nEterm wrap_tuple_size_1(Process*, Eterm*, UWord *I);\nEterm byte_size_1(Process*, Eterm*);\nEterm wrap_byte_size_1(Process*, Eterm*, UWord *I);\nEterm bit_size_1(Process*, Eterm*);\nEterm wrap_bit_size_1(Process*, Eterm*, UWord *I);\nEterm list_to_bitstring_1(Process*, Eterm*);\nEterm wrap_list_to_bitstring_1(Process*, Eterm*, UWord *I);\nEterm bitstring_to_list_1(Process*, Eterm*);\nEterm wrap_bitstring_to_list_1(Process*, 
Eterm*, UWord *I);\nEterm ets_update_element_3(Process*, Eterm*);\nEterm wrap_ets_update_element_3(Process*, Eterm*, UWord *I);\nEterm decode_packet_3(Process*, Eterm*);\nEterm wrap_decode_packet_3(Process*, Eterm*, UWord *I);\nEterm unicode_characters_to_binary_2(Process*, Eterm*);\nEterm wrap_unicode_characters_to_binary_2(Process*, Eterm*, UWord *I);\nEterm unicode_characters_to_list_2(Process*, Eterm*);\nEterm wrap_unicode_characters_to_list_2(Process*, Eterm*, UWord *I);\nEterm unicode_bin_is_7bit_1(Process*, Eterm*);\nEterm wrap_unicode_bin_is_7bit_1(Process*, Eterm*, UWord *I);\nEterm atom_to_binary_2(Process*, Eterm*);\nEterm wrap_atom_to_binary_2(Process*, Eterm*, UWord *I);\nEterm binary_to_atom_2(Process*, Eterm*);\nEterm wrap_binary_to_atom_2(Process*, Eterm*, UWord *I);\nEterm binary_to_existing_atom_2(Process*, Eterm*);\nEterm wrap_binary_to_existing_atom_2(Process*, Eterm*, UWord *I);\nEterm net_kernel_dflag_unicode_io_1(Process*, Eterm*);\nEterm wrap_net_kernel_dflag_unicode_io_1(Process*, Eterm*, UWord *I);\nEterm ets_give_away_3(Process*, Eterm*);\nEterm wrap_ets_give_away_3(Process*, Eterm*, UWord *I);\nEterm ets_setopts_2(Process*, Eterm*);\nEterm wrap_ets_setopts_2(Process*, Eterm*, UWord *I);\nEterm load_nif_2(Process*, Eterm*);\nEterm wrap_load_nif_2(Process*, Eterm*, UWord *I);\nEterm call_on_load_function_1(Process*, Eterm*);\nEterm wrap_call_on_load_function_1(Process*, Eterm*, UWord *I);\nEterm finish_after_on_load_2(Process*, Eterm*);\nEterm wrap_finish_after_on_load_2(Process*, Eterm*, UWord *I);\nEterm binary_to_term_2(Process*, Eterm*);\nEterm wrap_binary_to_term_2(Process*, Eterm*, UWord *I);\nEterm binary_part_2(Process*, Eterm*);\nEterm wrap_binary_part_2(Process*, Eterm*, UWord *I);\nEterm binary_part_3(Process*, Eterm*);\nEterm wrap_binary_part_3(Process*, Eterm*, UWord *I);\nEterm binary_compile_pattern_1(Process*, Eterm*);\nEterm wrap_binary_compile_pattern_1(Process*, Eterm*, UWord *I);\nEterm binary_match_2(Process*, Eterm*);\nEterm wrap_binary_match_2(Process*, Eterm*, UWord *I);\nEterm binary_match_3(Process*, Eterm*);\nEterm wrap_binary_match_3(Process*, Eterm*, UWord *I);\nEterm binary_matches_2(Process*, Eterm*);\nEterm wrap_binary_matches_2(Process*, Eterm*, UWord *I);\nEterm binary_matches_3(Process*, Eterm*);\nEterm wrap_binary_matches_3(Process*, Eterm*, UWord *I);\nEterm binary_longest_common_prefix_1(Process*, Eterm*);\nEterm wrap_binary_longest_common_prefix_1(Process*, Eterm*, UWord *I);\nEterm binary_longest_common_suffix_1(Process*, Eterm*);\nEterm wrap_binary_longest_common_suffix_1(Process*, Eterm*, UWord *I);\nEterm binary_first_1(Process*, Eterm*);\nEterm wrap_binary_first_1(Process*, Eterm*, UWord *I);\nEterm binary_last_1(Process*, Eterm*);\nEterm wrap_binary_last_1(Process*, Eterm*, UWord *I);\nEterm binary_at_2(Process*, Eterm*);\nEterm wrap_binary_at_2(Process*, Eterm*, UWord *I);\nEterm binary_binary_part_2(Process*, Eterm*);\nEterm wrap_binary_binary_part_2(Process*, Eterm*, UWord *I);\nEterm binary_binary_part_3(Process*, Eterm*);\nEterm wrap_binary_binary_part_3(Process*, Eterm*, UWord *I);\nEterm binary_bin_to_list_1(Process*, Eterm*);\nEterm wrap_binary_bin_to_list_1(Process*, Eterm*, UWord *I);\nEterm binary_bin_to_list_2(Process*, Eterm*);\nEterm wrap_binary_bin_to_list_2(Process*, Eterm*, UWord *I);\nEterm binary_bin_to_list_3(Process*, Eterm*);\nEterm wrap_binary_bin_to_list_3(Process*, Eterm*, UWord *I);\nEterm binary_list_to_bin_1(Process*, Eterm*);\nEterm wrap_binary_list_to_bin_1(Process*, Eterm*, UWord 
*I);\nEterm binary_copy_1(Process*, Eterm*);\nEterm wrap_binary_copy_1(Process*, Eterm*, UWord *I);\nEterm binary_copy_2(Process*, Eterm*);\nEterm wrap_binary_copy_2(Process*, Eterm*, UWord *I);\nEterm binary_referenced_byte_size_1(Process*, Eterm*);\nEterm wrap_binary_referenced_byte_size_1(Process*, Eterm*, UWord *I);\nEterm binary_encode_unsigned_1(Process*, Eterm*);\nEterm wrap_binary_encode_unsigned_1(Process*, Eterm*, UWord *I);\nEterm binary_encode_unsigned_2(Process*, Eterm*);\nEterm wrap_binary_encode_unsigned_2(Process*, Eterm*, UWord *I);\nEterm binary_decode_unsigned_1(Process*, Eterm*);\nEterm wrap_binary_decode_unsigned_1(Process*, Eterm*, UWord *I);\nEterm binary_decode_unsigned_2(Process*, Eterm*);\nEterm wrap_binary_decode_unsigned_2(Process*, Eterm*, UWord *I);\nEterm nif_error_1(Process*, Eterm*);\nEterm wrap_nif_error_1(Process*, Eterm*, UWord *I);\nEterm nif_error_2(Process*, Eterm*);\nEterm wrap_nif_error_2(Process*, Eterm*, UWord *I);\nEterm prim_file_internal_name2native_1(Process*, Eterm*);\nEterm wrap_prim_file_internal_name2native_1(Process*, Eterm*, UWord *I);\nEterm prim_file_internal_native2name_1(Process*, Eterm*);\nEterm wrap_prim_file_internal_native2name_1(Process*, Eterm*, UWord *I);\nEterm prim_file_internal_normalize_utf8_1(Process*, Eterm*);\nEterm wrap_prim_file_internal_normalize_utf8_1(Process*, Eterm*, UWord *I);\nEterm prim_file_is_translatable_1(Process*, Eterm*);\nEterm wrap_prim_file_is_translatable_1(Process*, Eterm*, UWord *I);\nEterm file_native_name_encoding_0(Process*, Eterm*);\nEterm wrap_file_native_name_encoding_0(Process*, Eterm*, UWord *I);\nEterm check_old_code_1(Process*, Eterm*);\nEterm wrap_check_old_code_1(Process*, Eterm*, UWord *I);\nEterm universaltime_to_posixtime_1(Process*, Eterm*);\nEterm wrap_universaltime_to_posixtime_1(Process*, Eterm*, UWord *I);\nEterm posixtime_to_universaltime_1(Process*, Eterm*);\nEterm wrap_posixtime_to_universaltime_1(Process*, Eterm*, UWord *I);\nEterm dt_put_tag_1(Process*, Eterm*);\nEterm wrap_dt_put_tag_1(Process*, Eterm*, UWord *I);\nEterm dt_get_tag_0(Process*, Eterm*);\nEterm wrap_dt_get_tag_0(Process*, Eterm*, UWord *I);\nEterm dt_get_tag_data_0(Process*, Eterm*);\nEterm wrap_dt_get_tag_data_0(Process*, Eterm*, UWord *I);\nEterm dt_spread_tag_1(Process*, Eterm*);\nEterm wrap_dt_spread_tag_1(Process*, Eterm*, UWord *I);\nEterm dt_restore_tag_1(Process*, Eterm*);\nEterm wrap_dt_restore_tag_1(Process*, Eterm*, UWord *I);\nEterm dt_prepend_vm_tag_data_1(Process*, Eterm*);\nEterm wrap_dt_prepend_vm_tag_data_1(Process*, Eterm*, UWord *I);\nEterm dt_append_vm_tag_data_1(Process*, Eterm*);\nEterm wrap_dt_append_vm_tag_data_1(Process*, Eterm*, UWord *I);\nEterm prepare_loading_2(Process*, Eterm*);\nEterm wrap_prepare_loading_2(Process*, Eterm*, UWord *I);\nEterm finish_loading_1(Process*, Eterm*);\nEterm wrap_finish_loading_1(Process*, Eterm*, UWord *I);\nEterm insert_element_3(Process*, Eterm*);\nEterm wrap_insert_element_3(Process*, Eterm*, UWord *I);\nEterm delete_element_2(Process*, Eterm*);\nEterm wrap_delete_element_2(Process*, Eterm*, UWord *I);\nEterm binary_to_integer_1(Process*, Eterm*);\nEterm wrap_binary_to_integer_1(Process*, Eterm*, UWord *I);\nEterm binary_to_integer_2(Process*, Eterm*);\nEterm wrap_binary_to_integer_2(Process*, Eterm*, UWord *I);\nEterm integer_to_binary_1(Process*, Eterm*);\nEterm wrap_integer_to_binary_1(Process*, Eterm*, UWord *I);\nEterm list_to_integer_2(Process*, Eterm*);\nEterm wrap_list_to_integer_2(Process*, Eterm*, UWord *I);\nEterm 
float_to_binary_1(Process*, Eterm*);\nEterm wrap_float_to_binary_1(Process*, Eterm*, UWord *I);\nEterm float_to_binary_2(Process*, Eterm*);\nEterm wrap_float_to_binary_2(Process*, Eterm*, UWord *I);\nEterm binary_to_float_1(Process*, Eterm*);\nEterm wrap_binary_to_float_1(Process*, Eterm*, UWord *I);\nEterm io_printable_range_0(Process*, Eterm*);\nEterm wrap_io_printable_range_0(Process*, Eterm*, UWord *I);\nEterm os_unsetenv_1(Process*, Eterm*);\nEterm wrap_os_unsetenv_1(Process*, Eterm*, UWord *I);\nEterm re_inspect_2(Process*, Eterm*);\nEterm wrap_re_inspect_2(Process*, Eterm*, UWord *I);\nEterm is_map_1(Process*, Eterm*);\nEterm wrap_is_map_1(Process*, Eterm*, UWord *I);\nEterm map_size_1(Process*, Eterm*);\nEterm wrap_map_size_1(Process*, Eterm*, UWord *I);\nEterm maps_to_list_1(Process*, Eterm*);\nEterm wrap_maps_to_list_1(Process*, Eterm*, UWord *I);\nEterm maps_find_2(Process*, Eterm*);\nEterm wrap_maps_find_2(Process*, Eterm*, UWord *I);\nEterm maps_get_2(Process*, Eterm*);\nEterm wrap_maps_get_2(Process*, Eterm*, UWord *I);\nEterm maps_from_list_1(Process*, Eterm*);\nEterm wrap_maps_from_list_1(Process*, Eterm*, UWord *I);\nEterm maps_is_key_2(Process*, Eterm*);\nEterm wrap_maps_is_key_2(Process*, Eterm*, UWord *I);\nEterm maps_keys_1(Process*, Eterm*);\nEterm wrap_maps_keys_1(Process*, Eterm*, UWord *I);\nEterm maps_merge_2(Process*, Eterm*);\nEterm wrap_maps_merge_2(Process*, Eterm*, UWord *I);\nEterm maps_new_0(Process*, Eterm*);\nEterm wrap_maps_new_0(Process*, Eterm*, UWord *I);\nEterm maps_put_3(Process*, Eterm*);\nEterm wrap_maps_put_3(Process*, Eterm*, UWord *I);\nEterm maps_remove_2(Process*, Eterm*);\nEterm wrap_maps_remove_2(Process*, Eterm*, UWord *I);\nEterm maps_update_3(Process*, Eterm*);\nEterm wrap_maps_update_3(Process*, Eterm*, UWord *I);\nEterm maps_values_1(Process*, Eterm*);\nEterm wrap_maps_values_1(Process*, Eterm*, UWord *I);\nEterm erts_internal_cmp_term_2(Process*, Eterm*);\nEterm wrap_erts_internal_cmp_term_2(Process*, Eterm*, UWord *I);\nEterm ets_take_2(Process*, Eterm*);\nEterm wrap_ets_take_2(Process*, Eterm*, UWord *I);\nEterm fun_info_mfa_1(Process*, Eterm*);\nEterm wrap_fun_info_mfa_1(Process*, Eterm*, UWord *I);\nEterm get_keys_0(Process*, Eterm*);\nEterm wrap_get_keys_0(Process*, Eterm*, UWord *I);\nEterm ets_update_counter_4(Process*, Eterm*);\nEterm wrap_ets_update_counter_4(Process*, Eterm*, UWord *I);\nEterm erts_debug_map_info_1(Process*, Eterm*);\nEterm wrap_erts_debug_map_info_1(Process*, Eterm*, UWord *I);\nEterm hash_2(Process*, Eterm*);\nEterm wrap_hash_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_write_u8_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_write_u8_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_write_u32_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_write_u32_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bytearray_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_bytearray_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bytearray_sub_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_bytearray_sub_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bytearray_update_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_bytearray_update_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bitarray_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_bitarray_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bitarray_sub_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_bitarray_sub_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bitarray_update_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_bitarray_update_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_array_2(Process*, 
Eterm*);\nEterm wrap_hipe_bifs_array_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_array_length_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_array_length_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_array_sub_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_array_sub_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_array_update_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_array_update_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_ref_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_ref_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_ref_get_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_ref_get_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_ref_set_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_ref_set_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_enter_code_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_enter_code_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_alloc_data_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_alloc_data_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_constants_size_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_constants_size_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_merge_term_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_merge_term_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_fun_to_address_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_fun_to_address_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_set_native_address_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_set_native_address_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_set_funinfo_native_address_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_set_funinfo_native_address_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_invalidate_funinfo_native_addresses_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_invalidate_funinfo_native_addresses_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_update_code_size_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_update_code_size_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_code_size_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_code_size_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_enter_sdesc_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_enter_sdesc_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_bif_address_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_bif_address_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_primop_address_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_primop_address_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_atom_to_word_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_atom_to_word_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_term_to_word_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_term_to_word_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_get_fe_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_get_fe_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_set_native_address_in_fe_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_set_native_address_in_fe_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_find_na_or_make_stub_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_find_na_or_make_stub_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_check_crc_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_check_crc_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_system_crc_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_system_crc_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_get_rts_param_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_get_rts_param_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_patch_insn_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_patch_insn_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_patch_call_3(Process*, Eterm*);\nEterm wrap_hipe_bifs_patch_call_3(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_add_ref_2(Process*, 
Eterm*);\nEterm wrap_hipe_bifs_add_ref_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_mark_referred_from_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_mark_referred_from_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_remove_refs_from_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_remove_refs_from_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_redirect_referred_from_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_redirect_referred_from_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_call_count_on_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_call_count_on_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_call_count_off_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_call_count_off_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_call_count_get_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_call_count_get_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_call_count_clear_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_call_count_clear_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_trap_count_get_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_trap_count_get_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_trap_count_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_trap_count_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_process_info_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_process_info_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_process_info_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_process_info_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_message_info_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_message_info_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_message_info_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_message_info_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_message_sizes_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_message_sizes_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_gc_info_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_gc_info_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_shared_gc_info_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_shared_gc_info_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_incremental_gc_info_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_incremental_gc_info_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_gc_info_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_gc_info_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_pause_times_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_pause_times_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_system_timer_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_system_timer_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_system_timer_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_system_timer_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_send_timer_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_send_timer_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_send_timer_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_send_timer_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_gc_timer_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_gc_timer_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_shared_gc_timer_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_shared_gc_timer_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_gc_timer_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_gc_timer_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_misc_timer_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_misc_timer_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_misc_timer_clear_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_misc_timer_clear_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_get_hrvtime_0(Process*, Eterm*);\nEterm 
wrap_hipe_bifs_get_hrvtime_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_stop_hrvtime_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_stop_hrvtime_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_show_estack_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_show_estack_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_show_heap_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_show_heap_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_show_nstack_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_show_nstack_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_nstack_used_size_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_nstack_used_size_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_show_pcb_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_show_pcb_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_show_term_1(Process*, Eterm*);\nEterm wrap_hipe_bifs_show_term_1(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_in_native_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_in_native_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_modeswitch_debug_on_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_modeswitch_debug_on_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_modeswitch_debug_off_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_modeswitch_debug_off_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_debug_native_called_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_debug_native_called_2(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_llvm_fix_pinned_regs_0(Process*, Eterm*);\nEterm wrap_hipe_bifs_llvm_fix_pinned_regs_0(Process*, Eterm*, UWord *I);\nEterm hipe_bifs_write_u64_2(Process*, Eterm*);\nEterm wrap_hipe_bifs_write_u64_2(Process*, Eterm*, UWord *I);\n#endif\n"
},
{
"alpha_fraction": 0.5148305296897888,
"alphanum_fraction": 0.742584764957428,
"avg_line_length": 33.96296310424805,
"blob_id": "c1ce4a00b2e8c1c589457258c654b0d2ea5a4c23",
"content_id": "374e218fb3a6ba01bd2bbb89a6d744c1792aab3a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1888,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 54,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/pcre/pcre_exec_loop_break_cases.inc",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "case 943: goto L_LOOP_COUNT_843;\ncase 2936: goto L_LOOP_COUNT_2836;\ncase 3086: goto L_LOOP_COUNT_2986;\ncase 3282: goto L_LOOP_COUNT_3182;\ncase 3312: goto L_LOOP_COUNT_3212;\ncase 3428: goto L_LOOP_COUNT_3328;\ncase 3703: goto L_LOOP_COUNT_3603;\ncase 3774: goto L_LOOP_COUNT_3674;\ncase 3810: goto L_LOOP_COUNT_3710;\ncase 3871: goto L_LOOP_COUNT_3771;\ncase 4134: goto L_LOOP_COUNT_4034;\ncase 4159: goto L_LOOP_COUNT_4059;\ncase 4193: goto L_LOOP_COUNT_4093;\ncase 4275: goto L_LOOP_COUNT_4175;\ncase 4300: goto L_LOOP_COUNT_4200;\ncase 4617: goto L_LOOP_COUNT_4517;\ncase 5413: goto L_LOOP_COUNT_5313;\ncase 5709: goto L_LOOP_COUNT_5609;\ncase 5730: goto L_LOOP_COUNT_5630;\ncase 5746: goto L_LOOP_COUNT_5646;\ncase 5762: goto L_LOOP_COUNT_5662;\ncase 5778: goto L_LOOP_COUNT_5678;\ncase 5797: goto L_LOOP_COUNT_5697;\ncase 5816: goto L_LOOP_COUNT_5716;\ncase 5835: goto L_LOOP_COUNT_5735;\ncase 5855: goto L_LOOP_COUNT_5755;\ncase 5880: goto L_LOOP_COUNT_5780;\ncase 5900: goto L_LOOP_COUNT_5800;\ncase 5948: goto L_LOOP_COUNT_5848;\ncase 5950: goto L_LOOP_COUNT_5850;\ncase 6030: goto L_LOOP_COUNT_5930;\ncase 6057: goto L_LOOP_COUNT_5957;\ncase 6074: goto L_LOOP_COUNT_5974;\ncase 6123: goto L_LOOP_COUNT_6023;\ncase 6146: goto L_LOOP_COUNT_6046;\ncase 6169: goto L_LOOP_COUNT_6069;\ncase 6185: goto L_LOOP_COUNT_6085;\ncase 6201: goto L_LOOP_COUNT_6101;\ncase 6217: goto L_LOOP_COUNT_6117;\ncase 6233: goto L_LOOP_COUNT_6133;\ncase 6249: goto L_LOOP_COUNT_6149;\ncase 6265: goto L_LOOP_COUNT_6165;\ncase 6310: goto L_LOOP_COUNT_6210;\ncase 6349: goto L_LOOP_COUNT_6249;\ncase 6370: goto L_LOOP_COUNT_6270;\ncase 6392: goto L_LOOP_COUNT_6292;\ncase 6414: goto L_LOOP_COUNT_6314;\ncase 6436: goto L_LOOP_COUNT_6336;\ncase 6451: goto L_LOOP_COUNT_6351;\ncase 6465: goto L_LOOP_COUNT_6365;\ncase 6479: goto L_LOOP_COUNT_6379;\ncase 6493: goto L_LOOP_COUNT_6393;\ncase 6507: goto L_LOOP_COUNT_6407;\ncase 6521: goto L_LOOP_COUNT_6421;\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 40,
"blob_id": "f93949c6d8320bf8d2b32e74647346db0fa1c46f",
"content_id": "df09c237cf6f437e0cb60c70a1abc41fc6456f2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 1,
"path": "/.zshenv",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/zsh/prezto/runcoms/zshenv"
},
{
"alpha_fraction": 0.6115667819976807,
"alphanum_fraction": 0.7068349123001099,
"avg_line_length": 26.327587127685547,
"blob_id": "5c1f349afc7929f7bafa867c251958ce28016849",
"content_id": "54af30d8d41b53e67af3b76296a6d6946ebdf080",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4755,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 174,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/opt/plain/hipe_literals.h",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/* File: hipe_literals.h, generated by /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/bin/x86_64-apple-darwin14.5.0/hipe_mkliterals */\n#ifndef __HIPE_LITERALS_H__\n#define __HIPE_LITERALS_H__\n\n#define F_TIMO 4\n#define FREASON_TRAP 256\n#define EFE_NATIVE_ADDRESS 56\n#define EFE_REFC 80\n#define EFT_THING 0\n#define BSF_ALIGNED 1\n#define PB_ACTIVE_WRITER 2\n#define PB_IS_WRITABLE 1\n#define MB_ORIG 0\n#define MB_BASE 8\n#define MB_OFFSET 16\n#define MB_SIZE 24\n#define PROC_BIN_THING_WORD 0\n#define PROC_BIN_BINSIZE 8\n#define PROC_BIN_NEXT 16\n#define PROC_BIN_VAL 24\n#define PROC_BIN_BYTES 32\n#define PROC_BIN_FLAGS 40\n#define PROC_BIN_WORDSIZE 6\n#define SUB_BIN_THING_WORD 0\n#define SUB_BIN_BINSIZE 8\n#define SUB_BIN_BITSIZE 24\n#define SUB_BIN_OFFS 16\n#define SUB_BIN_BITOFFS 25\n#define SUB_BIN_WRITABLE 26\n#define SUB_BIN_ORIG 32\n#define SUB_BIN_WORDSIZE 5\n#define HEAP_BIN_THING_WORD 0\n#define HEAP_BIN_SIZE 8\n#define HEAP_BIN_DATA 16\n#define BINARY_ORIG_SIZE 16\n#define BINARY_ORIG_BYTES 24\n#define MAX_HEAP_BIN_SIZE 64\n#define MS_THING_WORD 0\n#define MS_MATCHBUFFER 8\n#define MS_SAVEOFFSET 40\n#define MS_MIN_SIZE 6\n#define MB_ORIG_SIZE 8\n#define MB_BASE_SIZE 8\n#define MB_OFFSET_SIZE 8\n#define MB_SIZE_SIZE 8\n#define PROC_BIN_THING_WORD_SIZE 8\n#define PROC_BIN_BINSIZE_SIZE 8\n#define PROC_BIN_NEXT_SIZE 8\n#define PROC_BIN_VAL_SIZE 8\n#define PROC_BIN_BYTES_SIZE 8\n#define PROC_BIN_FLAGS_SIZE 8\n#define SUB_BIN_THING_WORD_SIZE 8\n#define SUB_BIN_BINSIZE_SIZE 8\n#define SUB_BIN_BITSIZE_SIZE 1\n#define SUB_BIN_OFFS_SIZE 8\n#define SUB_BIN_BITOFFS_SIZE 1\n#define SUB_BIN_WRITABLE_SIZE 1\n#define SUB_BIN_ORIG_SIZE 8\n#define HEAP_BIN_THING_WORD_SIZE 8\n#define HEAP_BIN_SIZE_SIZE 8\n#define HEAP_BIN_DATA_SIZE 8\n#define BINARY_ORIG_SIZE_SIZE 8\n#define BINARY_ORIG_BYTES_SIZE 1\n#define MS_THING_WORD_SIZE 8\n#define MS_SAVEOFFSET_SIZE 8\n#define MSG_NEXT 0\n#define ARM_LEAF_WORDS 16\n#define ARM_NR_ARG_REGS 3\n#define ARM_IS_BIG_ENDIAN 0\n#define PPC_LEAF_WORDS 16\n#define PPC_NR_ARG_REGS 4\n#define AMD64_LEAF_WORDS 24\n#define AMD64_NR_ARG_REGS 4\n#define AMD64_HP_IN_REGISTER 1\n#define AMD64_HEAP_POINTER 15\n#define X86_LEAF_WORDS 24\n#define X86_NR_ARG_REGS 3\n#define X86_NR_RET_REGS 3\n#define X86_HP_IN_ESI 1\n#define SPARC_LEAF_WORDS 16\n#define SPARC_NR_ARG_REGS 4\n#define P_OFF_HEAP_FUNS 584\n#define EFT_NEXT 16\n#define EFT_CREATOR 48\n#define EFT_FE 8\n#define EFT_NATIVE_ADDRESS 24\n#define EFT_ARITY 32\n#define EFT_NUM_FREE 40\n#define EFT_ENV 56\n#define ERL_FUN_SIZE 7\n#define P_FP_EXCEPTION 128\n#define ERTS_IS_SMP 0\n#define ERTS_NO_FPE_SIGNALS 1\n#define MSG_MESSAGE 16\n#define P_HP 72\n#define P_HP_LIMIT 80\n#define P_OFF_HEAP_FIRST 584\n#define P_MBUF 600\n#define P_ID 0\n#define P_FLAGS 344\n#define P_FVALUE 352\n#define P_FREASON 360\n#define P_FTRACE 368\n#define P_FCALLS 312\n#define P_BEAM_IP 296\n#define P_ARITY 216\n#define P_ARG0 240\n#define P_ARG1 248\n#define P_ARG2 256\n#define P_ARG3 264\n#define P_ARG4 272\n#define P_ARG5 280\n#define P_NSP 136\n#define P_NCALLEE 160\n#define P_CLOSURE 160\n#define P_NSP_LIMIT 144\n#define P_CSP 192\n#define P_NARITY 200\n#define P_FLOAT_RESULT 208\n#define P_MSG_FIRST 400\n#define P_MSG_SAVE 416\n#define P_CALLEE_EXP 160\n#define THE_NON_VALUE 0\n#define HIPE_LITERALS_CRC 2320683U\n#define HIPE_SYSTEM_CRC 106743500U\n#define HIPE_ERTS_CHECKSUM (HIPE_LITERALS_CRC ^ HIPE_SYSTEM_CRC)\n\n#define RTS_PARAMS_CASES \\\n\tcase 1: value = 584; break; \\\n\tcase 4: value = 16; break; \\\n\tcase 5: value = 
48; break; \\\n\tcase 6: value = 8; break; \\\n\tcase 7: value = 24; break; \\\n\tcase 8: value = 32; break; \\\n\tcase 9: value = 40; break; \\\n\tcase 10: value = 56; break; \\\n\tcase 11: value = 7; break; \\\n\tcase 12: is_defined = 0; break; \\\n\tcase 14: value = 128; break; \\\n\tcase 15: value = 0; break; \\\n\tcase 16: value = 1; break; \\\n\tcase 19: value = 16; break; \\\n\tcase 22: value = 72; break; \\\n\tcase 23: value = 80; break; \\\n\tcase 24: value = 584; break; \\\n\tcase 25: value = 600; break; \\\n\tcase 26: value = 0; break; \\\n\tcase 27: value = 344; break; \\\n\tcase 28: value = 352; break; \\\n\tcase 29: value = 360; break; \\\n\tcase 30: value = 368; break; \\\n\tcase 31: value = 312; break; \\\n\tcase 32: value = 296; break; \\\n\tcase 33: value = 216; break; \\\n\tcase 34: value = 240; break; \\\n\tcase 35: value = 248; break; \\\n\tcase 36: value = 256; break; \\\n\tcase 37: value = 264; break; \\\n\tcase 38: value = 272; break; \\\n\tcase 39: value = 280; break; \\\n\tcase 40: value = 136; break; \\\n\tcase 41: value = 160; break; \\\n\tcase 42: value = 160; break; \\\n\tcase 43: value = 144; break; \\\n\tcase 44: value = 192; break; \\\n\tcase 45: is_defined = 0; break; \\\n\tcase 46: value = 200; break; \\\n\tcase 47: value = 208; break; \\\n\tcase 48: is_defined = 0; break; \\\n\tcase 49: value = 400; break; \\\n\tcase 50: value = 416; break; \\\n\tcase 51: value = 160; break; \\\n\tcase 52: value = 0; break;\n#endif\n"
},
{
"alpha_fraction": 0.7894737124443054,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 38,
"blob_id": "723c0185df7afdfc6a260f60f9b6e68768df2021",
"content_id": "f94b787d0f511b96c5f497e981676c3b4abe2d3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 1,
"path": "/.unescaped_colors.rb",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/irb/unescaped_colors.rb"
},
{
"alpha_fraction": 0.6959517598152161,
"alphanum_fraction": 0.7039908170700073,
"avg_line_length": 29.017240524291992,
"blob_id": "98b7d8e39f4013e68c9f5cb6c144ad3a3eef0ad7",
"content_id": "e1e224106b0077e8efbc4f0eaf4057f7ad621003",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3483,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 116,
"path": "/bin/coreosiso",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n# Author: Naoki OKAMURA (Nyarla) <nyarla[ at ]thotep.net>\n# Usage: ./makeiso.sh\n# Unlicense: This script is under the public domain.\n# Requires: gzip tar mkisofs syslinux curl (or axel) ssh\n# cloned from https://github.com/nyarla/coreos-live-iso\n\nset -e\n \n# Default configurations\nSYSLINUX_VERSION=${SYSLINUX_VERSION:=\"6.02\"}\nCOREOS_VERSION=${COREOS_VERSION:=\"dev-channel\"}\nBOOT_ENV=${BOOT_ENV:=\"bios\"}\nSSH_PUBKEY_PATH=${SSH_PUBKEY_PATH:=~/.ssh/id_rsa.pub}\nCURL=${CURL:=\"curl\"} \n \n# Initialze variables\nSYSLINUX_BASE_URL=\"ftp://www.kernel.org/pub/linux/utils/boot/syslinux\"\nSYSLINUX_BASENAME=\"syslinux-$SYSLINUX_VERSION\"\nSYSLINUX_URL=\"${SYSLINUX_BASE_URL}/${SYSLINUX_BASENAME}.tar.gz\"\n \nCOREOS_BASE_URL=\"http://storage.core-os.net/coreos/amd64-generic\"\nCOREOS_KERN_BASENAME=\"coreos_production_pxe.vmlinuz\"\nCOREOS_INITRD_BASENAME=\"coreos_production_pxe_image.cpio.gz\"\nCOREOS_VER_URL=\"${COREOS_BASE_URL}/${COREOS_VERSION}/version.txt\"\nCOREOS_KERN_URL=\"${COREOS_BASE_URL}/${COREOS_VERSION}/${COREOS_KERN_BASENAME}\"\nCOREOS_INITRD_URL=\"${COREOS_BASE_URL}/${COREOS_VERSION}/${COREOS_INITRD_BASENAME}\"\n\nif [ ! -f \"${SSH_PUBKEY_PATH}\" ]; then\n echo \"Missing ${SSH_PUBKEY_PATH}. Please run ssh-keygen to generate keys.\"\n exit\nfi\nSSH_PUBKEY=`cat ${SSH_PUBKEY_PATH}`\n \nbindir=`cd $(dirname $0) && pwd`\nworkdir=$bindir/${COREOS_VERSION}\n \necho \"-----> Initialize working directory\"\nif [ ! -d $workdir ];then\n mkdir -p $workdir\nfi;\n \ncd $workdir\n \nmkdir -p iso/coreos\nmkdir -p iso/syslinux\nmkdir -p iso/isolinux\n\necho \"-----> CoreOS version \"\n$CURL -o version.txt ${COREOS_VER_URL}\ncat version.txt\n \necho \"-----> Download CoreOS's kernel\"\nif [ ! -e iso/coreos/vmlinuz ]; then\n $CURL -o iso/coreos/vmlinuz $COREOS_KERN_URL\nfi\n \necho \"-----> Download CoreOS's initrd\"\nif [ ! -e iso/coreos/cpio.gz ]; then\n $CURL -o iso/coreos/cpio.gz $COREOS_INITRD_URL\nfi\ncd iso/coreos\nmkdir -p usr/share/oem\ncat<<EOF > usr/share/oem/run\n#!/bin/sh\n \n# Place your OEM run commands here...\n \nEOF\nchmod +x usr/share/oem/run\ngzip -d cpio.gz\nfind usr | cpio -o -A -H newc -O cpio\ngzip cpio\nrm -rf usr/share/oem\ncd $workdir\n \necho \"-----> Download syslinux and copy to iso directory\"\nif [ ! 
-e ${SYSLINUX_BASENAME} ]; then\n $CURL -o ${SYSLINUX_BASENAME}.tar.gz $SYSLINUX_URL\nfi\ntar zxf ${SYSLINUX_BASENAME}.tar.gz\n \ncp ${SYSLINUX_BASENAME}/${BOOT_ENV}/com32/chain/chain.c32 iso/syslinux/\ncp ${SYSLINUX_BASENAME}/${BOOT_ENV}/com32/lib/libcom32.c32 iso/syslinux/\ncp ${SYSLINUX_BASENAME}/${BOOT_ENV}/com32/libutil/libutil.c32 iso/syslinux/\ncp ${SYSLINUX_BASENAME}/${BOOT_ENV}/memdisk/memdisk iso/syslinux/\n \ncp ${SYSLINUX_BASENAME}/${BOOT_ENV}/core/isolinux.bin iso/isolinux/\ncp ${SYSLINUX_BASENAME}/${BOOT_ENV}/com32/elflink/ldlinux/ldlinux.c32 iso/isolinux/\n \necho \"-----> Make isolinux.cfg file\"\ncat<<EOF > iso/isolinux/isolinux.cfg\nINCLUDE /syslinux/syslinux.cfg\nEOF\n \necho \"-----> Make syslinux.cfg file\"\ncat<<EOF > iso/syslinux/syslinux.cfg\ndefault coreos\nprompt 1\ntimeout 15\n \nlabel coreos\n kernel /coreos/vmlinuz\n append initrd=/coreos/cpio.gz root=squashfs: state=tmpfs: sshkey=\"${SSH_PUBKEY}\"\nEOF\n \necho \"-----> Make ISO file\"\ncd iso\nmkisofs -v -l -r -J -o ${bindir}/CoreOS.${COREOS_VERSION}.iso -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table .\nisohybrid ${bindir}/CoreOS.${COREOS_VERSION}.iso\necho \"-----> Cleanup\"\ncd $bindir\nrm -rf $workdir\n \necho \"-----> Finished\"\necho \"-----> Install ${bindir}/CoreOS.${COREOS_VERSION}.iso and ssh core@<ip>\"\n\n"
},
{
"alpha_fraction": 0.5815126299858093,
"alphanum_fraction": 0.5966386795043945,
"avg_line_length": 22.799999237060547,
"blob_id": "d78faa07ef7b9927f803459703621a64a1ecdcf5",
"content_id": "fd5180aa327f222f4e0f3653b072f6b5ccde3f06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 595,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 25,
"path": "/bin/sshlogin",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nFILE=~/login.txt\nCONNECT=sshexpect\nSERVERNAME=$1\nMyServer=\"\"\nMyUser=\"\"\nMyPassword=\"\"\nexec 3<&0\nexec 0<$FILE\nwhile read line\ndo\n MyServer=$(echo $line | cut -d'|' -f1)\n MyUser=$(echo $line | cut -d'|' -f2)\n MyPassword=$(echo $line | cut -d'|' -f3)\n\techo \"MyServer= $MyServer\"\n\techo \"MyUser= $MyUser\"\n\techo \"MyPassword= $MyPassword\"\n if [ \"$SERVERNAME\" == \"$MyServer\" ];\n then\n echo \"Running ssh $MyUser@$MyServer...\"\n $CONNECT $MyPassword $MyServer $MyUser\n fi\ndone\nexec 0<&3\necho \"$SERVERNAME not found in login.txt file\"\n"
},
{
"alpha_fraction": 0.7051482200622559,
"alphanum_fraction": 0.7160686254501343,
"avg_line_length": 32.73684310913086,
"blob_id": "204affd3e4a03a2621ffb382f3382b8bb43f8fa5",
"content_id": "daf41b50bb7d111198972ecab1c5d768b870e5b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 641,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 19,
"path": "/README.md",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "# osx\n=============\ndefault template for a OSX user's home account. Do the following after clone\n\n## Update vim packages\n```\n./bin/viminstall\n```\n\n## Added utilities homebrew, nvm, rvm, pyenv, gvm\n```\nruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\ncurl -o- https://raw.githubusercontent.com/creationix/nvm/v0.29.0/install.sh | bash\ncurl -sSL https://get.rvm.io | bash -s stable --rails\ncurl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash\nbash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)\n```\n\nV1.0.0\n"
},
{
"alpha_fraction": 0.6192830801010132,
"alphanum_fraction": 0.6353523135185242,
"avg_line_length": 20.289474487304688,
"blob_id": "d15c5339e38c4fccee5d641d06b07429114e90ad",
"content_id": "4ebce19faa214a02f8999c23e82b9cfec2875a43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 809,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 38,
"path": "/bin/enc",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n\nfunction enc_gpg {\n if [[ -x /usr/local/bin/gpg || -x /usr/bin/gpg ]]; then\n gpg -c --batch --cipher-algo AES256 --passphrase $PASSWD -o $fileName.gpg $fileName\n else\n return 1\n fi\n}\n\nfunction enc_openssl {\n if [[ -x /usr/local/bin/openssl || -x /usr/bin/openssl ]]; then\n openssl enc -aes-256-cbc -e -in $fileName -out $fileName.openssl -pass pass:$PASSWD\n else\n return 1\n fi\n}\n\nfileName=$1\nif [ ! $fileName ]; then\n\techo \"usage : $0 filename\"\n exit 1\nelse\n\techo \"Please enter your passphrase follows by <return>\"\n\tread -s PASSWD\n echo \"encrypt $fileName using openssl\"\n enc_openssl\n result=$?\n if [ $result -eq \"1\" ]; then\n echo \"encript $fileName using gpg\"\n enc_gpg\n result=$?\n if [ $result -eq \"1\" ]; then\n echo \"Did not find any encryption tool\"\n fi\nfi\nfi\n"
},
{
"alpha_fraction": 0.6389684677124023,
"alphanum_fraction": 0.7220630645751953,
"avg_line_length": 33.900001525878906,
"blob_id": "3e3f4ddfec65292ce2dfd7507050479b3717894a",
"content_id": "442d2ac048a6c6fc6e6a8eb445931c0ab79740d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 349,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 10,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/erl_version.h",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/* This file was created by 'make_version' -- don't modify. */\n#define ERLANG_OTP_RELEASE \"18\"\n#define ERLANG_OTP_VERSION \"18.2.1\"\n#define ERLANG_VERSION \"7.2.1\"\n#if ERTS_SAVED_COMPILE_TIME\n# define ERLANG_COMPILE_DATE \"Thu Dec 31 02:09:59 2015\"\n#else\n# define ERLANG_COMPILE_DATE \"\"\n#endif\n#define ERLANG_ARCHITECTURE \"x86_64-apple-darwin14.5.0\"\n"
},
{
"alpha_fraction": 0.6799936890602112,
"alphanum_fraction": 0.7087296843528748,
"avg_line_length": 53.45148849487305,
"blob_id": "4a464fb565984d9c69911897256c4d04987813f4",
"content_id": "15f01cb0e20cae9f47125f23054b37e0094ccda4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 265451,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 4875,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/opt/smp/depend.mk",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "$(OBJDIR)/atom.o: beam/atom.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_sys_driver.h \\\n beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_vm.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_process_dict.h beam/erl_hl_timer.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/erl_bits.h beam/module.h beam/register.h \\\n beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/beam_bif_load.o: beam/beam_bif_load.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h 
beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/beam_bp.h \\\n beam/beam_catches.h beam/erl_binary.h beam/erl_nif.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_native_features_config.h \\\n beam/erl_nif_api_funcs.h\n$(OBJDIR)/beam_bp.o: beam/beam_bp.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h \\\n beam/beam_bp.h\n$(OBJDIR)/beam_catches.o: beam/beam_catches.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h 
\\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/beam_catches.h beam/code_ix.h \\\n beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/export.h \\\n beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \\\n beam/erl_process.h beam/erl_process_lock.h beam/erl_port.h \\\n beam/erl_message.h beam/external.h beam/erl_node_tables.h \\\n beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/beam_debug.o: beam/beam_debug.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h 
beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/beam_bp.h \\\n beam/erl_binary.h\n$(OBJDIR)/beam_emu.o: beam/beam_emu.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_binary.h \\\n beam/erl_map.h beam/dist.h beam/beam_bp.h beam/beam_catches.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n hipe/hipe_bif1.h $(TTF_DIR)/beam_hot.h \\\n $(TTF_DIR)/beam_cold.h\n$(OBJDIR)/beam_load.o: beam/beam_load.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n 
../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h \\\n beam/beam_catches.h beam/erl_binary.h beam/erl_zlib.h beam/erl_map.h \\\n hipe/hipe_bif0.h hipe/hipe_mode_switch.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n $(TTF_DIR)/beam_pred_funcs.h \\\n $(TTF_DIR)/beam_tr_funcs.h\n$(OBJDIR)/beam_ranges.o: beam/beam_ranges.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h 
beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/benchmark.o: beam/benchmark.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/bif.o: beam/bif.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/erl_sys_driver.h \\\n beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n 
beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_process_dict.h beam/erl_hl_timer.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/erl_bits.h beam/module.h beam/register.h \\\n beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/erl_binary.h \\\n beam/beam_bp.h beam/erl_db_util.h beam/erl_bif_unique.h\n$(OBJDIR)/big.o: beam/big.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/big.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h\n$(OBJDIR)/binary.o: beam/binary.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n 
../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_binary.h\n$(OBJDIR)/break.o: beam/break.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n 
beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/version.h \\\n beam/erl_db.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/erl_instrument.h \\\n beam/erl_mtrace.h\n$(OBJDIR)/code_ix.o: beam/code_ix.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/code_ix.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/export.h \\\n beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \\\n beam/erl_process.h beam/erl_process_lock.h beam/erl_port.h \\\n beam/erl_message.h beam/external.h beam/erl_node_tables.h \\\n beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/beam_catches.h\n$(OBJDIR)/copy.o: beam/copy.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n 
../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h beam/erl_map.h hipe/hipe_process.h beam/erl_bits.h \\\n beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \\\n beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/big.h beam/erl_binary.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h\n$(OBJDIR)/dist.o: beam/dist.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/dist.h \\\n 
beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h\n$(OBJDIR)/elib_memmove.o: beam/elib_memmove.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h\n$(OBJDIR)/erl_afit_alloc.o: beam/erl_afit_alloc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/erl_afit_alloc.h\n$(OBJDIR)/erl_alloc.o: beam/erl_alloc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n 
beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_db.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h \\\n beam/erl_binary.h beam/erl_instrument.h beam/erl_mtrace.h \\\n beam/erl_cpu_topology.h beam/erl_thr_queue.h sys/common/erl_check_io.h \\\n sys/common/erl_poll.h beam/erl_goodfit_alloc.h \\\n beam/erl_bestfit_alloc.h beam/erl_afit_alloc.h \\\n beam/erl_ao_firstfit_alloc.h\n$(OBJDIR)/erl_alloc_util.o: beam/erl_alloc_util.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/big.h \\\n beam/erl_mtrace.h\n$(OBJDIR)/erl_ao_firstfit_alloc.o: beam/erl_ao_firstfit_alloc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h 
../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/erl_ao_firstfit_alloc.h\n$(OBJDIR)/erl_arith.o: beam/erl_arith.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h 
beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h\n$(OBJDIR)/erl_async.o: beam/erl_async.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_sys_driver.h \\\n beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_process_dict.h beam/erl_hl_timer.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/erl_bits.h beam/module.h beam/register.h \\\n beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_thr_queue.h\n$(OBJDIR)/erl_bestfit_alloc.o: beam/erl_bestfit_alloc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h 
beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/erl_bestfit_alloc.h\n$(OBJDIR)/erl_bif_binary.o: beam/erl_bif_binary.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_binary.h \\\n beam/erl_bif_unique.h\n$(OBJDIR)/erl_bif_chksum.o: 
beam/erl_bif_chksum.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h beam/big.h\n$(OBJDIR)/erl_bif_ddll.o: beam/erl_bif_ddll.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n 
beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/erl_bif_unique.h\n$(OBJDIR)/erl_bif_guard.o: beam/erl_bif_guard.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_binary.h \\\n beam/erl_map.h\n$(OBJDIR)/erl_bif_info.o: beam/erl_bif_info.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n 
../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_nif.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_native_features_config.h \\\n beam/erl_nif_api_funcs.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h \\\n x86_64-apple-darwin14.5.0/erl_version.h \\\n $(TTF_DIR)/erl_compile_flags.h \\\n beam/erl_db_util.h beam/erl_binary.h beam/erl_db.h beam/erl_db_hash.h \\\n beam/erl_db_tree.h beam/erl_instrument.h beam/erl_mtrace.h beam/dist.h \\\n beam/erl_cpu_topology.h beam/erl_bif_unique.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h\n$(OBJDIR)/erl_bif_lists.o: beam/erl_bif_lists.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h 
beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h\n$(OBJDIR)/erl_bif_op.o: beam/erl_bif_op.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/erl_binary.h \\\n beam/erl_map.h\n$(OBJDIR)/erl_bif_os.o: beam/erl_bif_os.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h 
\\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h\n$(OBJDIR)/erl_bif_port.o: beam/erl_bif_port.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/erl_sys_driver.h \\\n beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_process_dict.h beam/erl_hl_timer.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/erl_bits.h beam/module.h beam/register.h \\\n beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n 
x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/erl_binary.h \\\n beam/erl_db_util.h beam/packet_parser.h\n$(OBJDIR)/erl_bif_re.o: beam/erl_bif_re.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h beam/big.h \\\n pcre/pcre.h\n$(OBJDIR)/erl_bif_trace.o: beam/erl_bif_trace.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h 
beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/beam_bp.h \\\n beam/erl_binary.h beam/erl_bif_unique.h\n$(OBJDIR)/erl_bif_unique.o: beam/erl_bif_unique.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/export.h beam/index.h beam/hash.h \\\n beam/code_ix.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_monitors.h \\\n beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_hl_timer.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/erl_bits.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_bif_unique.h \\\n beam/big.h beam/global.h beam/atom.h beam/module.h beam/register.h \\\n beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/erl_bits.o: beam/erl_bits.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n 
../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_binary.h\n$(OBJDIR)/erl_cpu_topology.o: beam/erl_cpu_topology.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n 
../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_cpu_topology.h\n$(OBJDIR)/erl_db.o: beam/erl_db.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_db.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/big.h\n$(OBJDIR)/erl_db_hash.o: beam/erl_db_hash.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n 
../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_db.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/big.h \\\n beam/erl_binary.h\n$(OBJDIR)/erl_db_tree.o: beam/erl_db_tree.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_db.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h 
beam/big.h \\\n beam/erl_binary.h\n$(OBJDIR)/erl_db_util.o: beam/erl_db_util.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_db.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/big.h \\\n beam/erl_binary.h beam/erl_map.h\n$(OBJDIR)/erl_debug.o: beam/erl_debug.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h 
beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/big.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/beam_catches.h beam/erl_map.h\n$(OBJDIR)/erl_drv_thread.o: beam/erl_drv_thread.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/erl_fun.o: beam/erl_fun.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n 
../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/erl_gc.o: beam/erl_gc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h beam/erl_map.h hipe/hipe_process.h beam/erl_bits.h \\\n beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \\\n beam/erl_debug.h beam/error.h beam/erl_trace.h 
beam/dtrace-wrapper.h \\\n beam/erl_db.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h \\\n beam/beam_catches.h beam/erl_binary.h beam/big.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_mode_switch.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n beam/erl_bif_unique.h\n$(OBJDIR)/erl_goodfit_alloc.o: beam/erl_goodfit_alloc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/erl_goodfit_alloc.h\n$(OBJDIR)/erl_hl_timer.o: beam/erl_hl_timer.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n 
../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_bif_unique.h \\\n beam/big.h beam/erl_rbtree.h\n$(OBJDIR)/erl_init.o: beam/erl_init.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n x86_64-apple-darwin14.5.0/erl_version.h beam/erl_db.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_db_util.h \\\n beam/erl_db_hash.h beam/erl_db_tree.h beam/beam_bp.h beam/erl_binary.h \\\n beam/dist.h beam/erl_instrument.h beam/erl_mtrace.h \\\n beam/erl_printf_term.h 
../include/internal/erl_printf_format.h \\\n beam/packet_parser.h beam/erl_cpu_topology.h beam/erl_thr_queue.h \\\n beam/erl_bif_unique.h beam/big.h hipe/hipe_mode_switch.h \\\n hipe/hipe_stack.h hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n hipe/hipe_signal.h\n$(OBJDIR)/erl_instrument.o: beam/erl_instrument.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/big.h \\\n beam/erl_instrument.h beam/erl_mtrace.h\n$(OBJDIR)/erl_lock_check.o: beam/erl_lock_check.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n 
../include/internal/erl_printf.h\n$(OBJDIR)/erl_lock_count.o: beam/erl_lock_count.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h\n$(OBJDIR)/erl_map.o: beam/erl_map.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h \\\n beam/erl_map.h\n$(OBJDIR)/erl_math.o: beam/erl_math.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h 
sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h\n$(OBJDIR)/erl_md5.o: beam/erl_md5.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h 
beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h\n$(OBJDIR)/erl_message.o: beam/erl_message.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_binary.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h\n$(OBJDIR)/erl_monitors.o: beam/erl_monitors.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n 
../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_db.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/big.h\n$(OBJDIR)/erl_mtrace.o: beam/erl_mtrace.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_sock.h \\\n 
../include/internal/erl_memory_trace_protocol.h beam/erl_mtrace.h\n$(OBJDIR)/erl_nif.o: beam/erl_nif.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_nif.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_native_features_config.h \\\n beam/erl_drv_nif.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_nif_api_funcs.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h beam/erl_process_dict.h \\\n beam/erl_hl_timer.h sys/common/erl_mseg.h sys/common/erl_mmap.h \\\n beam/erl_async.h beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h \\\n beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \\\n beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/erl_binary.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/big.h beam/erl_map.h beam/beam_bp.h beam/erl_bif_unique.h\n$(OBJDIR)/erl_node_tables.o: beam/erl_node_tables.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n 
beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/dist.h \\\n beam/big.h beam/erl_db.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_db_util.h \\\n beam/erl_db_hash.h beam/erl_db_tree.h\n$(OBJDIR)/erl_port_task.o: beam/erl_port_task.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/dist.h \\\n sys/common/erl_check_io.h\n$(OBJDIR)/erl_posix_str.o: beam/erl_posix_str.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n ../include/internal/erl_errno.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n 
../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h\n$(OBJDIR)/erl_printf_term.o: beam/erl_printf_term.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_printf_term.h ../include/internal/erl_printf_format.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h beam/sys.h \\\n sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/big.h beam/erl_vm.h \\\n beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h beam/erl_drv_nif.h \\\n beam/erl_process_dict.h beam/erl_hl_timer.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/erl_bits.h beam/module.h beam/register.h \\\n beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_map.h \\\n beam/erl_binary.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h\n$(OBJDIR)/erl_process.o: beam/erl_process.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h 
\\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_db.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/dist.h \\\n beam/beam_catches.h beam/erl_instrument.h beam/erl_mtrace.h \\\n beam/erl_binary.h beam/beam_bp.h beam/erl_cpu_topology.h \\\n beam/erl_thr_queue.h beam/erl_bif_unique.h beam/big.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n hipe/hipe_signal.h\n$(OBJDIR)/erl_process_dict.o: beam/erl_process_dict.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h 
beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \\\n x86_64-apple-darwin14.5.0/erl_version.h\n$(OBJDIR)/erl_process_dump.o: beam/erl_process_dump.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_db.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/dist.h \\\n beam/beam_catches.h beam/erl_binary.h\n$(OBJDIR)/erl_process_lock.o: beam/erl_process_lock.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_process.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n 
../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_process_lock.h \\\n beam/erl_port.h beam/erl_vm.h beam/erl_message.h beam/external.h \\\n beam/erl_node_tables.h beam/hash.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_monitors.h \\\n beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_hl_timer.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \\\n beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \\\n beam/erl_bits.h\n$(OBJDIR)/erl_ptab.o: beam/erl_ptab.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_ptab.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_time.h beam/erl_utils.h \\\n beam/erl_thr_progress.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_alloc_util.h beam/erl_sched_spec_pre_alloc.h \\\n beam/erl_monitors.h beam/erl_process.h beam/erl_process_lock.h \\\n beam/erl_port.h beam/erl_vm.h beam/erl_message.h beam/external.h \\\n beam/erl_node_tables.h beam/hash.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h \\\n beam/erl_node_container_utils.h beam/erl_hl_timer.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \\\n beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \\\n beam/erl_bits.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n beam/global.h beam/atom.h beam/module.h beam/register.h beam/erl_fun.h \\\n beam/benchmark.h beam/erl_debug.h beam/error.h beam/erl_trace.h \\\n beam/dtrace-wrapper.h 
beam/erl_binary.h\n$(OBJDIR)/erl_sched_spec_pre_alloc.o: beam/erl_sched_spec_pre_alloc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_process.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_process_lock.h \\\n beam/erl_port.h beam/erl_vm.h beam/erl_message.h beam/external.h \\\n beam/erl_node_tables.h beam/hash.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_monitors.h \\\n beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_hl_timer.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \\\n beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \\\n beam/erl_bits.h\n$(OBJDIR)/erl_term.o: beam/erl_term.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h 
beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_map.h\n$(OBJDIR)/erl_thr_progress.o: beam/erl_thr_progress.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_thr_progress.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_vm.h \\\n beam/erl_message.h beam/external.h beam/erl_node_tables.h beam/hash.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_alloc_util.h beam/erl_sched_spec_pre_alloc.h \\\n beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \\\n beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_hl_timer.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \\\n sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \\\n hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \\\n beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \\\n beam/erl_bits.h beam/global.h beam/atom.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(OBJDIR)/erl_thr_queue.o: beam/erl_thr_queue.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/erl_thr_queue.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h 
beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_vm.h \
 beam/erl_message.h beam/external.h beam/erl_node_tables.h beam/hash.h \
 beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \
 beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \
 beam/erl_bits.h
$(OBJDIR)/erl_time_sup.o: beam/erl_time_sup.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/big.h \
 beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h
$(OBJDIR)/erl_trace.o: beam/erl_trace.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/big.h \
 beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h beam/dist.h \
 beam/beam_bp.h beam/erl_binary.h
$(OBJDIR)/erl_unicode.o: beam/erl_unicode.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \
 x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h beam/big.h \
 beam/erl_unicode.h beam/erl_unicode_normalize.h
$(OBJDIR)/erl_zlib.o: beam/erl_zlib.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/erl_zlib.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h
$(OBJDIR)/export.o: beam/export.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/external.o: beam/external.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \
 hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \
 hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \
 $(TTF_DIR)/hipe_amd64_asm.h \
 hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \
 x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/dist.h \
 beam/erl_binary.h beam/erl_zlib.h beam/erl_map.h
$(OBJDIR)/hash.o: beam/hash.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/index.o: beam/index.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/io.o: beam/io.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_nif.h \
 ../include/x86_64-apple-darwin14.5.0/erl_native_features_config.h \
 beam/erl_nif_api_funcs.h beam/erl_vm.h beam/global.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_process_dict.h beam/erl_hl_timer.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/erl_bits.h beam/module.h beam/register.h \
 beam/erl_fun.h beam/benchmark.h beam/erl_debug.h beam/error.h \
 beam/erl_trace.h beam/dtrace-wrapper.h beam/dist.h beam/big.h \
 beam/erl_binary.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \
 x86_64-apple-darwin14.5.0/erl_version.h beam/erl_map.h \
 beam/erl_bif_unique.h
$(OBJDIR)/module.o: beam/module.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/packet_parser.o: beam/packet_parser.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/packet_parser.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h
$(OBJDIR)/register.o: beam/register.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \
 x86_64-apple-darwin14.5.0/erl_bif_table.h
$(OBJDIR)/safe_hash.o: beam/safe_hash.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/safe_hash.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h
$(OBJDIR)/time.o: beam/time.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/utils.o: beam/utils.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h beam/erl_map.h hipe/hipe_process.h beam/erl_bits.h \
 beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \
 beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \
 beam/big.h beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h \
 beam/erl_binary.h beam/packet_parser.h beam/erl_db.h \
 beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h beam/dist.h \
 beam/erl_thr_queue.h beam/beam_bp.h sys/common/erl_check_io.h \
 beam/erl_bif_unique.h hipe/hipe_mode_switch.h hipe/hipe_stack.h \
 hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \
 hipe/hipe_amd64_glue.h \
 $(TTF_DIR)/hipe_amd64_asm.h \
 hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h
$(OBJDIR)/efile_drv.o: drivers/common/efile_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h drivers/common/erl_efile.h drivers/common/gzio.h \
 beam/dtrace-wrapper.h
$(OBJDIR)/gzio.o: drivers/common/gzio.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h drivers/common/erl_efile.h beam/sys.h \
 sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h drivers/common/gzio_zutil.h \
 beam/erl_zlib.h drivers/common/gzio.h
$(OBJDIR)/inet_drv.o: drivers/common/inet_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/packet_parser.h beam/erl_sock.h
$(OBJDIR)/ram_file_drv.o: drivers/common/ram_file_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h drivers/common/gzio.h
$(OBJDIR)/zlib_drv.o: drivers/common/zlib_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h
$(OBJDIR)/bin_drv.o: drivers/unix/bin_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h
$(OBJDIR)/multi_drv.o: drivers/unix/multi_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h
$(OBJDIR)/sig_drv.o: drivers/unix/sig_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h
$(OBJDIR)/ttsl_drv.o: drivers/unix/ttsl_drv.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h
$(OBJDIR)/unix_efile.o: drivers/unix/unix_efile.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h drivers/common/erl_efile.h
$(OBJDIR)/erl_child_setup.o: sys/unix/erl_child_setup.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h
$(OBJDIR)/erl_main.o: sys/unix/erl_main.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/erl_unix_sys_ddll.o: sys/unix/erl_unix_sys_ddll.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \
 beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \
 beam/hash.h beam/index.h beam/atom.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/sys.o: sys/unix/sys.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_thr_progress.h \
 beam/erl_process.h beam/erl_process_lock.h beam/erl_port.h \
 beam/erl_vm.h beam/erl_message.h beam/external.h \
 beam/erl_node_tables.h beam/hash.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_alloc_util.h beam/erl_sched_spec_pre_alloc.h \
 beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \
 beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \
 beam/erl_bits.h beam/global.h beam/atom.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \
 x86_64-apple-darwin14.5.0/erl_bif_table.h sys/common/erl_check_io.h \
 beam/erl_cpu_topology.h
$(OBJDIR)/sys_float.o: sys/unix/sys_float.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \
 beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/sys_time.o: sys/unix/sys_time.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \
 beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \
 sys/common/erl_os_monotonic_time_extender.h
$(OBJDIR)/erl_check_io.kp.o $(OBJDIR)/erl_check_io.nkp.o: sys/common/erl_check_io.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \
 beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \
 sys/common/erl_check_io.h sys/common/erl_poll.h
$(OBJDIR)/erl_mmap.o: sys/common/erl_mmap.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_vm.h \
 beam/erl_message.h beam/external.h beam/erl_node_tables.h beam/hash.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_monitors.h \
 beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \
 beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \
 beam/erl_bits.h beam/atom.h
$(OBJDIR)/erl_mseg.o: sys/common/erl_mseg.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h sys/common/erl_mseg.h \
 $(TTF_DIR)/erl_alloc_types.h \
 sys/common/erl_mmap.h beam/global.h beam/erl_alloc.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \
 beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 beam/erl_async.h beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h \
 beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \
 beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \
 beam/erl_mtrace.h beam/big.h sys/common/erl_util_queue.h
$(OBJDIR)/erl_mtrace_sys_wrap.o: sys/common/erl_mtrace_sys_wrap.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_mtrace.h \
 $(TTF_DIR)/erl_alloc_types.h
$(OBJDIR)/erl_os_monotonic_time_extender.o: \
 sys/common/erl_os_monotonic_time_extender.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 sys/common/erl_os_monotonic_time_extender.h beam/sys.h \
 sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h
$(OBJDIR)/erl_poll.kp.o $(OBJDIR)/erl_poll.nkp.o: sys/common/erl_poll.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 sys/common/erl_poll.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_thr_progress.h \
 beam/erl_process.h beam/erl_process_lock.h beam/erl_port.h \
 beam/erl_vm.h beam/erl_message.h beam/external.h \
 beam/erl_node_tables.h beam/hash.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_alloc_util.h beam/erl_sched_spec_pre_alloc.h \
 beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/export.h beam/index.h beam/code_ix.h \
 beam/beam_load.h $(TTF_DIR)/beam_opcodes.h \
 beam/erl_bits.h
$(OBJDIR)/erl_sys_common_misc.o: sys/common/erl_sys_common_misc.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \
 beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \
 beam/export.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \
 beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \
 sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \
 beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \
 beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \
 beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h
$(OBJDIR)/erl_atom_table.o: x86_64-apple-darwin14.5.0/erl_atom_table.c
$(OBJDIR)/erl_bif_table.o: x86_64-apple-darwin14.5.0/erl_bif_table.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/export.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/index.h beam/hash.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/code_ix.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_vm.h \
 beam/erl_message.h beam/external.h beam/erl_node_tables.h \
 beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/erl_bits.h beam/bif.h \
 x86_64-apple-darwin14.5.0/erl_bif_table.h
$(OBJDIR)/erl_bif_wrap.o: x86_64-apple-darwin14.5.0/erl_bif_wrap.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/export.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/index.h beam/hash.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/code_ix.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_vm.h \
 beam/erl_message.h beam/external.h beam/erl_node_tables.h \
 beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/erl_bits.h beam/global.h beam/atom.h \
 beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \
 beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \
 beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h
$(OBJDIR)/erl_pbifs.o: x86_64-apple-darwin14.5.0/erl_pbifs.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/export.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/index.h beam/hash.h \
 beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/code_ix.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_vm.h \
 beam/erl_message.h beam/external.h beam/erl_node_tables.h \
 beam/erl_monitors.h beam/erl_port_task.h beam/erl_sys_driver.h \
 beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/erl_bits.h beam/global.h beam/atom.h \
 beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \
 beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \
 beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h
$(OBJDIR)/preload.o: x86_64-apple-darwin14.5.0/preload.c
$(OBJDIR)/beam_opcodes.o: $(TTF_DIR)/beam_opcodes.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \
 ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \
 beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \
 ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
 ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \
 ../include/internal/gcc/ethread.h \
 ../include/internal/gcc/ethr_dw_atomic.h \
 ../include/internal/libatomic_ops/ethread.h \
 ../include/internal/ethr_atomics.h \
 ../include/internal/ethr_optimized_fallbacks.h \
 ../include/internal/pthread/ethr_event.h \
 ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \
 ../include/internal/erl_printf.h beam/erl_vm.h beam/export.h \
 beam/index.h beam/hash.h beam/erl_alloc.h \
 $(TTF_DIR)/erl_alloc_types.h \
 beam/erl_thr_progress.h beam/erl_alloc_util.h \
 beam/erl_sched_spec_pre_alloc.h beam/code_ix.h beam/beam_load.h \
 $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \
 beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \
 beam/external.h beam/erl_node_tables.h beam/erl_monitors.h \
 beam/erl_port_task.h beam/erl_sys_driver.h beam/erl_driver.h \
 ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \
 beam/erl_drv_nif.h beam/erl_process_dict.h \
 beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \
 beam/erl_utils.h beam/erl_hl_timer.h \
 x86_64-apple-darwin14.5.0/erl_atom_table.h sys/common/erl_mseg.h \
 sys/common/erl_mmap.h beam/erl_async.h beam/erl_gc.h \
 hipe/hipe_process.h beam/erl_bits.h beam/bif.h \
 x86_64-apple-darwin14.5.0/erl_bif_table.h
$(OBJDIR)/driver_tab.o: $(TTF_DIR)/driver_tab.c \
 $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \
 beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \
 ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \
 beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \
 ../include/internal/ethread.h ../include/internal/ethread_inline.h \
 ../include/internal/x86_64/ethread.h \
 ../include/internal/x86_64/../i386/ethread.h \
 ../include/internal/i386/ethr_membar.h \
 ../include/internal/i386/atomic.h \
 ../include/internal/i386/ethr_dw_atomic.h \
../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h\n$(ZLIB_OBJDIR)/adler32.o: zlib/adler32.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zutil.h zlib/zlib.h zlib/zconf.h\n$(ZLIB_OBJDIR)/compress.o: zlib/compress.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zlib.h zlib/zconf.h\n$(ZLIB_OBJDIR)/crc32.o: zlib/crc32.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zutil.h zlib/zlib.h zlib/zconf.h zlib/crc32.h\n$(ZLIB_OBJDIR)/uncompr.o: zlib/uncompr.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zlib.h zlib/zconf.h\n$(ZLIB_OBJDIR)/deflate.o: zlib/deflate.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/deflate.h zlib/zutil.h zlib/zlib.h zlib/zconf.h\n$(ZLIB_OBJDIR)/trees.o: zlib/trees.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/deflate.h zlib/zutil.h zlib/zlib.h zlib/zconf.h zlib/trees.h\n$(ZLIB_OBJDIR)/zutil.o: zlib/zutil.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zutil.h zlib/zlib.h zlib/zconf.h zlib/gzguts.h\n$(ZLIB_OBJDIR)/inflate.o: zlib/inflate.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zutil.h zlib/zlib.h zlib/zconf.h zlib/inftrees.h zlib/inflate.h \\\n zlib/inffast.h zlib/inffixed.h\n$(ZLIB_OBJDIR)/inftrees.o: zlib/inftrees.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zutil.h zlib/zlib.h zlib/zconf.h zlib/inftrees.h\n$(ZLIB_OBJDIR)/inffast.o: zlib/inffast.c \\\n /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0/config.h \\\n zlib/zutil.h zlib/zlib.h zlib/zconf.h zlib/inftrees.h zlib/inflate.h \\\n zlib/inffast.h\n$(OBJDIR)/hipe_amd64.o: hipe/hipe_amd64.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h 
sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_bif0.h \\\n hipe/hipe_native_bif.h x86_64-apple-darwin14.5.0/erl_bif_list.h \\\n $(TTF_DIR)/hipe_literals.h\n$(OBJDIR)/hipe_arm.o: hipe/hipe_arm.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h 
x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_binary.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_native_bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_list.h hipe/hipe_bif0.h\n$(OBJDIR)/hipe_bif0.o: hipe/hipe_bif0.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/error.h beam/erl_vm.h \\\n beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_db.h \\\n beam/erl_db_util.h beam/erl_db_hash.h beam/erl_db_tree.h \\\n beam/erl_binary.h hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_stack.h hipe/hipe_mode_switch.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n hipe/hipe_native_bif.h x86_64-apple-darwin14.5.0/erl_bif_list.h \\\n hipe/hipe_bif0.h $(TTF_DIR)/hipe_literals.h \\\n hipe/hipe_primops.h 
hipe/hipe_amd64_primops.h\n$(OBJDIR)/hipe_bif1.o: hipe/hipe_bif1.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h hipe/hipe_bif0.h \\\n hipe/hipe_bif1.h\n$(OBJDIR)/hipe_bif2.o: hipe/hipe_bif2.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/error.h beam/erl_vm.h \\\n beam/global.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h 
\\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h beam/erl_map.h \\\n hipe/hipe_debug.h hipe/hipe_mode_switch.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h\n$(OBJDIR)/hipe_bif64.o: hipe/hipe_bif64.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/big.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_bif0.h hipe/hipe_bif64.h\n$(OBJDIR)/hipe_debug.o: hipe/hipe_debug.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n 
../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/beam_catches.h hipe/hipe_mode_switch.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n hipe/hipe_debug.h beam/erl_map.h\n$(OBJDIR)/hipe_gc.o: hipe/hipe_gc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h 
beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h beam/erl_map.h hipe/hipe_process.h beam/erl_bits.h \\\n beam/module.h beam/register.h beam/erl_fun.h beam/benchmark.h \\\n beam/erl_debug.h beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n hipe/hipe_stack.h hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_gc.h hipe/hipe_amd64_gc.h \\\n $(TTF_DIR)/hipe_amd64_asm.h hipe/hipe_x86_gc.h \\\n $(TTF_DIR)/hipe_x86_asm.h\n$(OBJDIR)/hipe_mkliterals.o: hipe/hipe_mkliterals.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n $(TTF_DIR)/hipe_arm_asm.h \\\n $(TTF_DIR)/hipe_ppc_asm.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n $(TTF_DIR)/hipe_x86_asm.h \\\n $(TTF_DIR)/hipe_sparc_asm.h beam/erl_binary.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h\n$(OBJDIR)/hipe_mode_switch.o: hipe/hipe_mode_switch.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h 
\\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h \\\n beam/beam_catches.h hipe/hipe_mode_switch.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h hipe/hipe_bif0.h\n$(OBJDIR)/hipe_native_bif.o: hipe/hipe_native_bif.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_vm.h beam/global.h \\\n beam/erl_alloc.h $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_node_container_utils.h \\\n beam/erl_ptab.h beam/erl_time.h beam/erl_utils.h beam/erl_monitors.h \\\n beam/hash.h beam/index.h beam/atom.h \\\n x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h 
beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h beam/erl_binary.h \\\n hipe/hipe_mode_switch.h hipe/hipe_stack.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_amd64_glue.h \\\n $(TTF_DIR)/hipe_amd64_asm.h \\\n hipe/hipe_x86_glue.h $(TTF_DIR)/hipe_x86_asm.h \\\n hipe/hipe_native_bif.h x86_64-apple-darwin14.5.0/erl_bif_list.h\n$(OBJDIR)/hipe_ppc.o: hipe/hipe_ppc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/erl_binary.h \\\n beam/bif.h x86_64-apple-darwin14.5.0/erl_bif_table.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_native_bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_list.h hipe/hipe_bif0.h\n$(OBJDIR)/hipe_risc_stack.o: hipe/hipe_risc_stack.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h 
\\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h\n$(OBJDIR)/hipe_sparc.o: hipe/hipe_sparc.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h 
beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_native_bif.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n x86_64-apple-darwin14.5.0/erl_bif_list.h\n$(OBJDIR)/hipe_stack.o: hipe/hipe_stack.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h\n$(OBJDIR)/hipe_x86.o: hipe/hipe_x86.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n 
../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h hipe/hipe_arch.h \\\n hipe/hipe_amd64.h hipe/hipe_x86.h hipe/hipe_native_bif.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h \\\n x86_64-apple-darwin14.5.0/erl_bif_list.h \\\n $(TTF_DIR)/hipe_literals.h\n$(OBJDIR)/hipe_x86_signal.o: hipe/hipe_x86_signal.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/sys.h sys/unix/erl_unix_sys.h ../include/internal/erl_errno.h \\\n ../include/internal/erl_misc_utils.h beam/erl_lock_check.h \\\n beam/erl_smp.h beam/erl_threads.h ../include/internal/ethread.h \\\n ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n $(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h hipe/hipe_signal.h\n$(OBJDIR)/hipe_x86_stack.o: hipe/hipe_x86_stack.c \\\n $(ERL_TOP)/erts/x86_64-apple-darwin14.5.0/config.h \\\n beam/global.h beam/sys.h sys/unix/erl_unix_sys.h \\\n ../include/internal/erl_errno.h ../include/internal/erl_misc_utils.h \\\n beam/erl_lock_check.h beam/erl_smp.h beam/erl_threads.h \\\n ../include/internal/ethread.h ../include/internal/ethread_inline.h \\\n ../include/internal/x86_64/ethread.h \\\n ../include/internal/x86_64/../i386/ethread.h \\\n ../include/internal/i386/ethr_membar.h \\\n ../include/internal/i386/atomic.h \\\n ../include/internal/i386/ethr_dw_atomic.h \\\n ../include/internal/i386/spinlock.h ../include/internal/i386/rwlock.h \\\n ../include/internal/gcc/ethread.h \\\n ../include/internal/gcc/ethr_dw_atomic.h \\\n ../include/internal/libatomic_ops/ethread.h \\\n ../include/internal/ethr_atomics.h \\\n ../include/internal/ethr_optimized_fallbacks.h \\\n ../include/internal/pthread/ethr_event.h \\\n ../include/internal/ethr_mutex.h beam/erl_lock_count.h beam/erl_term.h \\\n ../include/internal/erl_printf.h beam/erl_alloc.h \\\n 
$(TTF_DIR)/erl_alloc_types.h \\\n beam/erl_thr_progress.h beam/erl_alloc_util.h \\\n beam/erl_sched_spec_pre_alloc.h beam/erl_vm.h \\\n beam/erl_node_container_utils.h beam/erl_ptab.h beam/erl_time.h \\\n beam/erl_utils.h beam/erl_monitors.h beam/hash.h beam/index.h \\\n beam/atom.h x86_64-apple-darwin14.5.0/erl_atom_table.h beam/code_ix.h \\\n beam/export.h beam/beam_load.h \\\n $(TTF_DIR)/beam_opcodes.h beam/erl_process.h \\\n beam/erl_process_lock.h beam/erl_port.h beam/erl_message.h \\\n beam/external.h beam/erl_node_tables.h beam/erl_port_task.h \\\n beam/erl_sys_driver.h beam/erl_driver.h \\\n ../include/x86_64-apple-darwin14.5.0/erl_int_sizes_config.h \\\n beam/erl_drv_nif.h beam/erl_process_dict.h beam/erl_hl_timer.h \\\n sys/common/erl_mseg.h sys/common/erl_mmap.h beam/erl_async.h \\\n beam/erl_gc.h hipe/hipe_process.h beam/erl_bits.h beam/module.h \\\n beam/register.h beam/erl_fun.h beam/benchmark.h beam/erl_debug.h \\\n beam/error.h beam/erl_trace.h beam/dtrace-wrapper.h beam/bif.h \\\n x86_64-apple-darwin14.5.0/erl_bif_table.h hipe/hipe_stack.h \\\n hipe/hipe_arch.h hipe/hipe_amd64.h hipe/hipe_x86.h \\\n $(TTF_DIR)/hipe_amd64_asm.h\n"
},
{
"alpha_fraction": 0.6169491410255432,
"alphanum_fraction": 0.6237288117408752,
"avg_line_length": 20.071428298950195,
"blob_id": "cbd54bd1b1363f7853400172e55298f60065129d",
"content_id": "8459b909ba20c99c786b7dc4f0ad2e270f933158",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 14,
"path": "/bin/cd2iso",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nDATE=`date +%s`\nDISK=`drutil status | grep disk | awk -F/ '{ print $3 }'`\nif [[ ! -z \"$DISK\" ]]; then\n\techo \"Found CD at $DISK.\"\n\techo \"Unmounting $DISK.\"\n\tdiskutil unmountDisk $DISK\n\techo \"Creating ISO from $DISK.\"\n\tdd if=/dev/$DISK of=$DATE.iso\nelse\n\techo \"NO CD FOUND.\"\n\texit 1\nfi\n"
},
{
"alpha_fraction": 0.5485991835594177,
"alphanum_fraction": 0.5689270496368408,
"avg_line_length": 35.586971282958984,
"blob_id": "afc32a364e2d105433dfa11634f9c058c907c455",
"content_id": "7b3852e164c51e5a6bc9b8fdce74fc64addeff48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 51112,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 1397,
"path": "/bin/pdf2oo",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# This file is part of the \"pdf2oo\" library.\n#\n# Pdf2oo is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation (version 2 of the License).\n# \n# Pdf2oo is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with pdf2oo. See the file LICENSE. If you haven't received\n# a copy of the GNU General Public License, write to:\n# \n# Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA\n# 02111-1307 USA\n#\n# Author: Vincenzo Ciancia\n#\n# [email protected]\n# [email protected]\n\nresolution=\"150\"\ntransparent=\"false\"\ncolor=\"false\"\nimgtype=\"jpg\"\ntype=\"odp\"\nportrait=\"portrait\"\ninteractive=\"false\"\n\nusage(){\n cat<<EOF\nusage: $(basename \"$0\") [-batch|-interactive] [-color|-gray] [-r resolution] [-transparent|-opaque] input_file [output_file]\n\nResolution is in dpi. Defaults are 150 dpi, grayscale, not transparent, interactive.\n\nEOF\n exit 0\n}\n\ncheck_requirements(){\n local MISSING=\"COMMAND | SUSE PACKAGE | DEBIAN PACKAGE\n---------+--------------+---------------\"\n local failed=false\n local file\n which pdfinfo > /dev/null || { MISSING=\"$MISSING\npdfinfo | xpdf-config | xpdf-utils\"; \n\tfailed=true; }\n\n which pdftoppm > /dev/null || { MISSING=\"$MISSING\npdftoppm | xpdf | xpdf-reader\";\n\tfailed=true; }\n\n which convert > /dev/null || { MISSING=\"$MISSING\nconvert | ImageMagick | imagemagick\"; \n\tfailed=true; }\n\n which zip > /dev/null || { MISSING=\"$MISSING\nzip | zip | zip\"; \n\tfailed=true; }\n\n if [ \"$failed\" = \"true\" ]\n\tthen file=$(mktemp)\n\tcat << EOF > $file\nYou need the following commands to run $(basename \"$0\"):\n\n$MISSING\n\nInstall them using your distribution's package manager, then retry.\nEOF\n\twhich kdialog > /dev/null && kdialog --msgbox \"$(cat $file)\" || cat $file\n\trm $file\t\n\texit 1\n fi\n}\n\ncheck_ui_requirements(){\n local MISSING=\"COMMAND | SUSE PACKAGE | DEBIAN PACKAGE\n---------+--------------+---------------\"\n local failed=false\n\n which kdialog > /dev/null || { MISSING=\"$MISSING\nkdialog | kdebase3 | kdebase-bin\"; \n\tfailed=true; }\n\n which kmdr-executor > /dev/null || { MISSING=\"$MISSING\nkmdr-executor | kdewebdev3 | kommander\"; \n\tfailed=true; }\n\n which dcop > /dev/null || { MISSING=\"$MISSING\ndcop | kdelibs3 | kdelibs-bin\"; \n\tfailed=true; }\n\n if [ \"$failed\" = \"true\" ]\n\tthen\n\tfile=$(mktemp)\n\tcat <<EOF > $file\nYou need the following commands to run $(basename \"$0\") in _graphical_ mode:\n\n$MISSING\n\nInstall them using your distribution's package manager, then retry. You can still use batch mode (-batch command line option) in a terminal window. 
\nEOF\n\tif tty -s; then\n\t cat $file\n\t rm $file\n\t echo \n\t echo Proceeding interactively since we are in an interactive shell\n\t echo\n\t echo\n\t interactive=false\n\telse\n\t which kdialog > /dev/null && kdialog --msgbox \"$(cat $file)\" || cat $file\n\t rm $file\n\t exit 1\t \n\tfi\t\n fi\n}\n\nexit=false\n\nwhile [ \"${1:0:1}\" == \"-\" ]\n do case \"$1\" in\n \"-transparent\")\n\t transparent=\"true\"\n\t ;;\n \"-opaque\")\n\t transparent=\"false\"\n\t ;;\n \"-color\")\t \n\t color=\"true\"\n\t ;;\n \"-gray\")\n\t color=\"false\"\n\t ;;\n \"-r\")\n\tshift\n\tresolution=\"$1\"\n\t;;\n \"-batch\")\n interactive=\"false\"\n\t;;\n \"-interactive\")\n\tinteractive=\"true\"\n\t;;\n *)\n\tusage\n\t;;\n esac\n shift\ndone\n\nif [ \"$DISPLAY\" = \"\" ]\n then interactive=\"false\"\nfi\n\ncheck_requirements\n\nif [ \"$interactive\" = \"true\" ]\n then check_ui_requirements\nfi\n\nif [ \"$1\" = \"\" -a \"$interactive\" = \"false\" ]\n then usage\nfi\n\nmy_exit(){\n if [ -d \"$destdir\" ]\n then rm -rf \"$destdir\"\n fi\n if [ \"$pbar\" != \"\" ]\n\tthen dcop \"$pbar\" close\n fi\n if [ \"$interactive\" = \"true\" ]\n\tthen kdialog --error \"Error while $phase$1\"\n else\n echo \"Error while $phase$1\"\n fi\n exit 1\n}\n\nnormalize(){\n local PATHNAME=\"$1\"\n local DESTPATHNAME\n if (echo \"$PATHNAME\" | grep -q \"^file://\")\n\tthen PATHNAME=\"${PATHNAME:7}\"\n fi\n if [ \"${PATHNAME:0:1}\" = \"/\" ]\n\tthen DESTPATHNAME=\"$PATHNAME\"\n else DESTPATHNAME=\"$(pwd)/$PATHNAME\"\n fi\n echo \"$DESTPATHNAME\"\n}\n\nphase=\"accessing source file\"\n\nif [ \"$1\" = \"\" ]\n then \n if [ \"$interactive\" = \"false\" ]\n\tthen my_exit\n fi\nelse\n srcfile=$(normalize \"$1\")\n if [ \"$interactive\" = \"false\" ]\n\tthen \n\tif ! [ -f \"$srcfile\" ]\n\t then my_exit \": $srcfile not found\"\n\tfi\n fi\nfi\n\nif [ \"$2\" != \"\" ]\n then destfile=\"$2\" \nelif [ \"$srcfile\" != \"\" ]\n then destfile=$(dirname \"$srcfile\")/$(basename \"$srcfile\" .pdf).$type\nfi\n\nphase=\"initializing values\"\n\nif [ \"$interactive\" = \"true\" ] \n then export srcfile destfile resolution color transparent\n cat > ~/.pdf2oo_dialog.kmdr << ____END____\n<!DOCTYPE UI><UI version=\"3.0\" stdsetdef=\"1\">\n<class>pdf2oo</class>\n<widget class=\"Dialog\">\n <property name=\"name\">\n <cstring>pdf2oo</cstring>\n </property>\n <property name=\"geometry\">\n <rect>\n <x>0</x>\n <y>0</y>\n <width>383</width>\n <height>270</height>\n </rect>\n </property>\n <property name=\"sizePolicy\">\n <sizepolicy>\n <hsizetype>5</hsizetype>\n <vsizetype>5</vsizetype>\n <horstretch>0</horstretch>\n <verstretch>0</verstretch>\n </sizepolicy>\n </property>\n <property name=\"caption\">\n <string>pdf2oo</string>\n </property>\n <property name=\"focusPolicy\">\n <enum>NoFocus</enum>\n </property>\n <property name=\"acceptDrops\">\n <bool>true</bool>\n </property>\n <property name=\"populationText\">\n <string></string>\n </property>\n <property name=\"associations\" stdset=\"0\">\n <stringlist>\n <string></string>\n <string>@infile.setText(@env(srcfile))\[email protected](@env(destfile))\[email protected](@env(resolution))\[email protected](@env(transparent))\[email protected](@env(color))\n\n\n</string>\n <string></string>\n </stringlist>\n </property>\n <vbox>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>11</number>\n </property>\n <property name=\"spacing\">\n <number>6</number>\n </property>\n <widget class=\"QLayoutWidget\">\n <property name=\"name\">\n 
<cstring>Layout5</cstring>\n </property>\n <grid>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>0</number>\n </property>\n <property name=\"spacing\">\n <number>6</number>\n </property>\n <widget class=\"Label\" row=\"2\" column=\"0\">\n <property name=\"name\">\n <cstring>Label1_2</cstring>\n </property>\n <property name=\"text\">\n <string>Output:</string>\n </property>\n </widget>\n <spacer row=\"1\" column=\"0\">\n <property name=\"name\">\n <cstring>Spacer7_2</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Expanding</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>51</width>\n <height>0</height>\n </size>\n </property>\n </spacer>\n <widget class=\"FileSelector\" row=\"2\" column=\"1\" rowspan=\"1\" colspan=\"2\">\n <property name=\"name\">\n <cstring>outfile</cstring>\n </property>\n <property name=\"sizePolicy\">\n <sizepolicy>\n <hsizetype>7</hsizetype>\n <vsizetype>0</vsizetype>\n <horstretch>58</horstretch>\n <verstretch>0</verstretch>\n </sizepolicy>\n </property>\n <property name=\"acceptDrops\">\n <bool>true</bool>\n </property>\n <property name=\"populationText\">\n <string></string>\n </property>\n <property name=\"associations\" stdset=\"0\">\n <stringlist>\n <string></string>\n </stringlist>\n </property>\n <property name=\"selectionFilter\">\n <string>*.odp</string>\n </property>\n <property name=\"selectionType\">\n <enum>Save</enum>\n </property>\n </widget>\n <widget class=\"ExecButton\" row=\"1\" column=\"1\">\n <property name=\"name\">\n <cstring>ExecButton1</cstring>\n </property>\n <property name=\"text\">\n <string>Suggest output file name from &input</string>\n </property>\n <property name=\"associations\" stdset=\"0\">\n <stringlist>\n <string>@outfile.setText(@infile.text.odp)</string>\n </stringlist>\n </property>\n </widget>\n <widget class=\"FileSelector\" row=\"0\" column=\"1\" rowspan=\"1\" colspan=\"2\">\n <property name=\"name\">\n <cstring>infile</cstring>\n </property>\n <property name=\"sizePolicy\">\n <sizepolicy>\n <hsizetype>7</hsizetype>\n <vsizetype>0</vsizetype>\n <horstretch>58</horstretch>\n <verstretch>0</verstretch>\n </sizepolicy>\n </property>\n <property name=\"acceptDrops\">\n <bool>true</bool>\n </property>\n <property name=\"populationText\">\n <string></string>\n </property>\n <property name=\"associations\" stdset=\"0\">\n <stringlist>\n <string></string>\n </stringlist>\n </property>\n <property name=\"selectionFilter\">\n <string>*.pdf</string>\n </property>\n </widget>\n <spacer row=\"1\" column=\"2\">\n <property name=\"name\">\n <cstring>Spacer8</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Expanding</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>91</width>\n <height>0</height>\n </size>\n </property>\n </spacer>\n <widget class=\"Label\" row=\"0\" column=\"0\">\n <property name=\"name\">\n <cstring>Label1</cstring>\n </property>\n <property name=\"text\">\n <string>Input:</string>\n </property>\n </widget>\n </grid>\n </widget>\n <widget class=\"QLayoutWidget\">\n <property name=\"name\">\n <cstring>Layout14</cstring>\n </property>\n <hbox>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>0</number>\n </property>\n <property name=\"spacing\">\n <number>6</number>\n </property>\n <widget 
class=\"ButtonGroup\">\n <property name=\"name\">\n <cstring>ButtonGroup1</cstring>\n </property>\n <property name=\"title\">\n <string>Color</string>\n </property>\n <property name=\"selectedId\" stdset=\"0\">\n <number>1</number>\n </property>\n <vbox>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>11</number>\n </property>\n <property name=\"spacing\">\n <number>6</number>\n </property>\n <widget class=\"RadioButton\">\n <property name=\"name\">\n <cstring>color</cstring>\n </property>\n <property name=\"text\">\n <string>&Color</string>\n </property>\n </widget>\n <widget class=\"RadioButton\">\n <property name=\"name\">\n <cstring>grayscale</cstring>\n </property>\n <property name=\"text\">\n <string>&Grayscale</string>\n </property>\n <property name=\"checked\">\n <bool>true</bool>\n </property>\n </widget>\n </vbox>\n </widget>\n <spacer>\n <property name=\"name\">\n <cstring>Spacer2</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Fixed</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>16</width>\n <height>16</height>\n </size>\n </property>\n </spacer>\n <widget class=\"ButtonGroup\">\n <property name=\"name\">\n <cstring>ButtonGroup1_2</cstring>\n </property>\n <property name=\"title\">\n <string>Transparency</string>\n </property>\n <property name=\"selectedId\" stdset=\"0\">\n <number>1</number>\n </property>\n <vbox>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>11</number>\n </property>\n <property name=\"spacing\">\n <number>6</number>\n </property>\n <widget class=\"RadioButton\">\n <property name=\"name\">\n <cstring>transparent</cstring>\n </property>\n <property name=\"text\">\n <string>Trans&parent</string>\n </property>\n </widget>\n <widget class=\"RadioButton\">\n <property name=\"name\">\n <cstring>opaque</cstring>\n </property>\n <property name=\"text\">\n <string>Op&aque</string>\n </property>\n <property name=\"checked\">\n <bool>true</bool>\n </property>\n </widget>\n </vbox>\n </widget>\n <spacer>\n <property name=\"name\">\n <cstring>Spacer3</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Expanding</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>16</width>\n <height>0</height>\n </size>\n </property>\n </spacer>\n </hbox>\n </widget>\n <widget class=\"QLayoutWidget\">\n <property name=\"name\">\n <cstring>Layout13</cstring>\n </property>\n <hbox>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>0</number>\n </property>\n <property name=\"spacing\">\n <number>6</number>\n </property>\n <widget class=\"Label\">\n <property name=\"name\">\n <cstring>Label2</cstring>\n </property>\n <property name=\"sizePolicy\">\n <sizepolicy>\n <hsizetype>1</hsizetype>\n <vsizetype>5</vsizetype>\n <horstretch>0</horstretch>\n <verstretch>0</verstretch>\n </sizepolicy>\n </property>\n <property name=\"text\">\n <string>Resolution:</string>\n </property>\n </widget>\n <widget class=\"SpinBoxInt\">\n <property name=\"name\">\n <cstring>resolution</cstring>\n </property>\n <property name=\"sizePolicy\">\n <sizepolicy>\n <hsizetype>1</hsizetype>\n <vsizetype>0</vsizetype>\n <horstretch>0</horstretch>\n <verstretch>0</verstretch>\n </sizepolicy>\n </property>\n <property 
name=\"maxValue\">\n <number>1200</number>\n </property>\n <property name=\"minValue\">\n <number>50</number>\n </property>\n <property name=\"lineStep\">\n <number>50</number>\n </property>\n </widget>\n <spacer>\n <property name=\"name\">\n <cstring>Spacer9</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Expanding</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>61</width>\n <height>0</height>\n </size>\n </property>\n </spacer>\n </hbox>\n </widget>\n <spacer>\n <property name=\"name\">\n <cstring>Spacer10</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Vertical</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Expanding</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>0</width>\n <height>60</height>\n </size>\n </property>\n </spacer>\n <widget class=\"QLayoutWidget\">\n <property name=\"name\">\n <cstring>Layout11</cstring>\n </property>\n <hbox>\n <property name=\"name\">\n <cstring>unnamed</cstring>\n </property>\n <property name=\"margin\">\n <number>0</number>\n </property>\n <property name=\"spacing\">\n <number>0</number>\n </property>\n <widget class=\"CloseButton\">\n <property name=\"name\">\n <cstring>CloseButton3</cstring>\n </property>\n <property name=\"minimumSize\">\n <size>\n <width>60</width>\n <height>0</height>\n </size>\n </property>\n <property name=\"maximumSize\">\n <size>\n <width>32767</width>\n <height>30</height>\n </size>\n </property>\n <property name=\"text\">\n <string>o&k</string>\n </property>\n <property name=\"default\">\n <bool>true</bool>\n </property>\n <property name=\"associations\" stdset=\"0\">\n <stringlist>\n <string>echo OK\necho \\\"@infile.text\necho \\\"@outfile.text\necho @resolution.text\necho @color.checked\necho @transparent.checked\necho @dcopid\n</string>\n </stringlist>\n </property>\n </widget>\n <spacer>\n <property name=\"name\">\n <cstring>Spacer7</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Fixed</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>41</width>\n <height>30</height>\n </size>\n </property>\n </spacer>\n <widget class=\"CloseButton\">\n <property name=\"name\">\n <cstring>CloseButton2</cstring>\n </property>\n <property name=\"minimumSize\">\n <size>\n <width>60</width>\n <height>0</height>\n </size>\n </property>\n <property name=\"maximumSize\">\n <size>\n <width>32767</width>\n <height>30</height>\n </size>\n </property>\n <property name=\"text\">\n <string>cance&l</string>\n </property>\n <property name=\"associations\" stdset=\"0\">\n <stringlist>\n <string>echo CANCEL</string>\n </stringlist>\n </property>\n </widget>\n <spacer>\n <property name=\"name\">\n <cstring>Spacer8_2</cstring>\n </property>\n <property name=\"orientation\">\n <enum>Horizontal</enum>\n </property>\n <property name=\"sizeType\">\n <enum>Expanding</enum>\n </property>\n <property name=\"sizeHint\">\n <size>\n <width>61</width>\n <height>0</height>\n </size>\n </property>\n </spacer>\n </hbox>\n </widget>\n </vbox>\n</widget>\n<connections>\n <connection>\n <sender>CloseButton2</sender>\n <signal>clicked()</signal>\n <receiver>pdf2oo</receiver>\n <slot>reject()</slot>\n </connection>\n</connections>\n<layoutdefaults spacing=\"6\" margin=\"11\"/>\n</UI>\n\n____END____\n oldifs=$IFS\n IFS=\"\n\"\n result=($(kmdr-executor ~/.pdf2oo_dialog.kmdr)) \n rm ~/.pdf2oo_dialog.kmdr\n 
IFS=$oldifs\n if [ \"${result[0]}\" != \"OK\" ]\n\tthen exit 0\n else\n\tsrcfile=\"${result[1]}\"\n\tsrcfile=$(normalize \"${srcfile:1}\")\n\tdestfile=\"${result[2]}\"\n\tdestfile=$(normalize \"${destfile:1}\")\n\tresolution=\"${result[3]}\"\n\tcolor=\"${result[4]}\"\n\ttransparent=\"${result[5]}\"\n\tif [ \"$color\" = \"1\" ]\n\t then color=\"true\"\n\t else color=\"false\"\n\tfi\n\tif [ \"$transparent\" = \"1\" ]\n\t then transparent=\"true\"\n\t else transparent=\"false\"\n\tfi\n fi\nfi\n\n############# choosing parameters based on user options\n\ncolor_opts=\"\"\nppm_opts=\"\"\n\ncase \"$transparent,$color\" in\n \"false,false\")\n\tcolor_opts=\"-type Grayscale\"\n\t;;\n \"false,true\")\n\t;;\n \"true,false\")\n\tcolor_opts=\"-type Grayscale -transparent white\"\n\tppm_opts=\"-aa no\"\n\t;;\n \"true,true\")\n\tcolor_opts=\"-transparent white\"\n\tppm_opts=\"-aa no\"\n\t;;\nesac\n\nphase=\"opening source file\"\n\nif ! [ -f \"$srcfile\" ]\n then my_exit \": file not existent\"\nfi\n\nphase=\"checking destination file\"\n\nif [ \"$destfile\" = \"\" ]\n then my_exit \": no destination file given\"\nfi\n\ndestfile=$(normalize \"$destfile\") || my_exit\n\ndirdestfile=$(dirname \"$destfile\")\nif ! [ -d \"$dirdestfile\" ]\n then my_exit \": destination directory not existent\"\nfi\n\nsize=($(pdfinfo \"$srcfile\"|grep \"Page size:\" |sed 's@[^0-9]*\\([0-9.]*\\)[^0-9]*\\([0-9.]*\\).*@\\1pt \\2pt@' || my_exit))\nheight=${size[1]}\nwidth=${size[0]}\n\nphase=\"creating temporary directory structure\"\n\ndestdir=\"$(mktemp -d)\" || my_exit\npushd \"$destdir\" > /dev/null || my_exit \n\nmkdir document || my_exit \nmkdir document/META-INF || my_exit\nmkdir document/Pictures || my_exit\n\nphase=\"converting pdf file into images\"\n\nif [ \"$interactive\" = \"true\" ]\n then pbar=$(kdialog --title \"Progress\" --progressbar \"Creating images...\")\nfi\n\npushd document/Pictures > /dev/null || my_exit\npages=$(pdfinfo \"$srcfile\"|grep \"Pages:\" | cut -b 17-) || my_exit\nif [ \"$interactive\" = \"true\" ]\n then dcop \"$pbar\" setTotalSteps $pages\nfi\nfor page in $(seq 1 $pages)\n do pdftoppm $ppm_opts -r $resolution -f $page -l $page \"$srcfile\" pg || my_exit\n convert +dither +antialias -density $resolution $color_opts *.ppm \"$(basename \"$srcfile\" .pdf)-$page.$imgtype\" || my_exit\n rm -f *.ppm\n if [ \"$interactive\" = \"true\" ]\n\t then dcop \"$pbar\" setProgress $(($(dcop \"$pbar\" progress)+1))\n fi\ndone\npopd > /dev/null\n\nif [ \"$interactive\" = \"true\" ]\n then dcop \"$pbar\" setLabel \"Creating output file...\"\n dcop \"$pbar\" setTotalSteps 1\n dcop \"$pbar\" setProgress 0\nfi\n\nphase=\"creating document templates\"\n\ngenodp(){\necho \"application/vnd.oasis.opendocument.presentation\" > document/mimetype\n\ncat > document/styles.xml << ____END____\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<office:document-styles 
xmlns:office=\"urn:oasis:names:tc:opendocument:xmlns:office:1.0\"\nxmlns:style=\"urn:oasis:names:tc:opendocument:xmlns:style:1.0\"\nxmlns:text=\"urn:oasis:names:tc:opendocument:xmlns:text:1.0\"\nxmlns:table=\"urn:oasis:names:tc:opendocument:xmlns:table:1.0\"\nxmlns:draw=\"urn:oasis:names:tc:opendocument:xmlns:drawing:1.0\"\nxmlns:fo=\"urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0\"\nxmlns:xlink=\"http://www.w3.org/1999/xlink\"\nxmlns:dc=\"http://purl.org/dc/elements/1.1/\"\nxmlns:meta=\"urn:oasis:names:tc:opendocument:xmlns:meta:1.0\"\nxmlns:number=\"urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0\"\nxmlns:presentation=\"urn:oasis:names:tc:opendocument:xmlns:presentation:1.0\"\nxmlns:svg=\"urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0\"\nxmlns:chart=\"urn:oasis:names:tc:opendocument:xmlns:chart:1.0\"\nxmlns:dr3d=\"urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0\"\nxmlns:math=\"http://www.w3.org/1998/Math/MathML\"\nxmlns:form=\"urn:oasis:names:tc:opendocument:xmlns:form:1.0\"\nxmlns:script=\"urn:oasis:names:tc:opendocument:xmlns:script:1.0\"\nxmlns:ooo=\"http://openoffice.org/2004/office\"\nxmlns:ooow=\"http://openoffice.org/2004/writer\"\nxmlns:oooc=\"http://openoffice.org/2004/calc\"\nxmlns:dom=\"http://www.w3.org/2001/xml-events\"\nxmlns:smil=\"urn:oasis:names:tc:opendocument:xmlns:smil-compatible:1.0\"\nxmlns:anim=\"urn:oasis:names:tc:opendocument:xmlns:animation:1.0\"\noffice:version=\"1.0\">\n<office:styles>\n<draw:marker draw:name=\"Arrow\" svg:viewBox=\"0 0 20 30\"\nsvg:d=\"m10 0-10 30h20z\" />\n<style:default-style style:family=\"graphic\">\n<style:paragraph-properties style:text-autospace=\"ideograph-alpha\"\nstyle:punctuation-wrap=\"simple\" style:line-break=\"strict\"\nstyle:writing-mode=\"lr-tb\"\nstyle:font-independent-line-spacing=\"false\">\n<style:tab-stops />\n</style:paragraph-properties>\n<style:text-properties style:use-window-font-color=\"true\"\nfo:font-family=\"'Thorndale AMT'\" style:font-family-generic=\"roman\"\nstyle:font-pitch=\"variable\" fo:font-size=\"24pt\" fo:language=\"it\"\nfo:country=\"IT\" style:font-family-asian=\"'Albany AMT'\"\nstyle:font-pitch-asian=\"variable\" style:font-size-asian=\"24pt\"\nstyle:language-asian=\"none\" style:country-asian=\"none\"\nstyle:font-family-complex=\"Lucidasans\"\nstyle:font-pitch-complex=\"variable\" style:font-size-complex=\"24pt\"\nstyle:language-complex=\"none\" style:country-complex=\"none\" />\n</style:default-style>\n<style:style style:name=\"standard\" style:family=\"graphic\">\n<style:graphic-properties draw:stroke=\"solid\"\nsvg:stroke-width=\"0cm\" svg:stroke-color=\"#000000\"\ndraw:marker-start-width=\"0.3cm\" draw:marker-start-center=\"false\"\ndraw:marker-end-width=\"0.3cm\" draw:marker-end-center=\"false\"\ndraw:fill=\"solid\" draw:fill-color=\"#99ccff\"\nfo:padding-top=\"0.125cm\" fo:padding-bottom=\"0.125cm\"\nfo:padding-left=\"0.25cm\" fo:padding-right=\"0.25cm\"\ndraw:shadow=\"hidden\" draw:shadow-offset-x=\"0.3cm\"\ndraw:shadow-offset-y=\"0.3cm\" draw:shadow-color=\"#808080\">\n<text:list-style>\n<text:list-level-style-bullet text:level=\"1\"\ntext:bullet-char=\"â—\">\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"2\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"0.6cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" 
fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"3\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"1.2cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"4\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"1.8cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"5\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"2.4cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"6\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"3cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"7\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"3.6cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"8\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"4.2cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"9\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"4.8cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"10\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"5.4cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n</text:list-style>\n</style:graphic-properties>\n<style:paragraph-properties fo:margin-left=\"0cm\"\nfo:margin-right=\"0cm\" fo:margin-top=\"0cm\" fo:margin-bottom=\"0cm\"\nfo:line-height=\"100%\" text:enable-numbering=\"false\"\nfo:text-indent=\"0cm\" />\n<style:text-properties style:use-window-font-color=\"true\"\nstyle:text-outline=\"false\" style:text-line-through-style=\"none\"\nfo:font-family=\"'Albany AMT'\" style:font-family-generic=\"roman\"\nstyle:font-pitch=\"variable\" fo:font-size=\"18pt\"\nfo:font-style=\"normal\" fo:text-shadow=\"none\"\nstyle:text-underline-style=\"none\" fo:font-weight=\"normal\"\nstyle:font-family-asian=\"'Albany AMT'\"\nstyle:font-pitch-asian=\"variable\" style:font-size-asian=\"18pt\"\nstyle:font-style-asian=\"normal\" style:font-weight-asian=\"normal\"\nstyle:font-family-complex=\"Lucidasans\"\nstyle:font-pitch-complex=\"variable\" 
style:font-size-complex=\"18pt\"\nstyle:font-style-complex=\"normal\"\nstyle:font-weight-complex=\"normal\" style:text-emphasize=\"none\"\nstyle:font-relief=\"none\" />\n</style:style>\n<style:style style:name=\"objectwitharrow\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"solid\"\nsvg:stroke-width=\"0.15cm\" svg:stroke-color=\"#000000\"\ndraw:marker-start=\"Arrow\" draw:marker-start-width=\"0.7cm\"\ndraw:marker-start-center=\"true\" draw:marker-end-width=\"0.3cm\" />\n</style:style>\n<style:style style:name=\"objectwithshadow\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:shadow=\"visible\"\ndraw:shadow-offset-x=\"0.3cm\" draw:shadow-offset-y=\"0.3cm\"\ndraw:shadow-color=\"#808080\" />\n</style:style>\n<style:style style:name=\"objectwithoutfill\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:fill=\"none\" />\n</style:style>\n<style:style style:name=\"text\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n</style:style>\n<style:style style:name=\"textbody\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n<style:text-properties fo:font-size=\"16pt\" />\n</style:style>\n<style:style style:name=\"textbodyjustfied\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n<style:paragraph-properties fo:text-align=\"justify\" />\n</style:style>\n<style:style style:name=\"textbodyindent\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\">\n<text:list-style>\n<text:list-level-style-bullet text:level=\"1\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"0.6cm\"\ntext:min-label-width=\"-0.6cm\" text:min-label-distance=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"2\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"0.6cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"3\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"1.2cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"4\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"1.8cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"5\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"2.4cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet 
text:level=\"6\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"3cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"7\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"3.6cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"8\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"4.2cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"9\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"4.8cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"10\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"5.4cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n</text:list-style>\n</style:graphic-properties>\n<style:paragraph-properties fo:margin-left=\"0cm\"\nfo:margin-right=\"0cm\" fo:text-indent=\"0.6cm\" />\n</style:style>\n<style:style style:name=\"title\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n<style:text-properties fo:font-size=\"44pt\" />\n</style:style>\n<style:style style:name=\"title1\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"solid\"\ndraw:fill-color=\"#008080\" draw:shadow=\"visible\"\ndraw:shadow-offset-x=\"0.2cm\" draw:shadow-offset-y=\"0.2cm\"\ndraw:shadow-color=\"#808080\" />\n<style:paragraph-properties fo:text-align=\"center\" />\n<style:text-properties fo:font-size=\"24pt\" />\n</style:style>\n<style:style style:name=\"title2\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties svg:stroke-width=\"0.05cm\"\ndraw:fill-color=\"#ffcc99\" draw:shadow=\"visible\"\ndraw:shadow-offset-x=\"0.2cm\" draw:shadow-offset-y=\"0.2cm\"\ndraw:shadow-color=\"#808080\">\n<text:list-style>\n<text:list-level-style-bullet text:level=\"1\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"0.2cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"2\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"0.6cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"3\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"1.2cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties 
fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"4\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"1.8cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"5\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"2.4cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"6\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"3cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"7\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"3.6cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"8\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"4.2cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"9\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"4.8cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n<text:list-level-style-bullet text:level=\"10\"\ntext:bullet-char=\"â—\">\n<style:list-level-properties text:space-before=\"5.4cm\"\ntext:min-label-width=\"0.6cm\" />\n<style:text-properties fo:font-family=\"StarSymbol\"\nstyle:use-window-font-color=\"true\" fo:font-size=\"45%\" />\n</text:list-level-style-bullet>\n</text:list-style>\n</style:graphic-properties>\n<style:paragraph-properties fo:margin-left=\"0.2cm\"\nfo:margin-right=\"0.2cm\" fo:margin-top=\"0.1cm\"\nfo:margin-bottom=\"0.1cm\" fo:text-align=\"center\"\nfo:text-indent=\"0cm\" />\n<style:text-properties fo:font-size=\"36pt\" />\n</style:style>\n<style:style style:name=\"headline\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n<style:paragraph-properties fo:margin-top=\"0.42cm\"\nfo:margin-bottom=\"0.21cm\" />\n<style:text-properties fo:font-size=\"24pt\" />\n</style:style>\n<style:style style:name=\"headline1\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n<style:paragraph-properties fo:margin-top=\"0.42cm\"\nfo:margin-bottom=\"0.21cm\" />\n<style:text-properties fo:font-size=\"18pt\" fo:font-weight=\"bold\" />\n</style:style>\n<style:style style:name=\"headline2\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\" />\n<style:paragraph-properties 
fo:margin-top=\"0.42cm\"\nfo:margin-bottom=\"0.21cm\" />\n<style:text-properties fo:font-size=\"14pt\" fo:font-style=\"italic\"\nfo:font-weight=\"bold\" />\n</style:style>\n<style:style style:name=\"measure\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"solid\"\ndraw:marker-start=\"Arrow\" draw:marker-start-width=\"0.2cm\"\ndraw:marker-end=\"Arrow\" draw:marker-end-width=\"0.2cm\"\ndraw:fill=\"none\" />\n<style:text-properties fo:font-size=\"12pt\" />\n</style:style>\n</office:styles>\n<office:automatic-styles>\n<style:page-layout style:name=\"PM0\">\n<style:page-layout-properties fo:margin-top=\"0.0cm\"\nfo:margin-bottom=\"0cm\" fo:margin-left=\"0cm\"\nfo:margin-right=\"0cm\" fo:page-width=\"$width\"\nfo:page-height=\"$height\" style:print-orientation=\"$portrait\" />\n</style:page-layout>\n<style:style style:name=\"dp1\" style:family=\"drawing-page\">\n<style:drawing-page-properties draw:background-size=\"border\"\ndraw:fill=\"none\" />\n</style:style>\n<style:style style:name=\"dp2\" style:family=\"drawing-page\">\n<style:drawing-page-properties presentation:display-header=\"true\"\npresentation:display-footer=\"true\"\npresentation:display-page-number=\"false\"\npresentation:display-date-time=\"true\" />\n</style:style>\n</office:automatic-styles>\n<office:master-styles>\n<draw:layer-set>\n<draw:layer draw:name=\"layout\" />\n<draw:layer draw:name=\"background\" />\n<draw:layer draw:name=\"backgroundobjects\" />\n<draw:layer draw:name=\"controls\" />\n<draw:layer draw:name=\"measurelines\" />\n</draw:layer-set>\n<style:master-page style:name=\"Standard\"\nstyle:page-layout-name=\"PM0\" draw:style-name=\"dp1\" />\n</office:master-styles>\n</office:document-styles>\n____END____\n\ncat > document/META-INF/manifest.xml << ____END____\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE manifest:manifest PUBLIC \"-//OpenOffice.org//DTD Manifest 1.0//EN\" \"Manifest.dtd\">\n<manifest:manifest xmlns:manifest=\"urn:oasis:names:tc:opendocument:xmlns:manifest:1.0\">\n <manifest:file-entry manifest:media-type=\"application/vnd.oasis.opendocument.presentation\" manifest:full-path=\"/\"/>\n <manifest:file-entry manifest:media-type=\"application/vnd.sun.xml.ui.configuration\" manifest:full-path=\"Configurations2/\"/>\n <manifest:file-entry manifest:media-type=\"\" manifest:full-path=\"Pictures/\"/>\n <manifest:file-entry manifest:media-type=\"text/xml\" manifest:full-path=\"content.xml\"/>\n <manifest:file-entry manifest:media-type=\"text/xml\" manifest:full-path=\"styles.xml\"/>\n <manifest:file-entry manifest:media-type=\"text/xml\" manifest:full-path=\"meta.xml\"/>\n <manifest:file-entry manifest:media-type=\"\" manifest:full-path=\"Thumbnails/thumbnail.png\"/>\n <manifest:file-entry manifest:media-type=\"\" manifest:full-path=\"Thumbnails/\"/>\n <manifest:file-entry manifest:media-type=\"text/xml\" manifest:full-path=\"settings.xml\"/>\n</manifest:manifest>\n____END____\n\ncat > document/content.xml << ____END____\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<office:document-content 
xmlns:office=\"urn:oasis:names:tc:opendocument:xmlns:office:1.0\"\nxmlns:style=\"urn:oasis:names:tc:opendocument:xmlns:style:1.0\"\nxmlns:text=\"urn:oasis:names:tc:opendocument:xmlns:text:1.0\"\nxmlns:table=\"urn:oasis:names:tc:opendocument:xmlns:table:1.0\"\nxmlns:draw=\"urn:oasis:names:tc:opendocument:xmlns:drawing:1.0\"\nxmlns:fo=\"urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0\"\nxmlns:xlink=\"http://www.w3.org/1999/xlink\"\nxmlns:dc=\"http://purl.org/dc/elements/1.1/\"\nxmlns:meta=\"urn:oasis:names:tc:opendocument:xmlns:meta:1.0\"\nxmlns:number=\"urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0\"\nxmlns:presentation=\"urn:oasis:names:tc:opendocument:xmlns:presentation:1.0\"\nxmlns:svg=\"urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0\"\nxmlns:chart=\"urn:oasis:names:tc:opendocument:xmlns:chart:1.0\"\nxmlns:dr3d=\"urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0\"\nxmlns:math=\"http://www.w3.org/1998/Math/MathML\"\nxmlns:form=\"urn:oasis:names:tc:opendocument:xmlns:form:1.0\"\nxmlns:script=\"urn:oasis:names:tc:opendocument:xmlns:script:1.0\"\nxmlns:ooo=\"http://openoffice.org/2004/office\"\nxmlns:ooow=\"http://openoffice.org/2004/writer\"\nxmlns:oooc=\"http://openoffice.org/2004/calc\"\nxmlns:dom=\"http://www.w3.org/2001/xml-events\"\nxmlns:xforms=\"http://www.w3.org/2002/xforms\"\nxmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\nxmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\nxmlns:smil=\"urn:oasis:names:tc:opendocument:xmlns:smil-compatible:1.0\"\nxmlns:anim=\"urn:oasis:names:tc:opendocument:xmlns:animation:1.0\"\noffice:version=\"1.0\">\n<office:scripts />\n<office:automatic-styles>\n<style:style style:name=\"gr1\" style:family=\"graphic\"\nstyle:parent-style-name=\"standard\">\n<style:graphic-properties draw:stroke=\"none\" draw:fill=\"none\"\ndraw:textarea-horizontal-align=\"center\"\ndraw:textarea-vertical-align=\"middle\" draw:color-mode=\"standard\"\ndraw:luminance=\"0%\" draw:contrast=\"0%\" draw:gamma=\"100%\"\ndraw:red=\"0%\" draw:green=\"0%\" draw:blue=\"0%\"\nfo:clip=\"rect(0cm 0cm 0cm 0cm)\" draw:image-opacity=\"100%\"\nstyle:mirror=\"none\" style:protect=\"position size\" />\n</style:style>\n</office:automatic-styles>\n<office:body>\n<office:presentation>\n____END____\n\nfor page in $(seq 1 $pages)\n do cat >> document/content.xml << ____END____\n<draw:page draw:name=\"page$page\" draw:style-name=\"dp1\"\ndraw:master-page-name=\"Standard\">\n<draw:frame draw:style-name=\"gr1\" draw:text-style-name=\"P1\"\ndraw:layer=\"layout\" svg:width=\"$width\" svg:height=\"$height\"\nsvg:x=\"0cm\" svg:y=\"0cm\">\n<draw:image xlink:href=\"Pictures/$(basename \"$srcfile\" .pdf)-$page.$imgtype\"\nxlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\">\n<text:p />\n</draw:image>\n</draw:frame>\n</draw:page>\n____END____\ndone\n\ncat >> document/content.xml << ____END____\n# </office:presentation>\n</office:body>\n</office:document-content>\n____END____\n}\n\ncase \"$type\" in\n \"odp\")\n genodp\n\t;;\nesac\n\nphase=\"creating destination file\"\n\npushd document > /dev/null\nrm \"$destfile\" -f || my_exit\nzip -r \"$destfile\" * > /dev/null || my_exit\npopd > /dev/null\npopd > /dev/null\n\nrm -rf \"$destdir\"\n\nif [ \"$interactive\" = \"true\" ]\n then dcop \"$pbar\" close\nfi\n"
},
{
"alpha_fraction": 0.5932360887527466,
"alphanum_fraction": 0.5978901386260986,
"avg_line_length": 30.910890579223633,
"blob_id": "3bf92a695c65f55b13049602f6f14f8bd4d35924",
"content_id": "bf977b6eae939e748337f2f6be850e04dd9d0849",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3223,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 101,
"path": "/bin/systemload",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n# Script to check system load average levels to try to determine what processes are\n# taking it overly high...\n#\n# set environment\ndt=`date +%d%b%Y-%X`\n# Obviously, change the following directories to where your log files actually are kept\ntmpfile=\"/var/tmp/checkSystemLoad-$dt.tmp\"\nlogfile=\"/var/tmp/checkSystemLoad-$dt.log\"\nmsgLog=\"/var/log/messages\"\nmysqlLog=\"/var/log/mysqld.log\"\n# the first mailstop is standard email for reports. Second one is for cell phone (with a pared down report)\nmailstop=\"[email protected]\"\nmailstop1=\"[email protected]\"\nmachine=`hostname`\n# The following three are for mytop use - use a db user that has decent rights\n#dbusr=\"username\"\n#dbpw=\"password\"\n#db=\"yourdatabasename\"\n# The following is the load level to check on - 10 is really high, so you might want to lower it.\nlevelToCheck=3\n# Set variables from system:\nloadLevel1=`cat /proc/loadavg | awk '{print $1}'`\nloadLevel=$( printf \"%0.f\" $loadLevel1 )\n\n# if the load level is greater than you want, start the script process. Otherwise, exit 0\n\nif [ $loadLevel -gt $levelToCheck ]; then\necho \"Load threshold exceeding ${levelToCheck}\"\necho \"\" > $tmpfile\necho \"**************************************\" >>$tmpfile\necho \"Date: $dt \" >>$tmpfile\necho \"Check System Load & Processes \" >>$tmpfile\necho \"**************************************\" >>$tmpfile\n\n# Get more variables from system:\nhttpdProcesses=`ps -def | grep httpd | grep -v grep | wc -l`\n\n# Show current load level:\necho \"Load Level Is: $loadLevel\" >>$tmpfile\necho \"*************************************************\" >>$tmpfile\n\n# Show number of httpd processes now running (not including children):\necho \"Number of httpd processes now: $httpdProcesses\" >>$tmpfile\necho \"*************************************************\" >>$tmpfile\necho \"\" >>$tmpfile\n\n# Show process list:\necho \"Processes now running:\" >>$tmpfile\nps f -ef >>$tmpfile\necho \"*************************************************\" >>$tmpfile\necho \"\" >>$tmpfile\n\n# Show current MySQL info:\n#echo \"Results from mytop:\" >>$tmpfile\n#/usr/bin/mytop -u $dbusr -p $dbpw -b -d $db >>$tmpfile\n#echo \"*************************************************\" >>$tmpfile\n#echo \"\" >>$tmpfile\n\n# Show current top:\necho \"top now shows:\" >>$tmpfile\necho \"top now shows:\" >>$topfile\n/usr/bin/top -bc -d 60 -n 5 >>$tmpfile\n/usr/bin/top -b -n1 >>$topfile\necho \"*************************************************\" >>$tmpfile\necho \"\" >>$tmpfile\n\n# Show current connections:\necho \"netstat now shows:\" >>$tmpfile\n/bin/netstat -p >>$tmpfile\necho \"*************************************************\" >>$tmpfile\necho \"\" >>$tmpfile\n\n# Check disk space\necho \"disk space:\" >>$tmpfile\n/bin/df -k >>$tmpfile\necho \"*************************************************\" >>$tmpfile\necho \"\" >>$tmpfile\n\n# Send results to log file:\n/bin/cat $tmpfile >>$logfile\n\n# And email results to sysadmin:\n#/usr/sbin/sendmail -s \"$machine has a high load level! - $dt\" -a $mysqlLog -a $msgLog $mailstop <$tmpfile\n#/usr/sbin/sendmail -s \"$machine has a high load level! - $dt\" $mailstop1 <$topfile\n\n\n\necho \"**************************************\" >>$logfile\n\n# And then remove the temp file:\n#rm $tmpfile\n#rm $topfile\nelse\necho \"Load threshold is below ${levelToCheck}\"\n\nfi\n\n#\nexit 0\n"
},
{
"alpha_fraction": 0.6414493322372437,
"alphanum_fraction": 0.6845512390136719,
"avg_line_length": 70.9734878540039,
"blob_id": "3d538721e5be10884a5a8171e1c6866446649d9c",
"content_id": "584fe91ca3775d5b84d00d134a53e537ef0f3797",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 38003,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 528,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/erl_bif_table.c",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/*\n * Warning: Do not edit this file. It was automatically\n * generated by 'make_tables' on Thu Dec 31 02:09:59 2015.\n */\n\n#ifdef HAVE_CONFIG_H\n# include \"config.h\"\n#endif /* HAVE_CONFIG_H */\n#include \"export.h\"\n#include \"sys.h\"\n#include \"erl_vm.h\"\n#include \"erl_process.h\"\n#include \"bif.h\"\n#include \"erl_bif_table.h\"\n#include \"erl_atom_table.h\"\n\n\nExport* bif_export[BIF_SIZE];\nBifEntry bif_table[] = {\n {am_erlang, am_abs, 1, abs_1, abs_1},\n {am_erlang, am_adler32, 1, adler32_1, wrap_adler32_1},\n {am_erlang, am_adler32, 2, adler32_2, wrap_adler32_2},\n {am_erlang, am_adler32_combine, 3, adler32_combine_3, wrap_adler32_combine_3},\n {am_erlang, am_apply, 3, apply_3, wrap_apply_3},\n {am_erlang, am_atom_to_list, 1, atom_to_list_1, wrap_atom_to_list_1},\n {am_erlang, am_binary_to_list, 1, binary_to_list_1, wrap_binary_to_list_1},\n {am_erlang, am_binary_to_list, 3, binary_to_list_3, wrap_binary_to_list_3},\n {am_erlang, am_binary_to_term, 1, binary_to_term_1, wrap_binary_to_term_1},\n {am_erlang, am_crc32, 1, crc32_1, wrap_crc32_1},\n {am_erlang, am_crc32, 2, crc32_2, wrap_crc32_2},\n {am_erlang, am_crc32_combine, 3, crc32_combine_3, wrap_crc32_combine_3},\n {am_erlang, am_date, 0, date_0, wrap_date_0},\n {am_erlang, am_delete_module, 1, delete_module_1, wrap_delete_module_1},\n {am_erlang, am_display, 1, display_1, wrap_display_1},\n {am_erlang, am_display_string, 1, display_string_1, wrap_display_string_1},\n {am_erlang, am_display_nl, 0, display_nl_0, wrap_display_nl_0},\n {am_erlang, am_element, 2, element_2, element_2},\n {am_erlang, am_erase, 0, erase_0, wrap_erase_0},\n {am_erlang, am_erase, 1, erase_1, wrap_erase_1},\n {am_erlang, am_exit, 1, exit_1, wrap_exit_1},\n {am_erlang, am_exit, 2, exit_2, wrap_exit_2},\n {am_erlang, am_external_size, 1, external_size_1, wrap_external_size_1},\n {am_erlang, am_external_size, 2, external_size_2, wrap_external_size_2},\n {am_erlang, am_float, 1, float_1, float_1},\n {am_erlang, am_float_to_list, 1, float_to_list_1, wrap_float_to_list_1},\n {am_erlang, am_float_to_list, 2, float_to_list_2, wrap_float_to_list_2},\n {am_erlang, am_fun_info, 2, fun_info_2, wrap_fun_info_2},\n {am_erlang, am_garbage_collect, 0, garbage_collect_0, wrap_garbage_collect_0},\n {am_erlang, am_get, 0, get_0, wrap_get_0},\n {am_erlang, am_get, 1, get_1, wrap_get_1},\n {am_erlang, am_get_keys, 1, get_keys_1, wrap_get_keys_1},\n {am_erlang, am_group_leader, 0, group_leader_0, wrap_group_leader_0},\n {am_erlang, am_group_leader, 2, group_leader_2, wrap_group_leader_2},\n {am_erlang, am_halt, 0, halt_0, wrap_halt_0},\n {am_erlang, am_halt, 1, halt_1, wrap_halt_1},\n {am_erlang, am_halt, 2, halt_2, wrap_halt_2},\n {am_erlang, am_phash, 2, phash_2, wrap_phash_2},\n {am_erlang, am_phash2, 1, phash2_1, wrap_phash2_1},\n {am_erlang, am_phash2, 2, phash2_2, wrap_phash2_2},\n {am_erlang, am_hd, 1, hd_1, hd_1},\n {am_erlang, am_integer_to_list, 1, integer_to_list_1, wrap_integer_to_list_1},\n {am_erlang, am_is_alive, 0, is_alive_0, wrap_is_alive_0},\n {am_erlang, am_length, 1, length_1, length_1},\n {am_erlang, am_link, 1, link_1, wrap_link_1},\n {am_erlang, am_list_to_atom, 1, list_to_atom_1, wrap_list_to_atom_1},\n {am_erlang, am_list_to_binary, 1, list_to_binary_1, wrap_list_to_binary_1},\n {am_erlang, am_list_to_float, 1, list_to_float_1, wrap_list_to_float_1},\n {am_erlang, am_list_to_integer, 1, list_to_integer_1, wrap_list_to_integer_1},\n {am_erlang, am_list_to_pid, 1, list_to_pid_1, wrap_list_to_pid_1},\n {am_erlang, am_list_to_tuple, 
1, list_to_tuple_1, wrap_list_to_tuple_1},\n {am_erlang, am_loaded, 0, loaded_0, wrap_loaded_0},\n {am_erlang, am_localtime, 0, localtime_0, wrap_localtime_0},\n {am_erlang, am_localtime_to_universaltime, 2, localtime_to_universaltime_2, wrap_localtime_to_universaltime_2},\n {am_erlang, am_make_ref, 0, make_ref_0, wrap_make_ref_0},\n {am_erlang, am_unique_integer, 0, unique_integer_0, wrap_unique_integer_0},\n {am_erlang, am_unique_integer, 1, unique_integer_1, wrap_unique_integer_1},\n {am_erlang, am_md5, 1, md5_1, wrap_md5_1},\n {am_erlang, am_md5_init, 0, md5_init_0, wrap_md5_init_0},\n {am_erlang, am_md5_update, 2, md5_update_2, wrap_md5_update_2},\n {am_erlang, am_md5_final, 1, md5_final_1, wrap_md5_final_1},\n {am_erlang, am_module_loaded, 1, module_loaded_1, wrap_module_loaded_1},\n {am_erlang, am_function_exported, 3, function_exported_3, wrap_function_exported_3},\n {am_erlang, am_monitor_node, 2, monitor_node_2, wrap_monitor_node_2},\n {am_erlang, am_monitor_node, 3, monitor_node_3, wrap_monitor_node_3},\n {am_erlang, am_node, 1, node_1, node_1},\n {am_erlang, am_node, 0, node_0, node_0},\n {am_erlang, am_nodes, 1, nodes_1, wrap_nodes_1},\n {am_erlang, am_now, 0, now_0, wrap_now_0},\n {am_erlang, am_monotonic_time, 0, monotonic_time_0, wrap_monotonic_time_0},\n {am_erlang, am_monotonic_time, 1, monotonic_time_1, wrap_monotonic_time_1},\n {am_erlang, am_system_time, 0, system_time_0, wrap_system_time_0},\n {am_erlang, am_system_time, 1, system_time_1, wrap_system_time_1},\n {am_erlang, am_time_offset, 0, time_offset_0, wrap_time_offset_0},\n {am_erlang, am_time_offset, 1, time_offset_1, wrap_time_offset_1},\n {am_erlang, am_timestamp, 0, timestamp_0, wrap_timestamp_0},\n {am_erlang, am_open_port, 2, open_port_2, wrap_open_port_2},\n {am_erlang, am_pid_to_list, 1, pid_to_list_1, wrap_pid_to_list_1},\n {am_erlang, am_ports, 0, ports_0, wrap_ports_0},\n {am_erlang, am_pre_loaded, 0, pre_loaded_0, wrap_pre_loaded_0},\n {am_erlang, am_process_flag, 2, process_flag_2, wrap_process_flag_2},\n {am_erlang, am_process_flag, 3, process_flag_3, wrap_process_flag_3},\n {am_erlang, am_process_info, 1, process_info_1, wrap_process_info_1},\n {am_erlang, am_process_info, 2, process_info_2, wrap_process_info_2},\n {am_erlang, am_processes, 0, processes_0, wrap_processes_0},\n {am_erlang, am_purge_module, 1, purge_module_1, wrap_purge_module_1},\n {am_erlang, am_put, 2, put_2, wrap_put_2},\n {am_erlang, am_register, 2, register_2, wrap_register_2},\n {am_erlang, am_registered, 0, registered_0, wrap_registered_0},\n {am_erlang, am_round, 1, round_1, round_1},\n {am_erlang, am_self, 0, self_0, self_0},\n {am_erlang, am_setelement, 3, setelement_3, wrap_setelement_3},\n {am_erlang, am_size, 1, size_1, size_1},\n {am_erlang, am_spawn, 3, spawn_3, wrap_spawn_3},\n {am_erlang, am_spawn_link, 3, spawn_link_3, wrap_spawn_link_3},\n {am_erlang, am_split_binary, 2, split_binary_2, wrap_split_binary_2},\n {am_erlang, am_statistics, 1, statistics_1, wrap_statistics_1},\n {am_erlang, am_term_to_binary, 1, term_to_binary_1, wrap_term_to_binary_1},\n {am_erlang, am_term_to_binary, 2, term_to_binary_2, wrap_term_to_binary_2},\n {am_erlang, am_throw, 1, throw_1, wrap_throw_1},\n {am_erlang, am_time, 0, time_0, wrap_time_0},\n {am_erlang, am_tl, 1, tl_1, tl_1},\n {am_erlang, am_trunc, 1, trunc_1, trunc_1},\n {am_erlang, am_tuple_to_list, 1, tuple_to_list_1, wrap_tuple_to_list_1},\n {am_erlang, am_universaltime, 0, universaltime_0, wrap_universaltime_0},\n {am_erlang, am_universaltime_to_localtime, 1, 
universaltime_to_localtime_1, wrap_universaltime_to_localtime_1},\n {am_erlang, am_unlink, 1, unlink_1, wrap_unlink_1},\n {am_erlang, am_unregister, 1, unregister_1, wrap_unregister_1},\n {am_erlang, am_whereis, 1, whereis_1, wrap_whereis_1},\n {am_erlang, am_spawn_opt, 1, spawn_opt_1, wrap_spawn_opt_1},\n {am_erlang, am_setnode, 2, setnode_2, wrap_setnode_2},\n {am_erlang, am_setnode, 3, setnode_3, wrap_setnode_3},\n {am_erlang, am_dist_exit, 3, dist_exit_3, wrap_dist_exit_3},\n {am_erts_internal, am_port_info, 1, erts_internal_port_info_1, wrap_erts_internal_port_info_1},\n {am_erts_internal, am_port_info, 2, erts_internal_port_info_2, wrap_erts_internal_port_info_2},\n {am_erts_internal, am_port_call, 3, erts_internal_port_call_3, wrap_erts_internal_port_call_3},\n {am_erts_internal, am_port_command, 3, erts_internal_port_command_3, wrap_erts_internal_port_command_3},\n {am_erts_internal, am_port_control, 3, erts_internal_port_control_3, wrap_erts_internal_port_control_3},\n {am_erts_internal, am_port_close, 1, erts_internal_port_close_1, wrap_erts_internal_port_close_1},\n {am_erts_internal, am_port_connect, 2, erts_internal_port_connect_2, wrap_erts_internal_port_connect_2},\n {am_erts_internal, am_request_system_task, 3, erts_internal_request_system_task_3, wrap_erts_internal_request_system_task_3},\n {am_erts_internal, am_check_process_code, 2, erts_internal_check_process_code_2, wrap_erts_internal_check_process_code_2},\n {am_erts_internal, am_map_to_tuple_keys, 1, erts_internal_map_to_tuple_keys_1, wrap_erts_internal_map_to_tuple_keys_1},\n {am_erts_internal, am_map_type, 1, erts_internal_map_type_1, wrap_erts_internal_map_type_1},\n {am_erts_internal, am_map_hashmap_children, 1, erts_internal_map_hashmap_children_1, wrap_erts_internal_map_hashmap_children_1},\n {am_erts_internal, am_time_unit, 0, erts_internal_time_unit_0, wrap_erts_internal_time_unit_0},\n {am_erts_internal, am_is_system_process, 1, erts_internal_is_system_process_1, wrap_erts_internal_is_system_process_1},\n {am_erlang, am_port_set_data, 2, port_set_data_2, wrap_port_set_data_2},\n {am_erlang, am_port_get_data, 1, port_get_data_1, wrap_port_get_data_1},\n {am_erlang, am_trace_pattern, 2, trace_pattern_2, wrap_trace_pattern_2},\n {am_erlang, am_trace_pattern, 3, trace_pattern_3, wrap_trace_pattern_3},\n {am_erlang, am_trace, 3, trace_3, wrap_trace_3},\n {am_erlang, am_trace_info, 2, trace_info_2, wrap_trace_info_2},\n {am_erlang, am_trace_delivered, 1, trace_delivered_1, wrap_trace_delivered_1},\n {am_erlang, am_seq_trace, 2, seq_trace_2, wrap_seq_trace_2},\n {am_erlang, am_seq_trace_info, 1, seq_trace_info_1, wrap_seq_trace_info_1},\n {am_erlang, am_seq_trace_print, 1, seq_trace_print_1, wrap_seq_trace_print_1},\n {am_erlang, am_seq_trace_print, 2, seq_trace_print_2, wrap_seq_trace_print_2},\n {am_erlang, am_suspend_process, 2, suspend_process_2, wrap_suspend_process_2},\n {am_erlang, am_resume_process, 1, resume_process_1, wrap_resume_process_1},\n {am_erlang, am_process_display, 2, process_display_2, wrap_process_display_2},\n {am_erlang, am_bump_reductions, 1, bump_reductions_1, wrap_bump_reductions_1},\n {am_math, am_cos, 1, math_cos_1, wrap_math_cos_1},\n {am_math, am_cosh, 1, math_cosh_1, wrap_math_cosh_1},\n {am_math, am_sin, 1, math_sin_1, wrap_math_sin_1},\n {am_math, am_sinh, 1, math_sinh_1, wrap_math_sinh_1},\n {am_math, am_tan, 1, math_tan_1, wrap_math_tan_1},\n {am_math, am_tanh, 1, math_tanh_1, wrap_math_tanh_1},\n {am_math, am_acos, 1, math_acos_1, wrap_math_acos_1},\n {am_math, am_acosh, 1, 
math_acosh_1, wrap_math_acosh_1},\n {am_math, am_asin, 1, math_asin_1, wrap_math_asin_1},\n {am_math, am_asinh, 1, math_asinh_1, wrap_math_asinh_1},\n {am_math, am_atan, 1, math_atan_1, wrap_math_atan_1},\n {am_math, am_atanh, 1, math_atanh_1, wrap_math_atanh_1},\n {am_math, am_erf, 1, math_erf_1, wrap_math_erf_1},\n {am_math, am_erfc, 1, math_erfc_1, wrap_math_erfc_1},\n {am_math, am_exp, 1, math_exp_1, wrap_math_exp_1},\n {am_math, am_log, 1, math_log_1, wrap_math_log_1},\n {am_math, am_log2, 1, math_log2_1, wrap_math_log2_1},\n {am_math, am_log10, 1, math_log10_1, wrap_math_log10_1},\n {am_math, am_sqrt, 1, math_sqrt_1, wrap_math_sqrt_1},\n {am_math, am_atan2, 2, math_atan2_2, wrap_math_atan2_2},\n {am_math, am_pow, 2, math_pow_2, wrap_math_pow_2},\n {am_erlang, am_start_timer, 3, start_timer_3, wrap_start_timer_3},\n {am_erlang, am_start_timer, 4, start_timer_4, wrap_start_timer_4},\n {am_erlang, am_send_after, 3, send_after_3, wrap_send_after_3},\n {am_erlang, am_send_after, 4, send_after_4, wrap_send_after_4},\n {am_erlang, am_cancel_timer, 1, cancel_timer_1, wrap_cancel_timer_1},\n {am_erlang, am_cancel_timer, 2, cancel_timer_2, wrap_cancel_timer_2},\n {am_erlang, am_read_timer, 1, read_timer_1, wrap_read_timer_1},\n {am_erlang, am_read_timer, 2, read_timer_2, wrap_read_timer_2},\n {am_erlang, am_make_tuple, 2, make_tuple_2, wrap_make_tuple_2},\n {am_erlang, am_append_element, 2, append_element_2, wrap_append_element_2},\n {am_erlang, am_make_tuple, 3, make_tuple_3, wrap_make_tuple_3},\n {am_erlang, am_system_flag, 2, system_flag_2, wrap_system_flag_2},\n {am_erlang, am_system_info, 1, system_info_1, wrap_system_info_1},\n {am_erlang, am_system_monitor, 0, system_monitor_0, wrap_system_monitor_0},\n {am_erlang, am_system_monitor, 1, system_monitor_1, wrap_system_monitor_1},\n {am_erlang, am_system_monitor, 2, system_monitor_2, wrap_system_monitor_2},\n {am_erlang, am_system_profile, 2, system_profile_2, wrap_system_profile_2},\n {am_erlang, am_system_profile, 0, system_profile_0, wrap_system_profile_0},\n {am_erlang, am_ref_to_list, 1, ref_to_list_1, wrap_ref_to_list_1},\n {am_erlang, am_port_to_list, 1, port_to_list_1, wrap_port_to_list_1},\n {am_erlang, am_fun_to_list, 1, fun_to_list_1, wrap_fun_to_list_1},\n {am_erlang, am_monitor, 2, monitor_2, wrap_monitor_2},\n {am_erlang, am_demonitor, 1, demonitor_1, wrap_demonitor_1},\n {am_erlang, am_demonitor, 2, demonitor_2, wrap_demonitor_2},\n {am_erlang, am_is_process_alive, 1, is_process_alive_1, wrap_is_process_alive_1},\n {am_erlang, am_error, 1, error_1, wrap_error_1},\n {am_erlang, am_error, 2, error_2, wrap_error_2},\n {am_erlang, am_raise, 3, raise_3, wrap_raise_3},\n {am_erlang, am_get_stacktrace, 0, get_stacktrace_0, wrap_get_stacktrace_0},\n {am_erlang, am_is_builtin, 3, is_builtin_3, wrap_is_builtin_3},\n {am_erlang, am_and, 2, and_2, and_2},\n {am_erlang, am_or, 2, or_2, or_2},\n {am_erlang, am_xor, 2, xor_2, xor_2},\n {am_erlang, am_not, 1, not_1, not_1},\n {am_erlang, am_Gt, 2, sgt_2, sgt_2},\n {am_erlang, am_Ge, 2, sge_2, sge_2},\n {am_erlang, am_Lt, 2, slt_2, slt_2},\n {am_erlang, am_Le, 2, sle_2, sle_2},\n {am_erlang, am_Eq, 2, seq_2, seq_2},\n {am_erlang, am_Eqeq, 2, seqeq_2, seqeq_2},\n {am_erlang, am_Neq, 2, sneq_2, sneq_2},\n {am_erlang, am_Neqeq, 2, sneqeq_2, sneqeq_2},\n {am_erlang, am_Plus, 2, splus_2, splus_2},\n {am_erlang, am_Minus, 2, sminus_2, sminus_2},\n {am_erlang, am_Times, 2, stimes_2, stimes_2},\n {am_erlang, am_Div, 2, div_2, div_2},\n {am_erlang, am_div, 2, intdiv_2, intdiv_2},\n {am_erlang, 
am_rem, 2, rem_2, rem_2},\n {am_erlang, am_bor, 2, bor_2, bor_2},\n {am_erlang, am_band, 2, band_2, band_2},\n {am_erlang, am_bxor, 2, bxor_2, bxor_2},\n {am_erlang, am_bsl, 2, bsl_2, bsl_2},\n {am_erlang, am_bsr, 2, bsr_2, bsr_2},\n {am_erlang, am_bnot, 1, bnot_1, bnot_1},\n {am_erlang, am_Minus, 1, sminus_1, sminus_1},\n {am_erlang, am_Plus, 1, splus_1, splus_1},\n {am_erlang, am__AtomAlias26, 2, ebif_bang_2, wrap_ebif_bang_2},\n {am_erlang, am_send, 2, send_2, wrap_send_2},\n {am_erlang, am_send, 3, send_3, wrap_send_3},\n {am_erlang, am__AtomAlias27, 2, ebif_plusplus_2, wrap_ebif_plusplus_2},\n {am_erlang, am_append, 2, append_2, wrap_append_2},\n {am_erlang, am__AtomAlias28, 2, ebif_minusminus_2, wrap_ebif_minusminus_2},\n {am_erlang, am_subtract, 2, subtract_2, wrap_subtract_2},\n {am_erlang, am_is_atom, 1, is_atom_1, is_atom_1},\n {am_erlang, am_is_list, 1, is_list_1, is_list_1},\n {am_erlang, am_is_tuple, 1, is_tuple_1, is_tuple_1},\n {am_erlang, am_is_float, 1, is_float_1, is_float_1},\n {am_erlang, am_is_integer, 1, is_integer_1, is_integer_1},\n {am_erlang, am_is_number, 1, is_number_1, is_number_1},\n {am_erlang, am_is_pid, 1, is_pid_1, is_pid_1},\n {am_erlang, am_is_port, 1, is_port_1, is_port_1},\n {am_erlang, am_is_reference, 1, is_reference_1, is_reference_1},\n {am_erlang, am_is_binary, 1, is_binary_1, is_binary_1},\n {am_erlang, am_is_function, 1, is_function_1, is_function_1},\n {am_erlang, am_is_function, 2, is_function_2, is_function_2},\n {am_erlang, am_is_record, 2, is_record_2, is_record_2},\n {am_erlang, am_is_record, 3, is_record_3, is_record_3},\n {am_erlang, am_match_spec_test, 3, match_spec_test_3, wrap_match_spec_test_3},\n {am_ets, am_all, 0, ets_all_0, wrap_ets_all_0},\n {am_ets, am_new, 2, ets_new_2, wrap_ets_new_2},\n {am_ets, am_delete, 1, ets_delete_1, wrap_ets_delete_1},\n {am_ets, am_delete, 2, ets_delete_2, wrap_ets_delete_2},\n {am_ets, am_delete_all_objects, 1, ets_delete_all_objects_1, wrap_ets_delete_all_objects_1},\n {am_ets, am_delete_object, 2, ets_delete_object_2, wrap_ets_delete_object_2},\n {am_ets, am_first, 1, ets_first_1, wrap_ets_first_1},\n {am_ets, am_is_compiled_ms, 1, ets_is_compiled_ms_1, wrap_ets_is_compiled_ms_1},\n {am_ets, am_lookup, 2, ets_lookup_2, wrap_ets_lookup_2},\n {am_ets, am_lookup_element, 3, ets_lookup_element_3, wrap_ets_lookup_element_3},\n {am_ets, am_info, 1, ets_info_1, wrap_ets_info_1},\n {am_ets, am_info, 2, ets_info_2, wrap_ets_info_2},\n {am_ets, am_last, 1, ets_last_1, wrap_ets_last_1},\n {am_ets, am_match, 1, ets_match_1, wrap_ets_match_1},\n {am_ets, am_match, 2, ets_match_2, wrap_ets_match_2},\n {am_ets, am_match, 3, ets_match_3, wrap_ets_match_3},\n {am_ets, am_match_object, 1, ets_match_object_1, wrap_ets_match_object_1},\n {am_ets, am_match_object, 2, ets_match_object_2, wrap_ets_match_object_2},\n {am_ets, am_match_object, 3, ets_match_object_3, wrap_ets_match_object_3},\n {am_ets, am_member, 2, ets_member_2, wrap_ets_member_2},\n {am_ets, am_next, 2, ets_next_2, wrap_ets_next_2},\n {am_ets, am_prev, 2, ets_prev_2, wrap_ets_prev_2},\n {am_ets, am_insert, 2, ets_insert_2, wrap_ets_insert_2},\n {am_ets, am_insert_new, 2, ets_insert_new_2, wrap_ets_insert_new_2},\n {am_ets, am_rename, 2, ets_rename_2, wrap_ets_rename_2},\n {am_ets, am_safe_fixtable, 2, ets_safe_fixtable_2, wrap_ets_safe_fixtable_2},\n {am_ets, am_slot, 2, ets_slot_2, wrap_ets_slot_2},\n {am_ets, am_update_counter, 3, ets_update_counter_3, wrap_ets_update_counter_3},\n {am_ets, am_select, 1, ets_select_1, wrap_ets_select_1},\n {am_ets, 
am_select, 2, ets_select_2, wrap_ets_select_2},\n {am_ets, am_select, 3, ets_select_3, wrap_ets_select_3},\n {am_ets, am_select_count, 2, ets_select_count_2, wrap_ets_select_count_2},\n {am_ets, am_select_reverse, 1, ets_select_reverse_1, wrap_ets_select_reverse_1},\n {am_ets, am_select_reverse, 2, ets_select_reverse_2, wrap_ets_select_reverse_2},\n {am_ets, am_select_reverse, 3, ets_select_reverse_3, wrap_ets_select_reverse_3},\n {am_ets, am_select_delete, 2, ets_select_delete_2, wrap_ets_select_delete_2},\n {am_ets, am_match_spec_compile, 1, ets_match_spec_compile_1, wrap_ets_match_spec_compile_1},\n {am_ets, am_match_spec_run_r, 3, ets_match_spec_run_r_3, wrap_ets_match_spec_run_r_3},\n {am_os, am_putenv, 2, os_putenv_2, wrap_os_putenv_2},\n {am_os, am_getenv, 0, os_getenv_0, wrap_os_getenv_0},\n {am_os, am_getenv, 1, os_getenv_1, wrap_os_getenv_1},\n {am_os, am_getpid, 0, os_getpid_0, wrap_os_getpid_0},\n {am_os, am_timestamp, 0, os_timestamp_0, wrap_os_timestamp_0},\n {am_os, am_system_time, 0, os_system_time_0, wrap_os_system_time_0},\n {am_os, am_system_time, 1, os_system_time_1, wrap_os_system_time_1},\n {am_erl_ddll, am_try_load, 3, erl_ddll_try_load_3, wrap_erl_ddll_try_load_3},\n {am_erl_ddll, am_try_unload, 2, erl_ddll_try_unload_2, wrap_erl_ddll_try_unload_2},\n {am_erl_ddll, am_loaded_drivers, 0, erl_ddll_loaded_drivers_0, wrap_erl_ddll_loaded_drivers_0},\n {am_erl_ddll, am_info, 2, erl_ddll_info_2, wrap_erl_ddll_info_2},\n {am_erl_ddll, am_format_error_int, 1, erl_ddll_format_error_int_1, wrap_erl_ddll_format_error_int_1},\n {am_erl_ddll, am_monitor, 2, erl_ddll_monitor_2, wrap_erl_ddll_monitor_2},\n {am_erl_ddll, am_demonitor, 1, erl_ddll_demonitor_1, wrap_erl_ddll_demonitor_1},\n {am_re, am_compile, 1, re_compile_1, wrap_re_compile_1},\n {am_re, am_compile, 2, re_compile_2, wrap_re_compile_2},\n {am_re, am_run, 2, re_run_2, wrap_re_run_2},\n {am_re, am_run, 3, re_run_3, wrap_re_run_3},\n {am_lists, am_member, 2, lists_member_2, wrap_lists_member_2},\n {am_lists, am_reverse, 2, lists_reverse_2, wrap_lists_reverse_2},\n {am_lists, am_keymember, 3, lists_keymember_3, wrap_lists_keymember_3},\n {am_lists, am_keysearch, 3, lists_keysearch_3, wrap_lists_keysearch_3},\n {am_lists, am_keyfind, 3, lists_keyfind_3, wrap_lists_keyfind_3},\n {am_erts_debug, am_disassemble, 1, erts_debug_disassemble_1, wrap_erts_debug_disassemble_1},\n {am_erts_debug, am_breakpoint, 2, erts_debug_breakpoint_2, wrap_erts_debug_breakpoint_2},\n {am_erts_debug, am_same, 2, erts_debug_same_2, wrap_erts_debug_same_2},\n {am_erts_debug, am_flat_size, 1, erts_debug_flat_size_1, wrap_erts_debug_flat_size_1},\n {am_erts_debug, am_get_internal_state, 1, erts_debug_get_internal_state_1, wrap_erts_debug_get_internal_state_1},\n {am_erts_debug, am_set_internal_state, 2, erts_debug_set_internal_state_2, wrap_erts_debug_set_internal_state_2},\n {am_erts_debug, am_display, 1, erts_debug_display_1, wrap_erts_debug_display_1},\n {am_erts_debug, am_dist_ext_to_term, 2, erts_debug_dist_ext_to_term_2, wrap_erts_debug_dist_ext_to_term_2},\n {am_erts_debug, am_instructions, 0, erts_debug_instructions_0, wrap_erts_debug_instructions_0},\n {am_erts_debug, am_dump_monitors, 1, erts_debug_dump_monitors_1, wrap_erts_debug_dump_monitors_1},\n {am_erts_debug, am_dump_links, 1, erts_debug_dump_links_1, wrap_erts_debug_dump_links_1},\n {am_erts_debug, am_lock_counters, 1, erts_debug_lock_counters_1, wrap_erts_debug_lock_counters_1},\n {am_code, am_get_chunk, 2, code_get_chunk_2, wrap_code_get_chunk_2},\n {am_code, am_module_md5, 1, 
code_module_md5_1, wrap_code_module_md5_1},\n {am_code, am_make_stub_module, 3, code_make_stub_module_3, wrap_code_make_stub_module_3},\n {am_code, am_is_module_native, 1, code_is_module_native_1, wrap_code_is_module_native_1},\n {am_erlang, am_hibernate, 3, hibernate_3, wrap_hibernate_3},\n {am_error_logger, am_warning_map, 0, error_logger_warning_map_0, wrap_error_logger_warning_map_0},\n {am_erlang, am_get_module_info, 1, get_module_info_1, wrap_get_module_info_1},\n {am_erlang, am_get_module_info, 2, get_module_info_2, wrap_get_module_info_2},\n {am_erlang, am_is_boolean, 1, is_boolean_1, is_boolean_1},\n {am_string, am_to_integer, 1, string_to_integer_1, wrap_string_to_integer_1},\n {am_string, am_to_float, 1, string_to_float_1, wrap_string_to_float_1},\n {am_erlang, am_make_fun, 3, make_fun_3, wrap_make_fun_3},\n {am_erlang, am_iolist_size, 1, iolist_size_1, wrap_iolist_size_1},\n {am_erlang, am_iolist_to_binary, 1, iolist_to_binary_1, wrap_iolist_to_binary_1},\n {am_erlang, am_list_to_existing_atom, 1, list_to_existing_atom_1, wrap_list_to_existing_atom_1},\n {am_erlang, am_is_bitstring, 1, is_bitstring_1, is_bitstring_1},\n {am_erlang, am_tuple_size, 1, tuple_size_1, tuple_size_1},\n {am_erlang, am_byte_size, 1, byte_size_1, byte_size_1},\n {am_erlang, am_bit_size, 1, bit_size_1, bit_size_1},\n {am_erlang, am_list_to_bitstring, 1, list_to_bitstring_1, wrap_list_to_bitstring_1},\n {am_erlang, am_bitstring_to_list, 1, bitstring_to_list_1, wrap_bitstring_to_list_1},\n {am_ets, am_update_element, 3, ets_update_element_3, wrap_ets_update_element_3},\n {am_erlang, am_decode_packet, 3, decode_packet_3, wrap_decode_packet_3},\n {am_unicode, am_characters_to_binary, 2, unicode_characters_to_binary_2, wrap_unicode_characters_to_binary_2},\n {am_unicode, am_characters_to_list, 2, unicode_characters_to_list_2, wrap_unicode_characters_to_list_2},\n {am_unicode, am_bin_is_7bit, 1, unicode_bin_is_7bit_1, wrap_unicode_bin_is_7bit_1},\n {am_erlang, am_atom_to_binary, 2, atom_to_binary_2, wrap_atom_to_binary_2},\n {am_erlang, am_binary_to_atom, 2, binary_to_atom_2, wrap_binary_to_atom_2},\n {am_erlang, am_binary_to_existing_atom, 2, binary_to_existing_atom_2, wrap_binary_to_existing_atom_2},\n {am_net_kernel, am_dflag_unicode_io, 1, net_kernel_dflag_unicode_io_1, wrap_net_kernel_dflag_unicode_io_1},\n {am_ets, am_give_away, 3, ets_give_away_3, wrap_ets_give_away_3},\n {am_ets, am_setopts, 2, ets_setopts_2, wrap_ets_setopts_2},\n {am_erlang, am_load_nif, 2, load_nif_2, wrap_load_nif_2},\n {am_erlang, am_call_on_load_function, 1, call_on_load_function_1, wrap_call_on_load_function_1},\n {am_erlang, am_finish_after_on_load, 2, finish_after_on_load_2, wrap_finish_after_on_load_2},\n {am_erlang, am_binary_to_term, 2, binary_to_term_2, wrap_binary_to_term_2},\n {am_erlang, am_binary_part, 2, binary_part_2, binary_part_2},\n {am_erlang, am_binary_part, 3, binary_part_3, binary_part_3},\n {am_binary, am_compile_pattern, 1, binary_compile_pattern_1, wrap_binary_compile_pattern_1},\n {am_binary, am_match, 2, binary_match_2, wrap_binary_match_2},\n {am_binary, am_match, 3, binary_match_3, wrap_binary_match_3},\n {am_binary, am_matches, 2, binary_matches_2, wrap_binary_matches_2},\n {am_binary, am_matches, 3, binary_matches_3, wrap_binary_matches_3},\n {am_binary, am_longest_common_prefix, 1, binary_longest_common_prefix_1, wrap_binary_longest_common_prefix_1},\n {am_binary, am_longest_common_suffix, 1, binary_longest_common_suffix_1, wrap_binary_longest_common_suffix_1},\n {am_binary, am_first, 1, 
binary_first_1, wrap_binary_first_1},\n {am_binary, am_last, 1, binary_last_1, wrap_binary_last_1},\n {am_binary, am_at, 2, binary_at_2, wrap_binary_at_2},\n {am_binary, am_part, 2, binary_binary_part_2, wrap_binary_binary_part_2},\n {am_binary, am_part, 3, binary_binary_part_3, wrap_binary_binary_part_3},\n {am_binary, am_bin_to_list, 1, binary_bin_to_list_1, wrap_binary_bin_to_list_1},\n {am_binary, am_bin_to_list, 2, binary_bin_to_list_2, wrap_binary_bin_to_list_2},\n {am_binary, am_bin_to_list, 3, binary_bin_to_list_3, wrap_binary_bin_to_list_3},\n {am_binary, am_list_to_bin, 1, binary_list_to_bin_1, wrap_binary_list_to_bin_1},\n {am_binary, am_copy, 1, binary_copy_1, wrap_binary_copy_1},\n {am_binary, am_copy, 2, binary_copy_2, wrap_binary_copy_2},\n {am_binary, am_referenced_byte_size, 1, binary_referenced_byte_size_1, wrap_binary_referenced_byte_size_1},\n {am_binary, am_encode_unsigned, 1, binary_encode_unsigned_1, wrap_binary_encode_unsigned_1},\n {am_binary, am_encode_unsigned, 2, binary_encode_unsigned_2, wrap_binary_encode_unsigned_2},\n {am_binary, am_decode_unsigned, 1, binary_decode_unsigned_1, wrap_binary_decode_unsigned_1},\n {am_binary, am_decode_unsigned, 2, binary_decode_unsigned_2, wrap_binary_decode_unsigned_2},\n {am_erlang, am_nif_error, 1, nif_error_1, wrap_nif_error_1},\n {am_erlang, am_nif_error, 2, nif_error_2, wrap_nif_error_2},\n {am_prim_file, am_internal_name2native, 1, prim_file_internal_name2native_1, wrap_prim_file_internal_name2native_1},\n {am_prim_file, am_internal_native2name, 1, prim_file_internal_native2name_1, wrap_prim_file_internal_native2name_1},\n {am_prim_file, am_internal_normalize_utf8, 1, prim_file_internal_normalize_utf8_1, wrap_prim_file_internal_normalize_utf8_1},\n {am_prim_file, am_is_translatable, 1, prim_file_is_translatable_1, wrap_prim_file_is_translatable_1},\n {am_file, am_native_name_encoding, 0, file_native_name_encoding_0, wrap_file_native_name_encoding_0},\n {am_erlang, am_check_old_code, 1, check_old_code_1, wrap_check_old_code_1},\n {am_erlang, am_universaltime_to_posixtime, 1, universaltime_to_posixtime_1, wrap_universaltime_to_posixtime_1},\n {am_erlang, am_posixtime_to_universaltime, 1, posixtime_to_universaltime_1, wrap_posixtime_to_universaltime_1},\n {am_erlang, am_dt_put_tag, 1, dt_put_tag_1, wrap_dt_put_tag_1},\n {am_erlang, am_dt_get_tag, 0, dt_get_tag_0, wrap_dt_get_tag_0},\n {am_erlang, am_dt_get_tag_data, 0, dt_get_tag_data_0, wrap_dt_get_tag_data_0},\n {am_erlang, am_dt_spread_tag, 1, dt_spread_tag_1, wrap_dt_spread_tag_1},\n {am_erlang, am_dt_restore_tag, 1, dt_restore_tag_1, wrap_dt_restore_tag_1},\n {am_erlang, am_dt_prepend_vm_tag_data, 1, dt_prepend_vm_tag_data_1, wrap_dt_prepend_vm_tag_data_1},\n {am_erlang, am_dt_append_vm_tag_data, 1, dt_append_vm_tag_data_1, wrap_dt_append_vm_tag_data_1},\n {am_erlang, am_prepare_loading, 2, prepare_loading_2, wrap_prepare_loading_2},\n {am_erlang, am_finish_loading, 1, finish_loading_1, wrap_finish_loading_1},\n {am_erlang, am_insert_element, 3, insert_element_3, wrap_insert_element_3},\n {am_erlang, am_delete_element, 2, delete_element_2, wrap_delete_element_2},\n {am_erlang, am_binary_to_integer, 1, binary_to_integer_1, wrap_binary_to_integer_1},\n {am_erlang, am_binary_to_integer, 2, binary_to_integer_2, wrap_binary_to_integer_2},\n {am_erlang, am_integer_to_binary, 1, integer_to_binary_1, wrap_integer_to_binary_1},\n {am_erlang, am_list_to_integer, 2, list_to_integer_2, wrap_list_to_integer_2},\n {am_erlang, am_float_to_binary, 1, float_to_binary_1, 
wrap_float_to_binary_1},\n {am_erlang, am_float_to_binary, 2, float_to_binary_2, wrap_float_to_binary_2},\n {am_erlang, am_binary_to_float, 1, binary_to_float_1, wrap_binary_to_float_1},\n {am_io, am_printable_range, 0, io_printable_range_0, wrap_io_printable_range_0},\n {am_os, am_unsetenv, 1, os_unsetenv_1, wrap_os_unsetenv_1},\n {am_re, am_inspect, 2, re_inspect_2, wrap_re_inspect_2},\n {am_erlang, am_is_map, 1, is_map_1, is_map_1},\n {am_erlang, am_map_size, 1, map_size_1, map_size_1},\n {am_maps, am_to_list, 1, maps_to_list_1, wrap_maps_to_list_1},\n {am_maps, am_find, 2, maps_find_2, wrap_maps_find_2},\n {am_maps, am_get, 2, maps_get_2, wrap_maps_get_2},\n {am_maps, am_from_list, 1, maps_from_list_1, wrap_maps_from_list_1},\n {am_maps, am_is_key, 2, maps_is_key_2, wrap_maps_is_key_2},\n {am_maps, am_keys, 1, maps_keys_1, wrap_maps_keys_1},\n {am_maps, am_merge, 2, maps_merge_2, wrap_maps_merge_2},\n {am_maps, am_new, 0, maps_new_0, wrap_maps_new_0},\n {am_maps, am_put, 3, maps_put_3, wrap_maps_put_3},\n {am_maps, am_remove, 2, maps_remove_2, wrap_maps_remove_2},\n {am_maps, am_update, 3, maps_update_3, wrap_maps_update_3},\n {am_maps, am_values, 1, maps_values_1, wrap_maps_values_1},\n {am_erts_internal, am_cmp_term, 2, erts_internal_cmp_term_2, wrap_erts_internal_cmp_term_2},\n {am_ets, am_take, 2, ets_take_2, wrap_ets_take_2},\n {am_erlang, am_fun_info_mfa, 1, fun_info_mfa_1, wrap_fun_info_mfa_1},\n {am_erlang, am_get_keys, 0, get_keys_0, wrap_get_keys_0},\n {am_ets, am_update_counter, 4, ets_update_counter_4, wrap_ets_update_counter_4},\n {am_erts_debug, am_map_info, 1, erts_debug_map_info_1, wrap_erts_debug_map_info_1},\n {am_erlang, am_hash, 2, hash_2, wrap_hash_2},\n {am_hipe_bifs, am_write_u8, 2, hipe_bifs_write_u8_2, wrap_hipe_bifs_write_u8_2},\n {am_hipe_bifs, am_write_u32, 2, hipe_bifs_write_u32_2, wrap_hipe_bifs_write_u32_2},\n {am_hipe_bifs, am_bytearray, 2, hipe_bifs_bytearray_2, wrap_hipe_bifs_bytearray_2},\n {am_hipe_bifs, am_bytearray_sub, 2, hipe_bifs_bytearray_sub_2, wrap_hipe_bifs_bytearray_sub_2},\n {am_hipe_bifs, am_bytearray_update, 3, hipe_bifs_bytearray_update_3, wrap_hipe_bifs_bytearray_update_3},\n {am_hipe_bifs, am_bitarray, 2, hipe_bifs_bitarray_2, wrap_hipe_bifs_bitarray_2},\n {am_hipe_bifs, am_bitarray_sub, 2, hipe_bifs_bitarray_sub_2, wrap_hipe_bifs_bitarray_sub_2},\n {am_hipe_bifs, am_bitarray_update, 3, hipe_bifs_bitarray_update_3, wrap_hipe_bifs_bitarray_update_3},\n {am_hipe_bifs, am_array, 2, hipe_bifs_array_2, wrap_hipe_bifs_array_2},\n {am_hipe_bifs, am_array_length, 1, hipe_bifs_array_length_1, wrap_hipe_bifs_array_length_1},\n {am_hipe_bifs, am_array_sub, 2, hipe_bifs_array_sub_2, wrap_hipe_bifs_array_sub_2},\n {am_hipe_bifs, am_array_update, 3, hipe_bifs_array_update_3, wrap_hipe_bifs_array_update_3},\n {am_hipe_bifs, am_ref, 1, hipe_bifs_ref_1, wrap_hipe_bifs_ref_1},\n {am_hipe_bifs, am_ref_get, 1, hipe_bifs_ref_get_1, wrap_hipe_bifs_ref_get_1},\n {am_hipe_bifs, am_ref_set, 2, hipe_bifs_ref_set_2, wrap_hipe_bifs_ref_set_2},\n {am_hipe_bifs, am_enter_code, 2, hipe_bifs_enter_code_2, wrap_hipe_bifs_enter_code_2},\n {am_hipe_bifs, am_alloc_data, 2, hipe_bifs_alloc_data_2, wrap_hipe_bifs_alloc_data_2},\n {am_hipe_bifs, am_constants_size, 0, hipe_bifs_constants_size_0, wrap_hipe_bifs_constants_size_0},\n {am_hipe_bifs, am_merge_term, 1, hipe_bifs_merge_term_1, wrap_hipe_bifs_merge_term_1},\n {am_hipe_bifs, am_fun_to_address, 1, hipe_bifs_fun_to_address_1, wrap_hipe_bifs_fun_to_address_1},\n {am_hipe_bifs, am_set_native_address, 3, 
hipe_bifs_set_native_address_3, wrap_hipe_bifs_set_native_address_3},\n {am_hipe_bifs, am_set_funinfo_native_address, 3, hipe_bifs_set_funinfo_native_address_3, wrap_hipe_bifs_set_funinfo_native_address_3},\n {am_hipe_bifs, am_invalidate_funinfo_native_addresses, 1, hipe_bifs_invalidate_funinfo_native_addresses_1, wrap_hipe_bifs_invalidate_funinfo_native_addresses_1},\n {am_hipe_bifs, am_update_code_size, 3, hipe_bifs_update_code_size_3, wrap_hipe_bifs_update_code_size_3},\n {am_hipe_bifs, am_code_size, 1, hipe_bifs_code_size_1, wrap_hipe_bifs_code_size_1},\n {am_hipe_bifs, am_enter_sdesc, 1, hipe_bifs_enter_sdesc_1, wrap_hipe_bifs_enter_sdesc_1},\n {am_hipe_bifs, am_bif_address, 3, hipe_bifs_bif_address_3, wrap_hipe_bifs_bif_address_3},\n {am_hipe_bifs, am_primop_address, 1, hipe_bifs_primop_address_1, wrap_hipe_bifs_primop_address_1},\n {am_hipe_bifs, am_atom_to_word, 1, hipe_bifs_atom_to_word_1, wrap_hipe_bifs_atom_to_word_1},\n {am_hipe_bifs, am_term_to_word, 1, hipe_bifs_term_to_word_1, wrap_hipe_bifs_term_to_word_1},\n {am_hipe_bifs, am_get_fe, 2, hipe_bifs_get_fe_2, wrap_hipe_bifs_get_fe_2},\n {am_hipe_bifs, am_set_native_address_in_fe, 2, hipe_bifs_set_native_address_in_fe_2, wrap_hipe_bifs_set_native_address_in_fe_2},\n {am_hipe_bifs, am_find_na_or_make_stub, 2, hipe_bifs_find_na_or_make_stub_2, wrap_hipe_bifs_find_na_or_make_stub_2},\n {am_hipe_bifs, am_check_crc, 1, hipe_bifs_check_crc_1, wrap_hipe_bifs_check_crc_1},\n {am_hipe_bifs, am_system_crc, 0, hipe_bifs_system_crc_0, wrap_hipe_bifs_system_crc_0},\n {am_hipe_bifs, am_get_rts_param, 1, hipe_bifs_get_rts_param_1, wrap_hipe_bifs_get_rts_param_1},\n {am_hipe_bifs, am_patch_insn, 3, hipe_bifs_patch_insn_3, wrap_hipe_bifs_patch_insn_3},\n {am_hipe_bifs, am_patch_call, 3, hipe_bifs_patch_call_3, wrap_hipe_bifs_patch_call_3},\n {am_hipe_bifs, am_add_ref, 2, hipe_bifs_add_ref_2, wrap_hipe_bifs_add_ref_2},\n {am_hipe_bifs, am_mark_referred_from, 1, hipe_bifs_mark_referred_from_1, wrap_hipe_bifs_mark_referred_from_1},\n {am_hipe_bifs, am_remove_refs_from, 1, hipe_bifs_remove_refs_from_1, wrap_hipe_bifs_remove_refs_from_1},\n {am_hipe_bifs, am_redirect_referred_from, 1, hipe_bifs_redirect_referred_from_1, wrap_hipe_bifs_redirect_referred_from_1},\n {am_hipe_bifs, am_call_count_on, 1, hipe_bifs_call_count_on_1, wrap_hipe_bifs_call_count_on_1},\n {am_hipe_bifs, am_call_count_off, 1, hipe_bifs_call_count_off_1, wrap_hipe_bifs_call_count_off_1},\n {am_hipe_bifs, am_call_count_get, 1, hipe_bifs_call_count_get_1, wrap_hipe_bifs_call_count_get_1},\n {am_hipe_bifs, am_call_count_clear, 1, hipe_bifs_call_count_clear_1, wrap_hipe_bifs_call_count_clear_1},\n {am_hipe_bifs, am_trap_count_get, 0, hipe_bifs_trap_count_get_0, wrap_hipe_bifs_trap_count_get_0},\n {am_hipe_bifs, am_trap_count_clear, 0, hipe_bifs_trap_count_clear_0, wrap_hipe_bifs_trap_count_clear_0},\n {am_hipe_bifs, am_process_info, 0, hipe_bifs_process_info_0, wrap_hipe_bifs_process_info_0},\n {am_hipe_bifs, am_process_info_clear, 0, hipe_bifs_process_info_clear_0, wrap_hipe_bifs_process_info_clear_0},\n {am_hipe_bifs, am_message_info, 0, hipe_bifs_message_info_0, wrap_hipe_bifs_message_info_0},\n {am_hipe_bifs, am_message_info_clear, 0, hipe_bifs_message_info_clear_0, wrap_hipe_bifs_message_info_clear_0},\n {am_hipe_bifs, am_message_sizes, 0, hipe_bifs_message_sizes_0, wrap_hipe_bifs_message_sizes_0},\n {am_hipe_bifs, am_gc_info, 0, hipe_bifs_gc_info_0, wrap_hipe_bifs_gc_info_0},\n {am_hipe_bifs, am_shared_gc_info, 0, hipe_bifs_shared_gc_info_0, wrap_hipe_bifs_shared_gc_info_0},\n 
{am_hipe_bifs, am_incremental_gc_info, 0, hipe_bifs_incremental_gc_info_0, wrap_hipe_bifs_incremental_gc_info_0},\n {am_hipe_bifs, am_gc_info_clear, 0, hipe_bifs_gc_info_clear_0, wrap_hipe_bifs_gc_info_clear_0},\n {am_hipe_bifs, am_pause_times, 0, hipe_bifs_pause_times_0, wrap_hipe_bifs_pause_times_0},\n {am_hipe_bifs, am_system_timer, 0, hipe_bifs_system_timer_0, wrap_hipe_bifs_system_timer_0},\n {am_hipe_bifs, am_system_timer_clear, 0, hipe_bifs_system_timer_clear_0, wrap_hipe_bifs_system_timer_clear_0},\n {am_hipe_bifs, am_send_timer, 0, hipe_bifs_send_timer_0, wrap_hipe_bifs_send_timer_0},\n {am_hipe_bifs, am_send_timer_clear, 0, hipe_bifs_send_timer_clear_0, wrap_hipe_bifs_send_timer_clear_0},\n {am_hipe_bifs, am_gc_timer, 0, hipe_bifs_gc_timer_0, wrap_hipe_bifs_gc_timer_0},\n {am_hipe_bifs, am_shared_gc_timer, 0, hipe_bifs_shared_gc_timer_0, wrap_hipe_bifs_shared_gc_timer_0},\n {am_hipe_bifs, am_gc_timer_clear, 0, hipe_bifs_gc_timer_clear_0, wrap_hipe_bifs_gc_timer_clear_0},\n {am_hipe_bifs, am_misc_timer, 0, hipe_bifs_misc_timer_0, wrap_hipe_bifs_misc_timer_0},\n {am_hipe_bifs, am_misc_timer_clear, 0, hipe_bifs_misc_timer_clear_0, wrap_hipe_bifs_misc_timer_clear_0},\n {am_hipe_bifs, am_get_hrvtime, 0, hipe_bifs_get_hrvtime_0, wrap_hipe_bifs_get_hrvtime_0},\n {am_hipe_bifs, am_stop_hrvtime, 0, hipe_bifs_stop_hrvtime_0, wrap_hipe_bifs_stop_hrvtime_0},\n {am_hipe_bifs, am_show_estack, 1, hipe_bifs_show_estack_1, wrap_hipe_bifs_show_estack_1},\n {am_hipe_bifs, am_show_heap, 1, hipe_bifs_show_heap_1, wrap_hipe_bifs_show_heap_1},\n {am_hipe_bifs, am_show_nstack, 1, hipe_bifs_show_nstack_1, wrap_hipe_bifs_show_nstack_1},\n {am_hipe_bifs, am_nstack_used_size, 0, hipe_bifs_nstack_used_size_0, wrap_hipe_bifs_nstack_used_size_0},\n {am_hipe_bifs, am_show_pcb, 1, hipe_bifs_show_pcb_1, wrap_hipe_bifs_show_pcb_1},\n {am_hipe_bifs, am_show_term, 1, hipe_bifs_show_term_1, wrap_hipe_bifs_show_term_1},\n {am_hipe_bifs, am_in_native, 0, hipe_bifs_in_native_0, wrap_hipe_bifs_in_native_0},\n {am_hipe_bifs, am_modeswitch_debug_on, 0, hipe_bifs_modeswitch_debug_on_0, wrap_hipe_bifs_modeswitch_debug_on_0},\n {am_hipe_bifs, am_modeswitch_debug_off, 0, hipe_bifs_modeswitch_debug_off_0, wrap_hipe_bifs_modeswitch_debug_off_0},\n {am_hipe_bifs, am_debug_native_called, 2, hipe_bifs_debug_native_called_2, wrap_hipe_bifs_debug_native_called_2},\n {am_hipe_bifs, am_llvm_fix_pinned_regs, 0, hipe_bifs_llvm_fix_pinned_regs_0, wrap_hipe_bifs_llvm_fix_pinned_regs_0},\n {am_hipe_bifs, am_write_u64, 2, hipe_bifs_write_u64_2, wrap_hipe_bifs_write_u64_2},\n};\n\n"
},
{
"alpha_fraction": 0.4991679787635803,
"alphanum_fraction": 0.5127114653587341,
"avg_line_length": 43.790889739990234,
"blob_id": "23f5e80054c2510f20b4a4792cca8382bb6693b3",
"content_id": "09b4dd624cfda3de62b6198de1a1f730ccb31494",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21634,
"license_type": "no_license",
"max_line_length": 481,
"num_lines": 483,
"path": "/bin/pussh",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python \n###########################################################\n# PuSSH - Pythonized ubiquitous SSH # Version 2.2 #########\n###########################################################\n# an Open Source script originally made at CERN (2003) ####\n###########################################################\n# by A. Doicin #######################\n###########################################################\n# This program is free software; you can redistribute it \n# and/or modify it under the terms of the GNU General \n# Public License as published by the Free Software \n# Foundation; either version 2 of the License, or (at your \n# option) any later version.\n###########################################################\n# http://pussh.sourceforge.net\n###########################################################\n\nimport os\nimport sys\nimport getopt\nimport select\nimport string\n\n###########################################################\n# Function \"Usage\" - self-explanatory really ... and also \n# a decent synopsis of script functionality.\n###########################################################\n\ndef Usage():\n \"\"\"\n PUSSH is \"Pythonic Ubiquitous SSH\" - a big SSH wrapper consisting of two Python modules (at this time one is called explicitly by the other, although this may change in future versions) - for usage on networks with many/multiple hosts, ideally wherein SSH is configured with Kerberos, RSA/DSA keys, or SSH-Agent in such a way as to avoid any actual password authentication on the command line. Using PuSSH, you can send the same command via SSH to a range of machines of any size.\n\n ---\n\n PUSSH Usage:\n\n pussh [-h] [-s] [-r] [-f <filename>] [-p <parallel>] [-P <port>] [-l <userid>] [-t <timeout value in seconds>] <target> <commands>\n\n -h print this helpful message\n\n -p parallel - numeric argument is equal to number of machines on which command \n\t\t\t\tshould execute in parallel. If NOT specified, this option will be SET by \n\t\t\t\tdefault, and the size of the \"parallel slice\" is set to a finely tuned 42. \n\n -P a numerical value indicating the TCP port on which the target machine's SSHD \n\t\t\t\tserver is (or should be) listening. If NOT specified, this value is SET \n\t\t\t\tto 22 by default. If you're targetting large ranges of machines in a \n\t\t\t\tspeculative fashion and don't want to wait too long, use the -t <timeout>\n\t\t\t\toption in conjunction with this. \n\n -l <user> send command as user <user> ; if NOT specified, this option is SET to the user \n\t\t\t\twho owns the current shell.\n\n -r send command as root (a lazy way of specifying -l root ... see above).\n\n -t <timeout value in seconds> time after which a command times out for any particular reason; if NOT \n\t\t\t\tspecified, this option is SET to 60 seconds by default.\n\n -f <filename> file contains list of target-hosts, each on one row of the file.\n\n -s execute a command over a range of machines in SEQUENCE (PARALLEL execution is \n the default). 
This can be useful when a password is necessary on one or more \n machines in a range or list for some reason.\n\n --prefix prefix each line of output with 'hostname:' \n (note: this option is rather crudely implemented and so output may not always \n\t\t\t\tbe 100% satisfactory - please feel free to voice your complaints).\n \n <target(s)> target machine(s) in the following possible formats:\n\n machine - single target\n machine[01-10] - range delimited by numeric(-numeric) suffixes in \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t square brackets\n machine[01,05,10] - range delimited by numeric(,numeric) suffixes \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t separated by commas\n machine[01-05,07,09-10] - any combination or permutation of the above 2\n\n ---\n\n Example 1: pussh -r machine[01-10,12,15-20] uptime \n\n Example 2: pussh machine[01-10,12,15-20] \"uname -a; uptime\"\n\n Example 3: pussh -t 10 -f hostlist.txt \"uname -a; uptime; ls -l | grep stuff\"\n \n Example 4: pussh -t 10 -P 55 -f hostlist -r \"uname -a; uptime; ls -l | grep stuff\"\n\"\"\"\n\n###########################################################\n# More practical Function definitions start here ... \n###########################################################\ndef InvalidTargetting():\n print \"PUSSH(001): Invalid host targetting - do try try again.\"\n os._exit(1)\n###########################################################\ndef test_for_numeric(var): \n try: var = int(var)\n except ValueError:\n print \"PUSSH(001): Non-numerical variable:\", var, \"in timeout value (-t xx), range suffix, or parallel group spec (-p xx).\"\n os._exit(1)\n return\n###########################################################\ndef DeterminePath(login):\n progpath = os.path.dirname(sys.argv[0])\n return progpath\n###########################################################\ndef optionizer():\n \"\"\"PUSSH options specification:\n \n pussh [-h] [-s] [-r] [-f <filename>] [-P <sshd TCP port>] [-l <userid>] [-t <timeout value in seconds>] <target> <commands>\"\"\"\n\n unique_case = 0 # by default, we don't expect a scenario wherein the only option is -s\n # note: this entire function could be improved by turning it into a dictionary\n # ... 
too many \"special cases\" may not be good programming form, but for now, what the heck.\n \n if loginspec == 1 and timeoutspec == 0 and parallel == 1: \n opty = \"-P \", port, \"-l \", login\n # print \"loginspec is 1, timeoutspec is 0, and parallel is 1\"\n elif loginspec == 1 and timeoutspec == 0 and parallel == 0: \n opty = \"-P \", port, \"-l \", login, \" -s \"\n # print \"loginspec is 1, timeoutspec is 0, and parallel is 0\"\n elif loginspec == 1 and timeoutspec == 1 and parallel == 1: \n opty = \"-P \", port, \"-l \", login, \" -t \", timevar\n # print \"loginspec is 1, timeoutspec is 1, and parallel is 1\"\n elif loginspec == 1 and timeoutspec == 1 and parallel == 0: \n opty = \"-P \", port, \"-l \", login, \" -t \", timevar, \" -s \"\n # print \"loginspec is 1, timeoutspec is 1, and parallel is 0\"\n elif loginspec == 0 and timeoutspec == 1 and parallel == 1: \n opty = \"-P \", port, \"-t \", timevar\n # print \"loginspec is 0, timeoutspec is 1, and parallel is 1\"\n elif loginspec == 0 and timeoutspec == 1 and parallel == 0: \n opty = \"-P \", port, \"-t \", timevar, \" -s \"\n # print \"loginspec is 0, timeoutspec is 1, and parallel is 0\"\n elif loginspec == 0 and timeoutspec == 0 and parallel == 0:\n # print \"loginspec is 0, timeoutspec is 0, and parallel is 0\"\n opty = opto = \"-s\"\n unique_case = 1\n else: \n # print \"apparently nothing, but our port is ...\", port\n opty = \"-P \", port\n if unique_case == 0: opto = string.join(opty,' ') # heavily crucial\n return opto, unique_case\n############################################################\ndef range_by_dash(x,y):\n list = range(x,y)\n return list\n############################################################\ndef RangeCruncher(target):\n lsbp = string.find(target, \"[\")\n if lsbp == 0: InvalidTargetting()\n target_prefix = string.join(target[:lsbp],'')\n # print \"target prefix is\", target_prefix\n target_suffix = string.join(target[lsbp+1:target_length-1],'')\n # print \"target suffix is\", target_suffix\n if len(target_suffix) < 3: InvalidTargetting()\n target_list = []\n dashcount = string.count(target_suffix,\"-\")\n commacount = string.count(target_suffix,\",\")\n if dashcount == commacount == 0: InvalidTargetting()\n # check for commas \",\", dashes \"-\" and stuff ...\n if string.count(target_suffix,\",\")>0:\n suffix_list = string.split(target_suffix,\",\")\n for arg in suffix_list:\n ranger = 0\n # print \"testing\", arg\n if string.count(arg,\"-\") == 1:\n range_limits = string.split(arg,\"-\")\n for i in range_limits:\n test_for_numeric(i)\n if range_limits[0] < range_limits[1] and len(range_limits[0]) == len(range_limits[1]):\n ranger = 1\n lenny = len(range_limits[0])\n for limit in range_limits: test_for_numeric(limit)\n lowerlimit = int(range_limits[0])\n upperlimit = int(range_limits[1])\n else: InvalidTargetting()\n elif string.count(arg,\"-\")>1:\n print \"PUSSH(001): ... 
range is invalid on target component(s) ...\", arg\n InvalidTargetting()\n else:\n test_for_numeric(arg)\n # print arg\n lenny = len(arg)\n next_target_list = [int(arg)]\n target_list = target_list + next_target_list\n if ranger == 1:\n # print \"range aii is\", range_by_dash(lowerlimit, upperlimit+1)\n next_target_list = range_by_dash(lowerlimit, upperlimit+1)\n target_list = target_list + next_target_list\n elif string.count(target_suffix,\"-\")==1 and string.count(target_suffix,\",\")==0:\n range_limits = string.split(target_suffix,\"-\")\n for limit in range_limits: test_for_numeric(limit)\n if range_limits[0] < range_limits[1] and len(range_limits[0]) == len(range_limits[1]):\n lenny = len(range_limits[0])\n lowerlimit = int(range_limits[0])\n lenny = len(range_limits[0])\n # print \"lower limit on singular range is\", lowerlimit\n upperlimit = int(range_limits[1])\n # print \"upper limit on singular range is\", upperlimit\n for i in range_limits:\n # print \"testing\", i\n test_for_numeric(i)\n # print \"singular range is\", range_by_dash(lowerlimit, upperlimit+1)\n next_target_list = range_by_dash(lowerlimit, upperlimit+1)\n target_list = target_list + next_target_list\n else:\n print \"PUSSH(001): ... invalid range construction ...\"\n InvalidTargetting()\n\n elif string.count(target_suffix,\"-\")>1 and string.count(target_suffix,\",\") == 0: InvalidTargetting()\n\n hostlist = []\n for host in target_list:\n host = target_prefix + string.zfill(host,lenny)\n # print string.zfill(host,lenny)\n hostlist.insert(len(hostlist), host) # hostlist is finally ready\n return hostlist # ... return to sender ...\n############################################################\ndef keyboard_interrupt():\n print \"PUSSH(001): <<<<< _Clean_Keyboard_Interrupt_ >>>>>\"\n os._exit(1)\n############################################################\ndef sequentialCommand(host):\n letsgo = '%s/brussh.py %s %s %s 2>&1' % (progpath, opto, host, payload)\n try:\n os.system('echo %s:' % (host))\n os.system(letsgo)\n except KeyboardInterrupt: print keyboard_interrupt()\n############################################################\ndef parallelCommand(host):\n while prefix == 1: \n return \"%s/brussh.py %s %s %s 2>&1 | sed 's/^/%s: /'\" % (progpath, opto, host, payload, host)\n else: return \"echo %s: ; %s/brussh.py %s %s %s 2>&1\" % (host, progpath, opto, host, payload)\n############################################################\ndef readout():\n while 1:\n try:\n # Could put a timeout here if we had something else to do ...\n readable,writable,errors=select.select(readPipes,[],[])\n for p in readable:\n print p.read().rstrip()\n readPipes.remove(p)\n # os.wait() # Don't want zombies\n if len(readPipes)==0: break\n except KeyboardInterrupt: print keyboard_interrupt()\n############################################################\ndef SliceyNicey(hostlist,step,originalstep,SliceItNice): # parallel = 1 ... 
easy life, or so you might think ...\n stepsize = int(originalstep)\n hostlistlen = int(len(hostlist))\n if hostlistlen > stepsize:\n SliceItNice = 1\n slices, remainder = divmod(hostlistlen,stepsize)\n\n ### Firstly loop through each 'slice of however big the slice is'\n\n for i in range(slices):\n # print \"step is now\", hostlist[step:step+stepsize]\n for host in hostlist[step:step+stepsize]: readPipes.append(os.popen(parallelCommand(host)))\n readout()\n step = step + stepsize\n\n ### and then through the remainder, if there is one.\n\n if SliceItNice == 1 and remainder == 0: os._exit(0)\n elif SliceItNice == 1 and remainder > 0:\n for host in hostlist[step:]: readPipes.append(os.popen(parallelCommand(host)))\n readout()\n\n ### If target list is less than whatever the default slice-size is, fire \n ### off commands to entire target list in parallel.\n\n elif SliceItNice == 0:\n for host in hostlist: readPipes.append(os.popen(parallelCommand(host)))\n readout()\n return\n\n##################### END of FUNCTIONs #####################\n\n################### Start of MAIN PROGRAM ##################\n\nif __name__ == \"__main__\":\n############################################################\n# Declaration of global variables and defaults\n############################################################\n\n timeoutspec = 0 # this means the default timeout will\n # kick in on \"brush\" which is 60 secs\n # i.e. assumes timeout is not\n # specified on the command line\n\n loginspec = 0 # this means the default user will\n # kick in on \"brussh\" which is the\n # current user i.e. assumes login is\n # not specified on the command line\n \n login = os.environ['LOGNAME'] # as per above\n\n port = \"22\"\n\n progpath = DeterminePath(login)\n\n # a few defaults to start, concerned with parallelism ######\n\n parallel = 1 # execute commands in parallel (0 will\n # mean sequential processing)\n\n sequence = 0 # means the same as above. but in reverse\n\n readPipes = [] # empty \"piperack\" to start\n\n SliceItNice = 0 # no need to slice, i.e. range or list\n # of hosts does not exceed 40 or \n # specified range via option -p with argument \n\n prefix = 0 # do not prefix output with 'hostname: '\n\n step = 0 # initialize\n originalstep = 42 # default no. of machines to \n # execute parallel commands on\n\n####### otherwise, assume nothing ... 
#####################\n\n    target_is_singular = target_is_range = target_is_file = 0\n\n############################################################\n# Command line options pre-amble ...\n############################################################\n    try: \n        options, arguments = getopt.getopt(sys.argv[1:],'P:l:t:p:f:rsh',['port','login','timeout','parallel','file','root','sequence','help','prefix','prefixitright'])\n    except getopt.error:\n        print Usage.__doc__\n        os._exit(1)\n\n    for opt in options:\n        if opt[0] == '-t':\n            timeoutspec = 1\n            timevar = opt[1]\n            test_for_numeric(timevar)\n            if int(timevar) < 301 and int(timevar) > 0: timeout_value = int(timevar)\n            else:\n                print \"Invalid timeout value - timeout must be\" \n                print \"greater than or equal to 1 second, and\" \n                print \"less than or equal to 300 seconds.\"\n                print \"Type pussh -h <ENTER> for help.\"\n                os._exit(1)\n\n        if opt[0] == '-f':\n            target_is_file = 1\n            file = opt[1]\n            \n        if opt[0] == '-s': \n            parallel = 0\n            sequence = 1\n            \n        if opt[0] == '-p':\n            stepspec = parallel = 1\n            size_of_slice = opt[1]\n            test_for_numeric(size_of_slice)\n            originalstep = size_of_slice \n\n        if opt[0] == '-P':\n            portspec = 1\n            port = opt[1]\n\n        if opt[0] == '-r':\n            loginspec = 1\n            login = \"root\"\n            # print \"login now=\", login\n\n        if opt[0] == '-l':\n            loginspec = 1\n            login = opt[1]\n\n        if opt[0] == '--prefix': \n            arglist = sys.argv[1:]\n            arglist.remove('--prefix')\n            arglist = string.join(arglist)\n            os.system('%s/pussh --prefixitright %s 2>&1 | sort -k 1 -n' % (progpath,arglist)) \n            os._exit(0) \n            \n        if opt[0] == '--prefixitright': \n            prefix = 1\n            \n        if opt[0] == '-h': \n            print Usage.__doc__\n            os._exit(1)\n\n    if len(arguments) < 1 and target_is_file:\n        print \"PUSSH(001) Error: not enough commands to send to hostlist in file \\\"\" + file + \"\\\" - you need at least 1 command. Type pussh -h <ENTER> for help.\"\n        os._exit(1)\n\n    if len(arguments) < 2 and target_is_file == 0:\n        print \"PUSSH(001) Error: not enough commands to send to host - you need at least 1 command. Type pussh -h <ENTER> for help.\"\n        os._exit(1)\n\n    else: pass\n\n    if (parallel == sequence): \n        print \"PUSSH(001) Error: Options are incorrect - you cannot specify parallel and sequential processing concurrently.\"\n        print optionizer.__doc__\n        os._exit(1)\n\n### end of options preamble, and start of 'what is the nature of the target?' pre-amble ##############################\n\n    try: target = arguments[0]\n    except IndexError:\n        print \"PUSSH(001) Error: no target specified.\"\n        os._exit(1)\n\n    # assume no square bracketed suffix i.e. no multi-targetting \n\n    target_length = len(target)\n\n    # find no. of left/right square brackets or \"lsbs\" and \"rsbs\" ... there can be only 1 of each ...\n    # if we find some, then there's a range of targets rather than a single machine target ...\n    # which means target_is_range = 1\n\n    lsbs = string.count(target,\"[\")\n    rsbs = string.count(target,\"]\")\n\n    if lsbs == rsbs == 1 and target[target_length-1] == \"]\": target_is_range = 1\n    \n    elif lsbs == rsbs == 0: target_is_singular = 1 # note: this could also mean a single file containing a\n                                                   # list of target hosts ... \n    else: InvalidTargetting()\n\n#######################################################################\n### where the action really begins - and ends ... 
######\n#######################################################################\n\n    if target_is_range: \n    \n        hostlist = RangeCruncher(target)\n\n        # print \"target list is\", target_list\n        unique_case = 0\n        opto = optionizer()[0]\n        payload = '\\\"' + string.join(arguments[1:]) + '\\\"'\n\n        if parallel == 0 and target_is_range:\n            for host in hostlist: sequentialCommand(host) # no parallelism, as per option -s for sequential op\n            os._exit(0)\n\n        # else ...\n\n        ### If target list exceeds slice size, split target list into groups of\n        ### \"slice-size\", so we only ever fire off that many ssh commands\n        ### at one time. \n\n        SliceyNicey(hostlist,step,originalstep,SliceItNice) # parallelism kicks in\n\n####################################################################### \n    elif target_is_singular == 1 and target_is_file == 0: # i.e. target is NOT a file ... but a SINGLE host\n        host = target\n        payload = '\\\"' + string.join(arguments[1:]) + '\\\"'\n\n        opto = optionizer()[0] # sort out these troublesome options ...\n        if parallel == 0:\n            sequentialCommand(host) # no parallelism, as per option -s for sequential op\n            os._exit(0)\n\n        # else\n\n        readPipes.append(os.popen(parallelCommand(host))) # parallelism kicks in\n        readout()\n\n#######################################################################\n    elif target_is_singular and target_is_file: # there can only be a single file, with 1 host per line\n        hostlist = []\n        f = open(file)\n        hostlist = f.read().splitlines()\n        payload = '\\\"' + string.join(arguments[0:]) + '\\\"'\n        opto = optionizer()[0]\n        \n        if optionizer()[1]: # by implication, parallel = 0 but unique_case also 1 (see optionizer()) ...\n            for host in hostlist: sequentialCommand(host) # ... something to tidy up here ? ... wtf =)\n            os._exit(0)\n        \n        if optionizer()[1] == parallel == 0: # parallel = 0, but unique case = 0, so other options are specified also ...\n            for host in hostlist: sequentialCommand(host)\n            os._exit(0)\n\n        else: SliceyNicey(hostlist,step,originalstep,SliceItNice) # parallel = 1 ...\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 24,
"blob_id": "1aa3fcd01f1a6fd9b86018c406a9b0356e18bf94",
"content_id": "4271473aebe6bcb131ceb6ca23fdce075829a099",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 1,
"path": "/.pryrc",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/irb/pryrc"
},
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 13,
"blob_id": "e380f3a72d0bc5cba4b8f8593182ea05c31c5790",
"content_id": "aa1af219901f92b301e5a7eef5735c8fa8174191",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 14,
"license_type": "permissive",
"max_line_length": 13,
"num_lines": 1,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib/ssl/vsn.mk",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "SSL_VSN = 7.2\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 40,
"blob_id": "047e473f1b5da759e45ac82305d344ff252781a3",
"content_id": "7d454b3c86a5ad7ba7817d0b2f68b29cfa98c6c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 1,
"path": "/.zlogin",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/zsh/prezto/runcoms/zlogin"
},
{
"alpha_fraction": 0.6803662776947021,
"alphanum_fraction": 0.7606319785118103,
"avg_line_length": 35.793575286865234,
"blob_id": "2ad28d881bd549b254c03d1a88fc59312edc7c65",
"content_id": "ad65f60b3eda490f408152666632fa7da9277871",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 37787,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 1027,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/erl_atom_table.h",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/*\n * Warning: Do not edit this file. It was automatically\n * generated by 'make_tables' on Thu Dec 31 02:09:59 2015.\n */\n\n#ifndef __ERL_ATOM_TABLE_H__\n#define __ERL_ATOM_TABLE_H__\nextern char* erl_atom_names[];\n\n#define am_false make_atom(0)\n#define am_true make_atom(1)\n#define am_Underscore make_atom(2)\n#define am_Noname make_atom(3)\n#define am_EOT make_atom(4)\n#define am_Cookie make_atom(5)\n#define am_fun make_atom(6)\n#define am_infinity make_atom(7)\n#define am_timeout make_atom(8)\n#define am_normal make_atom(9)\n#define am_call make_atom(10)\n#define am_return make_atom(11)\n#define am_throw make_atom(12)\n#define am_error make_atom(13)\n#define am_exit make_atom(14)\n#define am_undefined make_atom(15)\n#define am_nocatch make_atom(16)\n#define am_undefined_function make_atom(17)\n#define am_undefined_lambda make_atom(18)\n#define am_DOWN make_atom(19)\n#define am_UP make_atom(20)\n#define am_EXIT make_atom(21)\n#define am_aborted make_atom(22)\n#define am_abs_path make_atom(23)\n#define am_absoluteURI make_atom(24)\n#define am_ac make_atom(25)\n#define am_accessor make_atom(26)\n#define am_active make_atom(27)\n#define am_all make_atom(28)\n#define am_all_but_first make_atom(29)\n#define am_all_names make_atom(30)\n#define am_alloc_info make_atom(31)\n#define am_alloc_sizes make_atom(32)\n#define am_allocated make_atom(33)\n#define am_allocated_areas make_atom(34)\n#define am_allocator make_atom(35)\n#define am_allocator_sizes make_atom(36)\n#define am_alloc_util_allocators make_atom(37)\n#define am_allow_gc make_atom(38)\n#define am_allow_passive_connect make_atom(39)\n#define am_already_loaded make_atom(40)\n#define am_amd64 make_atom(41)\n#define am_anchored make_atom(42)\n#define am_and make_atom(43)\n#define am_andalso make_atom(44)\n#define am_andthen make_atom(45)\n#define am_any make_atom(46)\n#define am_anycrlf make_atom(47)\n#define am_apply make_atom(48)\n#define am_args make_atom(49)\n#define am_arg0 make_atom(50)\n#define am_arity make_atom(51)\n#define am_asn1 make_atom(52)\n#define am_async make_atom(53)\n#define am_asynchronous make_atom(54)\n#define am_atom make_atom(55)\n#define am_atom_used make_atom(56)\n#define am_attributes make_atom(57)\n#define am_await_port_send_result make_atom(58)\n#define am_await_proc_exit make_atom(59)\n#define am_await_result make_atom(60)\n#define am_await_sched_wall_time_modifications make_atom(61)\n#define am_awaiting_load make_atom(62)\n#define am_awaiting_unload make_atom(63)\n#define am_backtrace make_atom(64)\n#define am_backtrace_depth make_atom(65)\n#define am_badarg make_atom(66)\n#define am_badarith make_atom(67)\n#define am_badarity make_atom(68)\n#define am_badfile make_atom(69)\n#define am_badfun make_atom(70)\n#define am_badkey make_atom(71)\n#define am_badmap make_atom(72)\n#define am_badmatch make_atom(73)\n#define am_badsig make_atom(74)\n#define am_bag make_atom(75)\n#define am_band make_atom(76)\n#define am_big make_atom(77)\n#define am_bif_return_trap make_atom(78)\n#define am_bif_timer_server make_atom(79)\n#define am_binary make_atom(80)\n#define am_binary_bin_to_list_trap make_atom(81)\n#define am_binary_copy_trap make_atom(82)\n#define am_binary_longest_prefix_trap make_atom(83)\n#define am_binary_longest_suffix_trap make_atom(84)\n#define am_binary_match_trap make_atom(85)\n#define am_binary_matches_trap make_atom(86)\n#define am_binary_to_list_continue make_atom(87)\n#define am_binary_to_term_trap make_atom(88)\n#define am_block make_atom(89)\n#define am_blocked make_atom(90)\n#define 
am_bm make_atom(91)\n#define am_bnot make_atom(92)\n#define am_bor make_atom(93)\n#define am_bxor make_atom(94)\n#define am_break_ignored make_atom(95)\n#define am_breakpoint make_atom(96)\n#define am_bsl make_atom(97)\n#define am_bsr make_atom(98)\n#define am_bsr_anycrlf make_atom(99)\n#define am_bsr_unicode make_atom(100)\n#define am_build_type make_atom(101)\n#define am_busy_dist_port make_atom(102)\n#define am_busy_port make_atom(103)\n#define am_call_count make_atom(104)\n#define am_call_time make_atom(105)\n#define am_caller make_atom(106)\n#define am_capture make_atom(107)\n#define am_case_clause make_atom(108)\n#define am_caseless make_atom(109)\n#define am_catchlevel make_atom(110)\n#define am_cd make_atom(111)\n#define am_cdr make_atom(112)\n#define am_cflags make_atom(113)\n#define am_CHANGE make_atom(114)\n#define am_characters_to_binary_int make_atom(115)\n#define am_characters_to_list_int make_atom(116)\n#define am_clear make_atom(117)\n#define am_clock_service make_atom(118)\n#define am_close make_atom(119)\n#define am_closed make_atom(120)\n#define am_code make_atom(121)\n#define am_command make_atom(122)\n#define am_compact make_atom(123)\n#define am_compat_rel make_atom(124)\n#define am_compile make_atom(125)\n#define am_compressed make_atom(126)\n#define am_config_h make_atom(127)\n#define am_convert_time_unit make_atom(128)\n#define am_connect make_atom(129)\n#define am_connected make_atom(130)\n#define am_connection_closed make_atom(131)\n#define am_cons make_atom(132)\n#define am_const make_atom(133)\n#define am_context_switches make_atom(134)\n#define am_control make_atom(135)\n#define am_copy make_atom(136)\n#define am_cpu make_atom(137)\n#define am_cpu_timestamp make_atom(138)\n#define am_cr make_atom(139)\n#define am_crlf make_atom(140)\n#define am_creation make_atom(141)\n#define am_current_function make_atom(142)\n#define am_current_location make_atom(143)\n#define am_current_stacktrace make_atom(144)\n#define am_data make_atom(145)\n#define am_debug_flags make_atom(146)\n#define am_decimals make_atom(147)\n#define am_delay_trap make_atom(148)\n#define am_dexit make_atom(149)\n#define am_depth make_atom(150)\n#define am_dgroup_leader make_atom(151)\n#define am_dictionary make_atom(152)\n#define am_dirty_cpu_schedulers_online make_atom(153)\n#define am_disable_trace make_atom(154)\n#define am_disabled make_atom(155)\n#define am_display_items make_atom(156)\n#define am_dist make_atom(157)\n#define am_dist_cmd make_atom(158)\n#define am_Div make_atom(159)\n#define am_div make_atom(160)\n#define am_dlink make_atom(161)\n#define am_dmonitor_node make_atom(162)\n#define am_dmonitor_p make_atom(163)\n#define am_DollarDollar make_atom(164)\n#define am_DollarUnderscore make_atom(165)\n#define am_dollar_endonly make_atom(166)\n#define am_dotall make_atom(167)\n#define am_driver make_atom(168)\n#define am_driver_options make_atom(169)\n#define am_dsend make_atom(170)\n#define am_dsend_continue_trap make_atom(171)\n#define am_dunlink make_atom(172)\n#define am_duplicate_bag make_atom(173)\n#define am_dupnames make_atom(174)\n#define am_elib_malloc make_atom(175)\n#define am_emulator make_atom(176)\n#define am_enable_trace make_atom(177)\n#define am_enabled make_atom(178)\n#define am_endian make_atom(179)\n#define am_env make_atom(180)\n#define am_eof make_atom(181)\n#define am_eol make_atom(182)\n#define am_exception_from make_atom(183)\n#define am_exception_trace make_atom(184)\n#define am_extended make_atom(185)\n#define am_Eq make_atom(186)\n#define am_Eqeq 
make_atom(187)\n#define am_erlang make_atom(188)\n#define am_ERROR make_atom(189)\n#define am_error_handler make_atom(190)\n#define am_error_logger make_atom(191)\n#define am_erts_internal make_atom(192)\n#define am_ets make_atom(193)\n#define am_ETS_TRANSFER make_atom(194)\n#define am_event make_atom(195)\n#define am_exact_reductions make_atom(196)\n#define am_exclusive make_atom(197)\n#define am_exit_status make_atom(198)\n#define am_existing make_atom(199)\n#define am_exiting make_atom(200)\n#define am_exports make_atom(201)\n#define am_external make_atom(202)\n#define am_fcgi make_atom(203)\n#define am_fd make_atom(204)\n#define am_first make_atom(205)\n#define am_firstline make_atom(206)\n#define am_flags make_atom(207)\n#define am_flush make_atom(208)\n#define am_flush_monitor_messages make_atom(209)\n#define am_force make_atom(210)\n#define am_format_cpu_topology make_atom(211)\n#define am_free make_atom(212)\n#define am_fullsweep_after make_atom(213)\n#define am_fullsweep_if_old_binaries make_atom(214)\n#define am_function make_atom(215)\n#define am_functions make_atom(216)\n#define am_function_clause make_atom(217)\n#define am_garbage_collecting make_atom(218)\n#define am_garbage_collection make_atom(219)\n#define am_gc_end make_atom(220)\n#define am_gc_start make_atom(221)\n#define am_Ge make_atom(222)\n#define am_generational make_atom(223)\n#define am_get_data make_atom(224)\n#define am_get_seq_token make_atom(225)\n#define am_get_tcw make_atom(226)\n#define am_getenv make_atom(227)\n#define am_gather_gc_info_result make_atom(228)\n#define am_gather_io_bytes make_atom(229)\n#define am_gather_sched_wall_time_result make_atom(230)\n#define am_getting_linked make_atom(231)\n#define am_getting_unlinked make_atom(232)\n#define am_global make_atom(233)\n#define am_Gt make_atom(234)\n#define am_grun make_atom(235)\n#define am_group_leader make_atom(236)\n#define am_have_dt_utag make_atom(237)\n#define am_heap_block_size make_atom(238)\n#define am_heap_size make_atom(239)\n#define am_heap_sizes make_atom(240)\n#define am_heap_type make_atom(241)\n#define am_heart_port make_atom(242)\n#define am_heir make_atom(243)\n#define am_hidden make_atom(244)\n#define am_hide make_atom(245)\n#define am_high make_atom(246)\n#define am_hipe_architecture make_atom(247)\n#define am_http make_atom(248)\n#define am_httph make_atom(249)\n#define am_https make_atom(250)\n#define am_http_response make_atom(251)\n#define am_http_request make_atom(252)\n#define am_http_header make_atom(253)\n#define am_http_eoh make_atom(254)\n#define am_http_error make_atom(255)\n#define am_http_bin make_atom(256)\n#define am_httph_bin make_atom(257)\n#define am_id make_atom(258)\n#define am_if_clause make_atom(259)\n#define am_ignore make_atom(260)\n#define am_in make_atom(261)\n#define am_in_exiting make_atom(262)\n#define am_inactive make_atom(263)\n#define am_incomplete make_atom(264)\n#define am_inconsistent make_atom(265)\n#define am_index make_atom(266)\n#define am_info make_atom(267)\n#define am_info_msg make_atom(268)\n#define am_initial_call make_atom(269)\n#define am_input make_atom(270)\n#define am_internal make_atom(271)\n#define am_internal_error make_atom(272)\n#define am_internal_status make_atom(273)\n#define am_instruction_counts make_atom(274)\n#define am_invalid make_atom(275)\n#define am_is_constant make_atom(276)\n#define am_is_seq_trace make_atom(277)\n#define am_io make_atom(278)\n#define am_keypos make_atom(279)\n#define am_kill make_atom(280)\n#define am_killed make_atom(281)\n#define am_kill_ports 
make_atom(282)\n#define am_known make_atom(283)\n#define am_label make_atom(284)\n#define am_large_heap make_atom(285)\n#define am_last_calls make_atom(286)\n#define am_latin1 make_atom(287)\n#define am_ldflags make_atom(288)\n#define am_Le make_atom(289)\n#define am_lf make_atom(290)\n#define am_line make_atom(291)\n#define am_line_delimiter make_atom(292)\n#define am_line_length make_atom(293)\n#define am_linked_in_driver make_atom(294)\n#define am_links make_atom(295)\n#define am_list make_atom(296)\n#define am_list_to_binary_continue make_atom(297)\n#define am_little make_atom(298)\n#define am_loaded make_atom(299)\n#define am_load_cancelled make_atom(300)\n#define am_load_failure make_atom(301)\n#define am_local make_atom(302)\n#define am_long_gc make_atom(303)\n#define am_long_schedule make_atom(304)\n#define am_low make_atom(305)\n#define am_Lt make_atom(306)\n#define am_machine make_atom(307)\n#define am_match make_atom(308)\n#define am_match_limit make_atom(309)\n#define am_match_limit_recursion make_atom(310)\n#define am_match_spec make_atom(311)\n#define am_max make_atom(312)\n#define am_maximum make_atom(313)\n#define am_max_tables make_atom(314)\n#define am_max_processes make_atom(315)\n#define am_mbuf_size make_atom(316)\n#define am_md5 make_atom(317)\n#define am_memory make_atom(318)\n#define am_memory_internal make_atom(319)\n#define am_memory_types make_atom(320)\n#define am_message make_atom(321)\n#define am_message_binary make_atom(322)\n#define am_message_queue_len make_atom(323)\n#define am_messages make_atom(324)\n#define am_merge_trap make_atom(325)\n#define am_meta make_atom(326)\n#define am_meta_match_spec make_atom(327)\n#define am_micro_seconds make_atom(328)\n#define am_milli_seconds make_atom(329)\n#define am_min_heap_size make_atom(330)\n#define am_min_bin_vheap_size make_atom(331)\n#define am_minor_version make_atom(332)\n#define am_Minus make_atom(333)\n#define am_module make_atom(334)\n#define am_module_info make_atom(335)\n#define am_monitored_by make_atom(336)\n#define am_monitor make_atom(337)\n#define am_monitor_nodes make_atom(338)\n#define am_monitors make_atom(339)\n#define am_monotonic make_atom(340)\n#define am_more make_atom(341)\n#define am_multi_scheduling make_atom(342)\n#define am_multiline make_atom(343)\n#define am_nano_seconds make_atom(344)\n#define am_name make_atom(345)\n#define am_named_table make_atom(346)\n#define am_namelist make_atom(347)\n#define am_native make_atom(348)\n#define am_native_addresses make_atom(349)\n#define am_Neq make_atom(350)\n#define am_Neqeq make_atom(351)\n#define am_net_kernel make_atom(352)\n#define am_net_kernel_terminated make_atom(353)\n#define am_never_utf make_atom(354)\n#define am_new make_atom(355)\n#define am_new_index make_atom(356)\n#define am_new_uniq make_atom(357)\n#define am_newline make_atom(358)\n#define am_next make_atom(359)\n#define am_no make_atom(360)\n#define am_nomatch make_atom(361)\n#define am_none make_atom(362)\n#define am_no_auto_capture make_atom(363)\n#define am_noconnect make_atom(364)\n#define am_noconnection make_atom(365)\n#define am_nocookie make_atom(366)\n#define am_node make_atom(367)\n#define am_node_type make_atom(368)\n#define am_nodedown make_atom(369)\n#define am_nodedown_reason make_atom(370)\n#define am_nodeup make_atom(371)\n#define am_noeol make_atom(372)\n#define am_nofile make_atom(373)\n#define am_noproc make_atom(374)\n#define am_nosuspend make_atom(375)\n#define am_no_float make_atom(376)\n#define am_no_integer make_atom(377)\n#define am_no_network 
make_atom(378)\n#define am_no_start_optimize make_atom(379)\n#define am_not make_atom(380)\n#define am_not_a_list make_atom(381)\n#define am_not_loaded make_atom(382)\n#define am_not_loaded_by_this_process make_atom(383)\n#define am_not_pending make_atom(384)\n#define am_not_purged make_atom(385)\n#define am_notalive make_atom(386)\n#define am_notbol make_atom(387)\n#define am_noteol make_atom(388)\n#define am_notempty make_atom(389)\n#define am_notempty_atstart make_atom(390)\n#define am_notify make_atom(391)\n#define am_notsup make_atom(392)\n#define am_nouse_stdio make_atom(393)\n#define am_objects make_atom(394)\n#define am_offset make_atom(395)\n#define am_ok make_atom(396)\n#define am_old_heap_block_size make_atom(397)\n#define am_old_heap_size make_atom(398)\n#define am_on_load make_atom(399)\n#define am_open make_atom(400)\n#define am_open_error make_atom(401)\n#define am_opt make_atom(402)\n#define am_or make_atom(403)\n#define am_ordered_set make_atom(404)\n#define am_orelse make_atom(405)\n#define am_os_pid make_atom(406)\n#define am_os_type make_atom(407)\n#define am_os_version make_atom(408)\n#define am_ose_bg_proc make_atom(409)\n#define am_ose_int_proc make_atom(410)\n#define am_ose_phantom make_atom(411)\n#define am_ose_pri_proc make_atom(412)\n#define am_ose_process_prio make_atom(413)\n#define am_ose_process_type make_atom(414)\n#define am_ose_ti_proc make_atom(415)\n#define am_out make_atom(416)\n#define am_out_exited make_atom(417)\n#define am_out_exiting make_atom(418)\n#define am_output make_atom(419)\n#define am_overlapped_io make_atom(420)\n#define am_owner make_atom(421)\n#define am_packet make_atom(422)\n#define am_packet_size make_atom(423)\n#define am_parallelism make_atom(424)\n#define am_Plus make_atom(425)\n#define am_pause make_atom(426)\n#define am_pending make_atom(427)\n#define am_pending_driver make_atom(428)\n#define am_pending_process make_atom(429)\n#define am_pending_reload make_atom(430)\n#define am_permanent make_atom(431)\n#define am_pid make_atom(432)\n#define am_port make_atom(433)\n#define am_ports make_atom(434)\n#define am_port_count make_atom(435)\n#define am_port_limit make_atom(436)\n#define am_port_op make_atom(437)\n#define am_positive make_atom(438)\n#define am_print make_atom(439)\n#define am_priority make_atom(440)\n#define am_private make_atom(441)\n#define am_process make_atom(442)\n#define am_processes make_atom(443)\n#define am_processes_used make_atom(444)\n#define am_process_count make_atom(445)\n#define am_process_display make_atom(446)\n#define am_process_limit make_atom(447)\n#define am_process_dump make_atom(448)\n#define am_procs make_atom(449)\n#define am_proc_sig make_atom(450)\n#define am_profile make_atom(451)\n#define am_protected make_atom(452)\n#define am_protection make_atom(453)\n#define am_ptab_list_continue make_atom(454)\n#define am_public make_atom(455)\n#define am_purify make_atom(456)\n#define am_quantify make_atom(457)\n#define am_queue_size make_atom(458)\n#define am_raw make_atom(459)\n#define am_re make_atom(460)\n#define am_re_pattern make_atom(461)\n#define am_re_run_trap make_atom(462)\n#define am_read_concurrency make_atom(463)\n#define am_ready_input make_atom(464)\n#define am_ready_output make_atom(465)\n#define am_ready_async make_atom(466)\n#define am_reason make_atom(467)\n#define am_receive make_atom(468)\n#define am_recent_size make_atom(469)\n#define am_reductions make_atom(470)\n#define am_refc make_atom(471)\n#define am_register make_atom(472)\n#define am_registered_name 
make_atom(473)\n#define am_reload make_atom(474)\n#define am_rem make_atom(475)\n#define am_report_errors make_atom(476)\n#define am_reset make_atom(477)\n#define am_restart make_atom(478)\n#define am_return_from make_atom(479)\n#define am_return_to make_atom(480)\n#define am_return_trace make_atom(481)\n#define am_run_queue make_atom(482)\n#define am_runnable make_atom(483)\n#define am_runnable_ports make_atom(484)\n#define am_runnable_procs make_atom(485)\n#define am_running make_atom(486)\n#define am_running_ports make_atom(487)\n#define am_running_procs make_atom(488)\n#define am_runtime make_atom(489)\n#define am_safe make_atom(490)\n#define am_save_calls make_atom(491)\n#define am_scheduler make_atom(492)\n#define am_scheduler_id make_atom(493)\n#define am_schedulers_online make_atom(494)\n#define am_scheme make_atom(495)\n#define am_scientific make_atom(496)\n#define am_scope make_atom(497)\n#define am_seconds make_atom(498)\n#define am_sensitive make_atom(499)\n#define am_sequential_tracer make_atom(500)\n#define am_sequential_trace_token make_atom(501)\n#define am_serial make_atom(502)\n#define am_set make_atom(503)\n#define am_set_cpu_topology make_atom(504)\n#define am_set_data make_atom(505)\n#define am_set_on_first_link make_atom(506)\n#define am_set_on_first_spawn make_atom(507)\n#define am_set_on_link make_atom(508)\n#define am_set_on_spawn make_atom(509)\n#define am_set_seq_token make_atom(510)\n#define am_set_tcw make_atom(511)\n#define am_set_tcw_fake make_atom(512)\n#define am_separate make_atom(513)\n#define am_shared make_atom(514)\n#define am_silent make_atom(515)\n#define am_size make_atom(516)\n#define am_sl_alloc make_atom(517)\n#define am_spawn_executable make_atom(518)\n#define am_spawn_driver make_atom(519)\n#define am_ssl_tls make_atom(520)\n#define am_stack_size make_atom(521)\n#define am_start make_atom(522)\n#define am_status make_atom(523)\n#define am_static make_atom(524)\n#define am_stderr_to_stdout make_atom(525)\n#define am_stop make_atom(526)\n#define am_stream make_atom(527)\n#define am_sunrm make_atom(528)\n#define am_suspend make_atom(529)\n#define am_suspended make_atom(530)\n#define am_suspending make_atom(531)\n#define am_sys_misc make_atom(532)\n#define am_system make_atom(533)\n#define am_system_error make_atom(534)\n#define am_system_limit make_atom(535)\n#define am_system_version make_atom(536)\n#define am_system_architecture make_atom(537)\n#define am_SYSTEM make_atom(538)\n#define am_table make_atom(539)\n#define am_term_to_binary_trap make_atom(540)\n#define am_this make_atom(541)\n#define am_thread_pool_size make_atom(542)\n#define am_threads make_atom(543)\n#define am_time_offset make_atom(544)\n#define am_timeout_value make_atom(545)\n#define am_Times make_atom(546)\n#define am_timestamp make_atom(547)\n#define am_total make_atom(548)\n#define am_total_heap_size make_atom(549)\n#define am_tpkt make_atom(550)\n#define am_trace make_atom(551)\n#define am_trace_ts make_atom(552)\n#define am_traced make_atom(553)\n#define am_trace_control_word make_atom(554)\n#define am_tracer make_atom(555)\n#define am_trap_exit make_atom(556)\n#define am_try_clause make_atom(557)\n#define am_tuple make_atom(558)\n#define am_type make_atom(559)\n#define am_ucompile make_atom(560)\n#define am_ucp make_atom(561)\n#define am_undef make_atom(562)\n#define am_ungreedy make_atom(563)\n#define am_unicode make_atom(564)\n#define am_unregister make_atom(565)\n#define am_urun make_atom(566)\n#define am_use_stdio make_atom(567)\n#define am_used 
make_atom(568)\n#define am_utf8 make_atom(569)\n#define am_unblock make_atom(570)\n#define am_uniq make_atom(571)\n#define am_unless_suspending make_atom(572)\n#define am_unloaded make_atom(573)\n#define am_unloading make_atom(574)\n#define am_unloaded_only make_atom(575)\n#define am_unload_cancelled make_atom(576)\n#define am_value make_atom(577)\n#define am_values make_atom(578)\n#define am_version make_atom(579)\n#define am_visible make_atom(580)\n#define am_waiting make_atom(581)\n#define am_wall_clock make_atom(582)\n#define am_warning make_atom(583)\n#define am_warning_msg make_atom(584)\n#define am_scheduler_wall_time make_atom(585)\n#define am_wordsize make_atom(586)\n#define am_write_concurrency make_atom(587)\n#define am_xor make_atom(588)\n#define am_x86 make_atom(589)\n#define am_yes make_atom(590)\n#define am_yield make_atom(591)\n#define am_abs make_atom(592)\n#define am_adler32 make_atom(593)\n#define am_adler32_combine make_atom(594)\n#define am_atom_to_list make_atom(595)\n#define am_binary_to_list make_atom(596)\n#define am_binary_to_term make_atom(597)\n#define am_crc32 make_atom(598)\n#define am_crc32_combine make_atom(599)\n#define am_date make_atom(600)\n#define am_delete_module make_atom(601)\n#define am_display make_atom(602)\n#define am_display_string make_atom(603)\n#define am_display_nl make_atom(604)\n#define am_element make_atom(605)\n#define am_erase make_atom(606)\n#define am_external_size make_atom(607)\n#define am_float make_atom(608)\n#define am_float_to_list make_atom(609)\n#define am_fun_info make_atom(610)\n#define am_garbage_collect make_atom(611)\n#define am_get make_atom(612)\n#define am_get_keys make_atom(613)\n#define am_halt make_atom(614)\n#define am_phash make_atom(615)\n#define am_phash2 make_atom(616)\n#define am_hd make_atom(617)\n#define am_integer_to_list make_atom(618)\n#define am_is_alive make_atom(619)\n#define am_length make_atom(620)\n#define am_link make_atom(621)\n#define am_list_to_atom make_atom(622)\n#define am_list_to_binary make_atom(623)\n#define am_list_to_float make_atom(624)\n#define am_list_to_integer make_atom(625)\n#define am_list_to_pid make_atom(626)\n#define am_list_to_tuple make_atom(627)\n#define am_localtime make_atom(628)\n#define am_localtime_to_universaltime make_atom(629)\n#define am_make_ref make_atom(630)\n#define am_unique_integer make_atom(631)\n#define am_md5_init make_atom(632)\n#define am_md5_update make_atom(633)\n#define am_md5_final make_atom(634)\n#define am_module_loaded make_atom(635)\n#define am_function_exported make_atom(636)\n#define am_monitor_node make_atom(637)\n#define am_nodes make_atom(638)\n#define am_now make_atom(639)\n#define am_monotonic_time make_atom(640)\n#define am_system_time make_atom(641)\n#define am_open_port make_atom(642)\n#define am_pid_to_list make_atom(643)\n#define am_pre_loaded make_atom(644)\n#define am_process_flag make_atom(645)\n#define am_process_info make_atom(646)\n#define am_purge_module make_atom(647)\n#define am_put make_atom(648)\n#define am_registered make_atom(649)\n#define am_round make_atom(650)\n#define am_self make_atom(651)\n#define am_setelement make_atom(652)\n#define am_spawn make_atom(653)\n#define am_spawn_link make_atom(654)\n#define am_split_binary make_atom(655)\n#define am_statistics make_atom(656)\n#define am_term_to_binary make_atom(657)\n#define am_time make_atom(658)\n#define am_tl make_atom(659)\n#define am_trunc make_atom(660)\n#define am_tuple_to_list make_atom(661)\n#define am_universaltime make_atom(662)\n#define 
am_universaltime_to_localtime make_atom(663)\n#define am_unlink make_atom(664)\n#define am_whereis make_atom(665)\n#define am_spawn_opt make_atom(666)\n#define am_setnode make_atom(667)\n#define am_dist_exit make_atom(668)\n#define am_port_info make_atom(669)\n#define am_port_call make_atom(670)\n#define am_port_command make_atom(671)\n#define am_port_control make_atom(672)\n#define am_port_close make_atom(673)\n#define am_port_connect make_atom(674)\n#define am_request_system_task make_atom(675)\n#define am_check_process_code make_atom(676)\n#define am_map_to_tuple_keys make_atom(677)\n#define am_map_type make_atom(678)\n#define am_map_hashmap_children make_atom(679)\n#define am_time_unit make_atom(680)\n#define am_is_system_process make_atom(681)\n#define am_port_set_data make_atom(682)\n#define am_port_get_data make_atom(683)\n#define am_trace_pattern make_atom(684)\n#define am_trace_info make_atom(685)\n#define am_trace_delivered make_atom(686)\n#define am_seq_trace make_atom(687)\n#define am_seq_trace_info make_atom(688)\n#define am_seq_trace_print make_atom(689)\n#define am_suspend_process make_atom(690)\n#define am_resume_process make_atom(691)\n#define am_bump_reductions make_atom(692)\n#define am_math make_atom(693)\n#define am_cos make_atom(694)\n#define am_cosh make_atom(695)\n#define am_sin make_atom(696)\n#define am_sinh make_atom(697)\n#define am_tan make_atom(698)\n#define am_tanh make_atom(699)\n#define am_acos make_atom(700)\n#define am_acosh make_atom(701)\n#define am_asin make_atom(702)\n#define am_asinh make_atom(703)\n#define am_atan make_atom(704)\n#define am_atanh make_atom(705)\n#define am_erf make_atom(706)\n#define am_erfc make_atom(707)\n#define am_exp make_atom(708)\n#define am_log make_atom(709)\n#define am_log2 make_atom(710)\n#define am_log10 make_atom(711)\n#define am_sqrt make_atom(712)\n#define am_atan2 make_atom(713)\n#define am_pow make_atom(714)\n#define am_start_timer make_atom(715)\n#define am_send_after make_atom(716)\n#define am_cancel_timer make_atom(717)\n#define am_read_timer make_atom(718)\n#define am_make_tuple make_atom(719)\n#define am_append_element make_atom(720)\n#define am_system_flag make_atom(721)\n#define am_system_info make_atom(722)\n#define am_system_monitor make_atom(723)\n#define am_system_profile make_atom(724)\n#define am_ref_to_list make_atom(725)\n#define am_port_to_list make_atom(726)\n#define am_fun_to_list make_atom(727)\n#define am_demonitor make_atom(728)\n#define am_is_process_alive make_atom(729)\n#define am_raise make_atom(730)\n#define am_get_stacktrace make_atom(731)\n#define am_is_builtin make_atom(732)\n#define am__AtomAlias26 make_atom(733)\n#define am_send make_atom(734)\n#define am__AtomAlias27 make_atom(735)\n#define am_append make_atom(736)\n#define am__AtomAlias28 make_atom(737)\n#define am_subtract make_atom(738)\n#define am_is_atom make_atom(739)\n#define am_is_list make_atom(740)\n#define am_is_tuple make_atom(741)\n#define am_is_float make_atom(742)\n#define am_is_integer make_atom(743)\n#define am_is_number make_atom(744)\n#define am_is_pid make_atom(745)\n#define am_is_port make_atom(746)\n#define am_is_reference make_atom(747)\n#define am_is_binary make_atom(748)\n#define am_is_function make_atom(749)\n#define am_is_record make_atom(750)\n#define am_match_spec_test make_atom(751)\n#define am_delete make_atom(752)\n#define am_delete_all_objects make_atom(753)\n#define am_delete_object make_atom(754)\n#define am_is_compiled_ms make_atom(755)\n#define am_lookup make_atom(756)\n#define am_lookup_element 
make_atom(757)\n#define am_last make_atom(758)\n#define am_match_object make_atom(759)\n#define am_member make_atom(760)\n#define am_prev make_atom(761)\n#define am_insert make_atom(762)\n#define am_insert_new make_atom(763)\n#define am_rename make_atom(764)\n#define am_safe_fixtable make_atom(765)\n#define am_slot make_atom(766)\n#define am_update_counter make_atom(767)\n#define am_select make_atom(768)\n#define am_select_count make_atom(769)\n#define am_select_reverse make_atom(770)\n#define am_select_delete make_atom(771)\n#define am_match_spec_compile make_atom(772)\n#define am_match_spec_run_r make_atom(773)\n#define am_os make_atom(774)\n#define am_putenv make_atom(775)\n#define am_getpid make_atom(776)\n#define am_erl_ddll make_atom(777)\n#define am_try_load make_atom(778)\n#define am_try_unload make_atom(779)\n#define am_loaded_drivers make_atom(780)\n#define am_format_error_int make_atom(781)\n#define am_run make_atom(782)\n#define am_lists make_atom(783)\n#define am_reverse make_atom(784)\n#define am_keymember make_atom(785)\n#define am_keysearch make_atom(786)\n#define am_keyfind make_atom(787)\n#define am_erts_debug make_atom(788)\n#define am_disassemble make_atom(789)\n#define am_same make_atom(790)\n#define am_flat_size make_atom(791)\n#define am_get_internal_state make_atom(792)\n#define am_set_internal_state make_atom(793)\n#define am_dist_ext_to_term make_atom(794)\n#define am_instructions make_atom(795)\n#define am_dump_monitors make_atom(796)\n#define am_dump_links make_atom(797)\n#define am_lock_counters make_atom(798)\n#define am_get_chunk make_atom(799)\n#define am_module_md5 make_atom(800)\n#define am_make_stub_module make_atom(801)\n#define am_is_module_native make_atom(802)\n#define am_hibernate make_atom(803)\n#define am_warning_map make_atom(804)\n#define am_get_module_info make_atom(805)\n#define am_is_boolean make_atom(806)\n#define am_string make_atom(807)\n#define am_to_integer make_atom(808)\n#define am_to_float make_atom(809)\n#define am_make_fun make_atom(810)\n#define am_iolist_size make_atom(811)\n#define am_iolist_to_binary make_atom(812)\n#define am_list_to_existing_atom make_atom(813)\n#define am_is_bitstring make_atom(814)\n#define am_tuple_size make_atom(815)\n#define am_byte_size make_atom(816)\n#define am_bit_size make_atom(817)\n#define am_list_to_bitstring make_atom(818)\n#define am_bitstring_to_list make_atom(819)\n#define am_update_element make_atom(820)\n#define am_decode_packet make_atom(821)\n#define am_characters_to_binary make_atom(822)\n#define am_characters_to_list make_atom(823)\n#define am_bin_is_7bit make_atom(824)\n#define am_atom_to_binary make_atom(825)\n#define am_binary_to_atom make_atom(826)\n#define am_binary_to_existing_atom make_atom(827)\n#define am_dflag_unicode_io make_atom(828)\n#define am_give_away make_atom(829)\n#define am_setopts make_atom(830)\n#define am_load_nif make_atom(831)\n#define am_call_on_load_function make_atom(832)\n#define am_finish_after_on_load make_atom(833)\n#define am_binary_part make_atom(834)\n#define am_compile_pattern make_atom(835)\n#define am_matches make_atom(836)\n#define am_longest_common_prefix make_atom(837)\n#define am_longest_common_suffix make_atom(838)\n#define am_at make_atom(839)\n#define am_part make_atom(840)\n#define am_bin_to_list make_atom(841)\n#define am_list_to_bin make_atom(842)\n#define am_referenced_byte_size make_atom(843)\n#define am_encode_unsigned make_atom(844)\n#define am_decode_unsigned make_atom(845)\n#define am_nif_error make_atom(846)\n#define am_prim_file 
make_atom(847)\n#define am_internal_name2native make_atom(848)\n#define am_internal_native2name make_atom(849)\n#define am_internal_normalize_utf8 make_atom(850)\n#define am_is_translatable make_atom(851)\n#define am_file make_atom(852)\n#define am_native_name_encoding make_atom(853)\n#define am_check_old_code make_atom(854)\n#define am_universaltime_to_posixtime make_atom(855)\n#define am_posixtime_to_universaltime make_atom(856)\n#define am_dt_put_tag make_atom(857)\n#define am_dt_get_tag make_atom(858)\n#define am_dt_get_tag_data make_atom(859)\n#define am_dt_spread_tag make_atom(860)\n#define am_dt_restore_tag make_atom(861)\n#define am_dt_prepend_vm_tag_data make_atom(862)\n#define am_dt_append_vm_tag_data make_atom(863)\n#define am_prepare_loading make_atom(864)\n#define am_finish_loading make_atom(865)\n#define am_insert_element make_atom(866)\n#define am_delete_element make_atom(867)\n#define am_binary_to_integer make_atom(868)\n#define am_integer_to_binary make_atom(869)\n#define am_float_to_binary make_atom(870)\n#define am_binary_to_float make_atom(871)\n#define am_printable_range make_atom(872)\n#define am_unsetenv make_atom(873)\n#define am_inspect make_atom(874)\n#define am_is_map make_atom(875)\n#define am_map_size make_atom(876)\n#define am_maps make_atom(877)\n#define am_to_list make_atom(878)\n#define am_find make_atom(879)\n#define am_from_list make_atom(880)\n#define am_is_key make_atom(881)\n#define am_keys make_atom(882)\n#define am_merge make_atom(883)\n#define am_remove make_atom(884)\n#define am_update make_atom(885)\n#define am_cmp_term make_atom(886)\n#define am_take make_atom(887)\n#define am_fun_info_mfa make_atom(888)\n#define am_map_info make_atom(889)\n#define am_hash make_atom(890)\n#define am_hipe_bifs make_atom(891)\n#define am_write_u8 make_atom(892)\n#define am_write_u32 make_atom(893)\n#define am_bytearray make_atom(894)\n#define am_bytearray_sub make_atom(895)\n#define am_bytearray_update make_atom(896)\n#define am_bitarray make_atom(897)\n#define am_bitarray_sub make_atom(898)\n#define am_bitarray_update make_atom(899)\n#define am_array make_atom(900)\n#define am_array_length make_atom(901)\n#define am_array_sub make_atom(902)\n#define am_array_update make_atom(903)\n#define am_ref make_atom(904)\n#define am_ref_get make_atom(905)\n#define am_ref_set make_atom(906)\n#define am_enter_code make_atom(907)\n#define am_alloc_data make_atom(908)\n#define am_constants_size make_atom(909)\n#define am_merge_term make_atom(910)\n#define am_fun_to_address make_atom(911)\n#define am_set_native_address make_atom(912)\n#define am_set_funinfo_native_address make_atom(913)\n#define am_invalidate_funinfo_native_addresses make_atom(914)\n#define am_update_code_size make_atom(915)\n#define am_code_size make_atom(916)\n#define am_enter_sdesc make_atom(917)\n#define am_bif_address make_atom(918)\n#define am_primop_address make_atom(919)\n#define am_atom_to_word make_atom(920)\n#define am_term_to_word make_atom(921)\n#define am_get_fe make_atom(922)\n#define am_set_native_address_in_fe make_atom(923)\n#define am_find_na_or_make_stub make_atom(924)\n#define am_check_crc make_atom(925)\n#define am_system_crc make_atom(926)\n#define am_get_rts_param make_atom(927)\n#define am_patch_insn make_atom(928)\n#define am_patch_call make_atom(929)\n#define am_add_ref make_atom(930)\n#define am_mark_referred_from make_atom(931)\n#define am_remove_refs_from make_atom(932)\n#define am_redirect_referred_from make_atom(933)\n#define am_load_mfa make_atom(934)\n#define am_remote 
make_atom(935)\n#define am_c_const make_atom(936)\n#define am_closure make_atom(937)\n#define am_constant make_atom(938)\n#define am_x86_abs_pcrel make_atom(939)\n#define am_load_fe make_atom(940)\n#define am_suspend_msg make_atom(941)\n#define am_suspend_msg_timeout make_atom(942)\n#define am_suspend_0 make_atom(943)\n#define am_gc_1 make_atom(944)\n#define am_hipe_apply make_atom(945)\n#define am_rethrow make_atom(946)\n#define am_nonclosure_address make_atom(947)\n#define am_atomic_inc make_atom(948)\n#define am_clear_timeout make_atom(949)\n#define am_check_get_msg make_atom(950)\n#define am_select_msg make_atom(951)\n#define am_set_timeout make_atom(952)\n#define am_cmp_2 make_atom(953)\n#define am_op_exact_eqeq_2 make_atom(954)\n#define am_conv_big_to_float make_atom(955)\n#define am_fclearerror_error make_atom(956)\n#define am_bs_put_big_integer make_atom(957)\n#define am_bs_put_small_float make_atom(958)\n#define am_bs_put_bits make_atom(959)\n#define am_bs_allocate make_atom(960)\n#define am_bs_get_integer_2 make_atom(961)\n#define am_bs_get_float_2 make_atom(962)\n#define am_bs_get_binary_2 make_atom(963)\n#define am_bs_reallocate make_atom(964)\n#define am_bs_utf8_size make_atom(965)\n#define am_bs_put_utf8 make_atom(966)\n#define am_bs_get_utf8 make_atom(967)\n#define am_bs_utf16_size make_atom(968)\n#define am_bs_put_utf16be make_atom(969)\n#define am_bs_put_utf16le make_atom(970)\n#define am_bs_get_utf16 make_atom(971)\n#define am_bs_validate_unicode make_atom(972)\n#define am_bs_validate_unicode_retract make_atom(973)\n#define am_emulate_fpe make_atom(974)\n#define am_emasculate_binary make_atom(975)\n#define am_call_count_on make_atom(976)\n#define am_call_count_off make_atom(977)\n#define am_call_count_get make_atom(978)\n#define am_call_count_clear make_atom(979)\n#define am_trap_count_get make_atom(980)\n#define am_trap_count_clear make_atom(981)\n#define am_process_info_clear make_atom(982)\n#define am_message_info make_atom(983)\n#define am_message_info_clear make_atom(984)\n#define am_message_sizes make_atom(985)\n#define am_gc_info make_atom(986)\n#define am_shared_gc_info make_atom(987)\n#define am_incremental_gc_info make_atom(988)\n#define am_gc_info_clear make_atom(989)\n#define am_pause_times make_atom(990)\n#define am_system_timer make_atom(991)\n#define am_system_timer_clear make_atom(992)\n#define am_send_timer make_atom(993)\n#define am_send_timer_clear make_atom(994)\n#define am_gc_timer make_atom(995)\n#define am_shared_gc_timer make_atom(996)\n#define am_gc_timer_clear make_atom(997)\n#define am_misc_timer make_atom(998)\n#define am_misc_timer_clear make_atom(999)\n#define am_get_hrvtime make_atom(1000)\n#define am_stop_hrvtime make_atom(1001)\n#define am_show_estack make_atom(1002)\n#define am_show_heap make_atom(1003)\n#define am_show_nstack make_atom(1004)\n#define am_nstack_used_size make_atom(1005)\n#define am_show_pcb make_atom(1006)\n#define am_show_term make_atom(1007)\n#define am_in_native make_atom(1008)\n#define am_modeswitch_debug_on make_atom(1009)\n#define am_modeswitch_debug_off make_atom(1010)\n#define am_debug_native_called make_atom(1011)\n#define am_llvm_fix_pinned_regs make_atom(1012)\n#define am_handle_fp_exception make_atom(1013)\n#define am_inc_stack_0 make_atom(1014)\n#define am_sse2_fnegate_mask make_atom(1015)\n#define am_write_u64 make_atom(1016)\n#endif\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.675000011920929,
"avg_line_length": 19,
"blob_id": "235aee699d1d4d0d3197151c704b60ea5d020dfe",
"content_id": "176e10ef94f042e21362f18ae7ab09e0f483e69b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 2,
"path": "/bin/tcp-forward",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n~/bin/tcpxd 3333 mume.org 443\n"
},
{
"alpha_fraction": 0.5843806266784668,
"alphanum_fraction": 0.6060143709182739,
"avg_line_length": 28.786096572875977,
"blob_id": "1464d97827946f83c572cbe1a9e9078d968f954f",
"content_id": "cbb39afcbf959728253424a88fa6aa7b5f9069a1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 11140,
"license_type": "permissive",
"max_line_length": 327,
"num_lines": 374,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib/config.status",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#! /bin/sh\n# Generated by configure.\n# Run this file to recreate the current configuration.\n# Compiler output produced by configure, useful for debugging\n# configure, is in config.log if it exists.\n\ndebug=false\nac_cs_recheck=false\nac_cs_silent=false\nSHELL=${CONFIG_SHELL-/bin/sh}\n## --------------------- ##\n## M4sh Initialization. ##\n## --------------------- ##\n\n# Be Bourne compatible\nif test -n \"${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then\n emulate sh\n NULLCMD=:\n # Zsh 3.x and 4.x performs word splitting on ${1+\"$@\"}, which\n # is contrary to our usage. Disable this feature.\n alias -g '${1+\"$@\"}'='\"$@\"'\nelif test -n \"${BASH_VERSION+set}\" && (set -o posix) >/dev/null 2>&1; then\n set -o posix\nfi\nDUALCASE=1; export DUALCASE # for MKS sh\n\n# Support unset when possible.\nif ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then\n as_unset=unset\nelse\n as_unset=false\nfi\n\n\n# Work around bugs in pre-3.0 UWIN ksh.\n$as_unset ENV MAIL MAILPATH\nPS1='$ '\nPS2='> '\nPS4='+ '\n\n# NLS nuisances.\nfor as_var in \\\n LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \\\n LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \\\n LC_TELEPHONE LC_TIME\ndo\n if (set +x; test -z \"`(eval $as_var=C; export $as_var) 2>&1`\"); then\n eval $as_var=C; export $as_var\n else\n $as_unset $as_var\n fi\ndone\n\n# Required to use basename.\nif expr a : '\\(a\\)' >/dev/null 2>&1; then\n as_expr=expr\nelse\n as_expr=false\nfi\n\nif (basename /) >/dev/null 2>&1 && test \"X`basename / 2>&1`\" = \"X/\"; then\n as_basename=basename\nelse\n as_basename=false\nfi\n\n\n# Name of the executable.\nas_me=`$as_basename \"$0\" ||\n$as_expr X/\"$0\" : '.*/\\([^/][^/]*\\)/*$' \\| \\\n\t X\"$0\" : 'X\\(//\\)$' \\| \\\n\t X\"$0\" : 'X\\(/\\)$' \\| \\\n\t . : '\\(.\\)' 2>/dev/null ||\necho X/\"$0\" |\n sed '/^.*\\/\\([^/][^/]*\\)\\/*$/{ s//\\1/; q; }\n \t /^X\\/\\(\\/\\/\\)$/{ s//\\1/; q; }\n \t /^X\\/\\(\\/\\).*/{ s//\\1/; q; }\n \t s/.*/./; q'`\n\n\n# PATH needs CR, and LINENO needs CR and PATH.\n# Avoid depending upon Character Ranges.\nas_cr_letters='abcdefghijklmnopqrstuvwxyz'\nas_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nas_cr_Letters=$as_cr_letters$as_cr_LETTERS\nas_cr_digits='0123456789'\nas_cr_alnum=$as_cr_Letters$as_cr_digits\n\n# The user is always right.\nif test \"${PATH_SEPARATOR+set}\" != set; then\n echo \"#! /bin/sh\" >conf$$.sh\n echo \"exit 0\" >>conf$$.sh\n chmod +x conf$$.sh\n if (PATH=\"/nonexistent;.\"; conf$$.sh) >/dev/null 2>&1; then\n PATH_SEPARATOR=';'\n else\n PATH_SEPARATOR=:\n fi\n rm -f conf$$.sh\nfi\n\n\n as_lineno_1=$LINENO\n as_lineno_2=$LINENO\n as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`\n test \"x$as_lineno_1\" != \"x$as_lineno_2\" &&\n test \"x$as_lineno_3\" = \"x$as_lineno_2\" || {\n # Find who we are. Look in the path if we contain no path at all\n # relative or not.\n case $0 in\n *[\\\\/]* ) as_myself=$0 ;;\n *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR\nfor as_dir in $PATH\ndo\n IFS=$as_save_IFS\n test -z \"$as_dir\" && as_dir=.\n test -r \"$as_dir/$0\" && as_myself=$as_dir/$0 && break\ndone\n\n ;;\n esac\n # We did not find ourselves, most probably we were run as `sh COMMAND'\n # in which case we are not to be found in the path.\n if test \"x$as_myself\" = x; then\n as_myself=$0\n fi\n if test ! 
-f \"$as_myself\"; then\n { { echo \"$as_me:$LINENO: error: cannot find myself; rerun with an absolute path\" >&5\necho \"$as_me: error: cannot find myself; rerun with an absolute path\" >&2;}\n { (exit 1); exit 1; }; }\n fi\n case $CONFIG_SHELL in\n '')\n as_save_IFS=$IFS; IFS=$PATH_SEPARATOR\nfor as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH\ndo\n IFS=$as_save_IFS\n test -z \"$as_dir\" && as_dir=.\n for as_base in sh bash ksh sh5; do\n\t case $as_dir in\n\t /*)\n\t if (\"$as_dir/$as_base\" -c '\n as_lineno_1=$LINENO\n as_lineno_2=$LINENO\n as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`\n test \"x$as_lineno_1\" != \"x$as_lineno_2\" &&\n test \"x$as_lineno_3\" = \"x$as_lineno_2\" ') 2>/dev/null; then\n\t $as_unset BASH_ENV || test \"${BASH_ENV+set}\" != set || { BASH_ENV=; export BASH_ENV; }\n\t $as_unset ENV || test \"${ENV+set}\" != set || { ENV=; export ENV; }\n\t CONFIG_SHELL=$as_dir/$as_base\n\t export CONFIG_SHELL\n\t exec \"$CONFIG_SHELL\" \"$0\" ${1+\"$@\"}\n\t fi;;\n\t esac\n done\ndone\n;;\n esac\n\n # Create $as_me.lineno as a copy of $as_myself, but with $LINENO\n # uniformly replaced by the line number. The first 'sed' inserts a\n # line-number line before each line; the second 'sed' does the real\n # work. The second script uses 'N' to pair each line-number line\n # with the numbered line, and appends trailing '-' during\n # substitution so that $LINENO is not a special case at line end.\n # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the\n # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)\n sed '=' <$as_myself |\n sed '\n N\n s,$,-,\n : loop\n s,^\\(['$as_cr_digits']*\\)\\(.*\\)[$]LINENO\\([^'$as_cr_alnum'_]\\),\\1\\2\\1\\3,\n t loop\n s,-$,,\n s,^['$as_cr_digits']*\\n,,\n ' >$as_me.lineno &&\n chmod +x $as_me.lineno ||\n { { echo \"$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell\" >&5\necho \"$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell\" >&2;}\n { (exit 1); exit 1; }; }\n\n # Don't try to exec as it changes $[0], causing all sort of problems\n # (the dirname of $[0] is not the place where we might find the\n # original and so on. Autoconf is especially sensible to this).\n . ./$as_me.lineno\n # Exit status is that of the last command.\n exit\n}\n\n\ncase `echo \"testing\\c\"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in\n *c*,-n*) ECHO_N= ECHO_C='\n' ECHO_T='\t' ;;\n *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;\n *) ECHO_N= ECHO_C='\\c' ECHO_T= ;;\nesac\n\nif expr a : '\\(a\\)' >/dev/null 2>&1; then\n as_expr=expr\nelse\n as_expr=false\nfi\n\nrm -f conf$$ conf$$.exe conf$$.file\necho >conf$$.file\nif ln -s conf$$.file conf$$ 2>/dev/null; then\n # We could just check for DJGPP; but this test a) works b) is more generic\n # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).\n if test -f conf$$.exe; then\n # Don't use ln at all; we don't have any links\n as_ln_s='cp -p'\n else\n as_ln_s='ln -s'\n fi\nelif ln conf$$.file conf$$ 2>/dev/null; then\n as_ln_s=ln\nelse\n as_ln_s='cp -p'\nfi\nrm -f conf$$ conf$$.exe conf$$.file\n\nif mkdir -p . 
2>/dev/null; then\n as_mkdir_p=:\nelse\n test -d ./-p && rmdir ./-p\n as_mkdir_p=false\nfi\n\nas_executable_p=\"test -f\"\n\n# Sed expression to map a string onto a valid CPP name.\nas_tr_cpp=\"eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'\"\n\n# Sed expression to map a string onto a valid variable name.\nas_tr_sh=\"eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'\"\n\n\n# IFS\n# We need space, tab and new line, in precisely that order.\nas_nl='\n'\nIFS=\" \t$as_nl\"\n\n# CDPATH.\n$as_unset CDPATH\n\nexec 6>&1\n\n# Open the log real soon, to keep \\$[0] and so on meaningful, and to\n# report actual input values of CONFIG_FILES etc. instead of their\n# values after options handling. Logging --version etc. is OK.\nexec 5>>config.log\n{\n echo\n sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX\n## Running $as_me. ##\n_ASBOX\n} >&5\ncat >&5 <<_CSEOF\n\nThis file was extended by $as_me, which was\ngenerated by GNU Autoconf 2.59. Invocation command line was\n\n CONFIG_FILES = $CONFIG_FILES\n CONFIG_HEADERS = $CONFIG_HEADERS\n CONFIG_LINKS = $CONFIG_LINKS\n CONFIG_COMMANDS = $CONFIG_COMMANDS\n $ $0 $@\n\n_CSEOF\necho \"on `(hostname || uname -n) 2>/dev/null | sed 1q`\" >&5\necho >&5\n\nac_cs_usage=\"\\\n\\`$as_me' instantiates files from templates according to the\ncurrent configuration.\n\nUsage: $0 [OPTIONS] [FILE]...\n\n -h, --help print this help, then exit\n -V, --version print version number, then exit\n -q, --quiet do not print progress messages\n -d, --debug don't remove temporary files\n --recheck update $as_me by reconfiguring in the same conditions\n\nReport bugs to <[email protected]>.\"\nac_cs_version=\"\\\nconfig.status\nconfigured by /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib/configure, generated by GNU Autoconf 2.59,\n with options \\\"'--prefix=/usr/local' 'CFLAGS=-O0' 'ERL_TOP=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1' '--cache-file=/dev/null' '--srcdir=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib'\\\"\n\nCopyright (C) 2003 Free Software Foundation, Inc.\nThis config.status script is free software; the Free Software Foundation\ngives unlimited permission to copy, distribute and modify it.\"\nsrcdir=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib\n# If no file are specified by the user, then we need to provide default\n# value. 
By we need to know if files were specified by the user.\nac_need_defaults=:\nwhile test $# != 0\ndo\n case $1 in\n --*=*)\n ac_option=`expr \"x$1\" : 'x\\([^=]*\\)='`\n ac_optarg=`expr \"x$1\" : 'x[^=]*=\\(.*\\)'`\n ac_shift=:\n ;;\n -*)\n ac_option=$1\n ac_optarg=$2\n ac_shift=shift\n ;;\n *) # This is not an option, so the user has probably given explicit\n # arguments.\n ac_option=$1\n ac_need_defaults=false;;\n esac\n\n case $ac_option in\n # Handling of the options.\n -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)\n ac_cs_recheck=: ;;\n --version | --vers* | -V )\n echo \"$ac_cs_version\"; exit 0 ;;\n --he | --h)\n # Conflict between --help and --header\n { { echo \"$as_me:$LINENO: error: ambiguous option: $1\nTry \\`$0 --help' for more information.\" >&5\necho \"$as_me: error: ambiguous option: $1\nTry \\`$0 --help' for more information.\" >&2;}\n { (exit 1); exit 1; }; };;\n --help | --hel | -h )\n echo \"$ac_cs_usage\"; exit 0 ;;\n --debug | --d* | -d )\n debug=: ;;\n --file | --fil | --fi | --f )\n $ac_shift\n CONFIG_FILES=\"$CONFIG_FILES $ac_optarg\"\n ac_need_defaults=false;;\n --header | --heade | --head | --hea )\n $ac_shift\n CONFIG_HEADERS=\"$CONFIG_HEADERS $ac_optarg\"\n ac_need_defaults=false;;\n -q | -quiet | --quiet | --quie | --qui | --qu | --q \\\n | -silent | --silent | --silen | --sile | --sil | --si | --s)\n ac_cs_silent=: ;;\n\n # This is an error.\n -*) { { echo \"$as_me:$LINENO: error: unrecognized option: $1\nTry \\`$0 --help' for more information.\" >&5\necho \"$as_me: error: unrecognized option: $1\nTry \\`$0 --help' for more information.\" >&2;}\n { (exit 1); exit 1; }; } ;;\n\n *) ac_config_targets=\"$ac_config_targets $1\" ;;\n\n esac\n shift\ndone\n\nac_configure_extra_args=\n\nif $ac_cs_silent; then\n exec 6>/dev/null\n ac_configure_extra_args=\"$ac_configure_extra_args --silent\"\nfi\n\nif $ac_cs_recheck; then\n echo \"running /bin/sh /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib/configure \" '--prefix=/usr/local' 'CFLAGS=-O0' 'ERL_TOP=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1' '--cache-file=/dev/null' '--srcdir=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib' $ac_configure_extra_args \" --no-create --no-recursion\" >&6\n exec /bin/sh /Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib/configure '--prefix=/usr/local' 'CFLAGS=-O0' 'ERL_TOP=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1' '--cache-file=/dev/null' '--srcdir=/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/lib' $ac_configure_extra_args --no-create --no-recursion\nfi\n\n\n{ (exit 0); exit 0; }\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 42,
"blob_id": "7e657897964eefc03452e006f73636e4760420f8",
"content_id": "02fb6974b4c94447d32962a4bb8ed5f3828aa5a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 42,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 1,
"path": "/.zprofile",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/Users/z/.yadr/zsh/prezto/runcoms/zprofile"
},
{
"alpha_fraction": 0.4986225962638855,
"alphanum_fraction": 0.6005509495735168,
"avg_line_length": 18.105262756347656,
"blob_id": "3f5d78b48a74b6467eacf60b292aa68ebc7692c8",
"content_id": "3b365da8a600179523f3614b77cb2e89c4c95cbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 19,
"path": "/bin/cssh",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nHOSTNAME=`echo $@ | sed s/.*@//`\n\nset_bg () {\n osascript -e \"tell application \\\"Terminal\\\" to set background color of window 1 to $1\"\n}\n\non_exit () {\n set_bg \"{0, 0, 0, 50000}\"\n}\ntrap on_exit EXIT\n\ncase $HOSTNAME in\n production1|production2|production3) set_bg \"{45000, 0, 0, 50000}\" ;;\n *) set_bg \"{0, 45000, 0, 50000}\" ;;\nesac\n\n/usr/bin/ssh \"$@\"\n"
},
{
"alpha_fraction": 0.657108724117279,
"alphanum_fraction": 0.7060931921005249,
"avg_line_length": 65.95999908447266,
"blob_id": "a348a702423932d2f2c5a07014b831683ab1098e",
"content_id": "678b5568854fed468008e6fb749e8a22d782bec3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 3348,
"license_type": "permissive",
"max_line_length": 481,
"num_lines": 50,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/make/x86_64-apple-darwin14.5.0/otp_ded.mk",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#-*-makefile-*- ; force emacs to enter makefile-mode\n# ----------------------------------------------------\n# %CopyrightBegin%\n#\n# Copyright Ericsson AB 2009-2013. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# %CopyrightEnd%\n\n# The version.\n#\n# Note that it is important that the version is\n# explicitly expressed here. Some applications need to\n# be able to check this value *before* configure has\n# been run and generated otp_ded.mk\nDED_MK_VSN = 1\n# ----------------------------------------------------\n# Variables needed for building Dynamic Erlang Drivers\n# ----------------------------------------------------\nDED_CC = gcc\nDED_GCC = yes\nDED_LD = gcc\nDED_LDFLAGS = -m64 -bundle -flat_namespace -undefined suppress\nDED__NOWARN_NOTHR_CFLAGS = -O0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0 -D_XOPEN_SOURCE -fPIC -fno-common\nDED__NOTHR_CFLAGS = -Wall -Wstrict-prototypes -Wmissing-prototypes -Wdeclaration-after-statement -O0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0 -D_XOPEN_SOURCE -fPIC -fno-common\nDED__NOWARN_CFLAGS = -DUSE_THREADS -D_THREAD_SAFE -D_REENTRANT -DPOSIX_THREADS -O0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0 -D_XOPEN_SOURCE -fPIC -fno-common\nDED_THR_DEFS = -DUSE_THREADS -D_THREAD_SAFE -D_REENTRANT -DPOSIX_THREADS\nDED_EMU_THR_DEFS = -DUSE_THREADS -D_THREAD_SAFE -D_REENTRANT -DPOSIX_THREADS\nDED_WARN_FLAGS = -Wall -Wstrict-prototypes -Wmissing-prototypes -Wdeclaration-after-statement\nDED_CFLAGS = -Werror=return-type -Wall -Wstrict-prototypes -Wmissing-prototypes -Wdeclaration-after-statement -DUSE_THREADS -D_THREAD_SAFE -D_REENTRANT -DPOSIX_THREADS -O0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0 -D_XOPEN_SOURCE -fPIC -fno-common\nDED_STATIC_CFLAGS = -Werror=return-type -Wall -Wstrict-prototypes -Wmissing-prototypes -Wdeclaration-after-statement -DUSE_THREADS -D_THREAD_SAFE -D_REENTRANT -DPOSIX_THREADS -O0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/x86_64-apple-darwin14.5.0 -D_XOPEN_SOURCE -fno-common -DSTATIC_ERLANG_NIF -DSTATIC_ERLANG_DRIVER\nDED_LIBS = -lutil -ldl -lm \nDED_EXT = so\nERLANG_OSTYPE = unix\nPRIVDIR = ../priv\nOBJDIR = $(PRIVDIR)/obj/$(TARGET)\nLIBDIR = $(PRIVDIR)/lib/$(TARGET)\nDED_SYS_INCLUDE = -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/beam -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/include -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/include/x86_64-apple-darwin14.5.0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/include/internal -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/include/internal/x86_64-apple-darwin14.5.0 -I/Users/z/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/sys/unix\nDED_INCLUDES = $(DED_SYS_INCLUDE)\n"
},
{
"alpha_fraction": 0.6730920672416687,
"alphanum_fraction": 0.7589459419250488,
"avg_line_length": 56.03508758544922,
"blob_id": "9f4772c3370e91b9ceb3b272da29a88e01760142",
"content_id": "9fa73beb7622c0dc60ae3fb56eb99de098bc7bd6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 29259,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 513,
"path": "/.kerl/builds/otp-18.2.1/otp_src_18.2.1/erts/emulator/x86_64-apple-darwin14.5.0/erl_bif_list.h",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "/*\n * Warning: Do not edit this file. It was automatically\n * generated by 'make_tables' on Thu Dec 31 02:09:59 2015.\n */\n\nBIF_LIST(am_erlang,am_abs,1,abs_1,0)\nBIF_LIST(am_erlang,am_adler32,1,adler32_1,1)\nBIF_LIST(am_erlang,am_adler32,2,adler32_2,2)\nBIF_LIST(am_erlang,am_adler32_combine,3,adler32_combine_3,3)\nBIF_LIST(am_erlang,am_apply,3,apply_3,4)\nBIF_LIST(am_erlang,am_atom_to_list,1,atom_to_list_1,5)\nBIF_LIST(am_erlang,am_binary_to_list,1,binary_to_list_1,6)\nBIF_LIST(am_erlang,am_binary_to_list,3,binary_to_list_3,7)\nBIF_LIST(am_erlang,am_binary_to_term,1,binary_to_term_1,8)\nBIF_LIST(am_erlang,am_crc32,1,crc32_1,9)\nBIF_LIST(am_erlang,am_crc32,2,crc32_2,10)\nBIF_LIST(am_erlang,am_crc32_combine,3,crc32_combine_3,11)\nBIF_LIST(am_erlang,am_date,0,date_0,12)\nBIF_LIST(am_erlang,am_delete_module,1,delete_module_1,13)\nBIF_LIST(am_erlang,am_display,1,display_1,14)\nBIF_LIST(am_erlang,am_display_string,1,display_string_1,15)\nBIF_LIST(am_erlang,am_display_nl,0,display_nl_0,16)\nBIF_LIST(am_erlang,am_element,2,element_2,17)\nBIF_LIST(am_erlang,am_erase,0,erase_0,18)\nBIF_LIST(am_erlang,am_erase,1,erase_1,19)\nBIF_LIST(am_erlang,am_exit,1,exit_1,20)\nBIF_LIST(am_erlang,am_exit,2,exit_2,21)\nBIF_LIST(am_erlang,am_external_size,1,external_size_1,22)\nBIF_LIST(am_erlang,am_external_size,2,external_size_2,23)\nBIF_LIST(am_erlang,am_float,1,float_1,24)\nBIF_LIST(am_erlang,am_float_to_list,1,float_to_list_1,25)\nBIF_LIST(am_erlang,am_float_to_list,2,float_to_list_2,26)\nBIF_LIST(am_erlang,am_fun_info,2,fun_info_2,27)\nBIF_LIST(am_erlang,am_garbage_collect,0,garbage_collect_0,28)\nBIF_LIST(am_erlang,am_get,0,get_0,29)\nBIF_LIST(am_erlang,am_get,1,get_1,30)\nBIF_LIST(am_erlang,am_get_keys,1,get_keys_1,31)\nBIF_LIST(am_erlang,am_group_leader,0,group_leader_0,32)\nBIF_LIST(am_erlang,am_group_leader,2,group_leader_2,33)\nBIF_LIST(am_erlang,am_halt,0,halt_0,34)\nBIF_LIST(am_erlang,am_halt,1,halt_1,35)\nBIF_LIST(am_erlang,am_halt,2,halt_2,36)\nBIF_LIST(am_erlang,am_phash,2,phash_2,37)\nBIF_LIST(am_erlang,am_phash2,1,phash2_1,38)\nBIF_LIST(am_erlang,am_phash2,2,phash2_2,39)\nBIF_LIST(am_erlang,am_hd,1,hd_1,40)\nBIF_LIST(am_erlang,am_integer_to_list,1,integer_to_list_1,41)\nBIF_LIST(am_erlang,am_is_alive,0,is_alive_0,42)\nBIF_LIST(am_erlang,am_length,1,length_1,43)\nBIF_LIST(am_erlang,am_link,1,link_1,44)\nBIF_LIST(am_erlang,am_list_to_atom,1,list_to_atom_1,45)\nBIF_LIST(am_erlang,am_list_to_binary,1,list_to_binary_1,46)\nBIF_LIST(am_erlang,am_list_to_float,1,list_to_float_1,47)\nBIF_LIST(am_erlang,am_list_to_integer,1,list_to_integer_1,48)\nBIF_LIST(am_erlang,am_list_to_pid,1,list_to_pid_1,49)\nBIF_LIST(am_erlang,am_list_to_tuple,1,list_to_tuple_1,50)\nBIF_LIST(am_erlang,am_loaded,0,loaded_0,51)\nBIF_LIST(am_erlang,am_localtime,0,localtime_0,52)\nBIF_LIST(am_erlang,am_localtime_to_universaltime,2,localtime_to_universaltime_2,53)\nBIF_LIST(am_erlang,am_make_ref,0,make_ref_0,54)\nBIF_LIST(am_erlang,am_unique_integer,0,unique_integer_0,55)\nBIF_LIST(am_erlang,am_unique_integer,1,unique_integer_1,56)\nBIF_LIST(am_erlang,am_md5,1,md5_1,57)\nBIF_LIST(am_erlang,am_md5_init,0,md5_init_0,58)\nBIF_LIST(am_erlang,am_md5_update,2,md5_update_2,59)\nBIF_LIST(am_erlang,am_md5_final,1,md5_final_1,60)\nBIF_LIST(am_erlang,am_module_loaded,1,module_loaded_1,61)\nBIF_LIST(am_erlang,am_function_exported,3,function_exported_3,62)\nBIF_LIST(am_erlang,am_monitor_node,2,monitor_node_2,63)\nBIF_LIST(am_erlang,am_monitor_node,3,monitor_node_3,64)\nBIF_LIST(am_erlang,am_node,1,node_1,65)\nBIF_LIST(am_erlang,am_node
,0,node_0,66)\nBIF_LIST(am_erlang,am_nodes,1,nodes_1,67)\nBIF_LIST(am_erlang,am_now,0,now_0,68)\nBIF_LIST(am_erlang,am_monotonic_time,0,monotonic_time_0,69)\nBIF_LIST(am_erlang,am_monotonic_time,1,monotonic_time_1,70)\nBIF_LIST(am_erlang,am_system_time,0,system_time_0,71)\nBIF_LIST(am_erlang,am_system_time,1,system_time_1,72)\nBIF_LIST(am_erlang,am_time_offset,0,time_offset_0,73)\nBIF_LIST(am_erlang,am_time_offset,1,time_offset_1,74)\nBIF_LIST(am_erlang,am_timestamp,0,timestamp_0,75)\nBIF_LIST(am_erlang,am_open_port,2,open_port_2,76)\nBIF_LIST(am_erlang,am_pid_to_list,1,pid_to_list_1,77)\nBIF_LIST(am_erlang,am_ports,0,ports_0,78)\nBIF_LIST(am_erlang,am_pre_loaded,0,pre_loaded_0,79)\nBIF_LIST(am_erlang,am_process_flag,2,process_flag_2,80)\nBIF_LIST(am_erlang,am_process_flag,3,process_flag_3,81)\nBIF_LIST(am_erlang,am_process_info,1,process_info_1,82)\nBIF_LIST(am_erlang,am_process_info,2,process_info_2,83)\nBIF_LIST(am_erlang,am_processes,0,processes_0,84)\nBIF_LIST(am_erlang,am_purge_module,1,purge_module_1,85)\nBIF_LIST(am_erlang,am_put,2,put_2,86)\nBIF_LIST(am_erlang,am_register,2,register_2,87)\nBIF_LIST(am_erlang,am_registered,0,registered_0,88)\nBIF_LIST(am_erlang,am_round,1,round_1,89)\nBIF_LIST(am_erlang,am_self,0,self_0,90)\nBIF_LIST(am_erlang,am_setelement,3,setelement_3,91)\nBIF_LIST(am_erlang,am_size,1,size_1,92)\nBIF_LIST(am_erlang,am_spawn,3,spawn_3,93)\nBIF_LIST(am_erlang,am_spawn_link,3,spawn_link_3,94)\nBIF_LIST(am_erlang,am_split_binary,2,split_binary_2,95)\nBIF_LIST(am_erlang,am_statistics,1,statistics_1,96)\nBIF_LIST(am_erlang,am_term_to_binary,1,term_to_binary_1,97)\nBIF_LIST(am_erlang,am_term_to_binary,2,term_to_binary_2,98)\nBIF_LIST(am_erlang,am_throw,1,throw_1,99)\nBIF_LIST(am_erlang,am_time,0,time_0,100)\nBIF_LIST(am_erlang,am_tl,1,tl_1,101)\nBIF_LIST(am_erlang,am_trunc,1,trunc_1,102)\nBIF_LIST(am_erlang,am_tuple_to_list,1,tuple_to_list_1,103)\nBIF_LIST(am_erlang,am_universaltime,0,universaltime_0,104)\nBIF_LIST(am_erlang,am_universaltime_to_localtime,1,universaltime_to_localtime_1,105)\nBIF_LIST(am_erlang,am_unlink,1,unlink_1,106)\nBIF_LIST(am_erlang,am_unregister,1,unregister_1,107)\nBIF_LIST(am_erlang,am_whereis,1,whereis_1,108)\nBIF_LIST(am_erlang,am_spawn_opt,1,spawn_opt_1,109)\nBIF_LIST(am_erlang,am_setnode,2,setnode_2,110)\nBIF_LIST(am_erlang,am_setnode,3,setnode_3,111)\nBIF_LIST(am_erlang,am_dist_exit,3,dist_exit_3,112)\nBIF_LIST(am_erts_internal,am_port_info,1,erts_internal_port_info_1,113)\nBIF_LIST(am_erts_internal,am_port_info,2,erts_internal_port_info_2,114)\nBIF_LIST(am_erts_internal,am_port_call,3,erts_internal_port_call_3,115)\nBIF_LIST(am_erts_internal,am_port_command,3,erts_internal_port_command_3,116)\nBIF_LIST(am_erts_internal,am_port_control,3,erts_internal_port_control_3,117)\nBIF_LIST(am_erts_internal,am_port_close,1,erts_internal_port_close_1,118)\nBIF_LIST(am_erts_internal,am_port_connect,2,erts_internal_port_connect_2,119)\nBIF_LIST(am_erts_internal,am_request_system_task,3,erts_internal_request_system_task_3,120)\nBIF_LIST(am_erts_internal,am_check_process_code,2,erts_internal_check_process_code_2,121)\nBIF_LIST(am_erts_internal,am_map_to_tuple_keys,1,erts_internal_map_to_tuple_keys_1,122)\nBIF_LIST(am_erts_internal,am_map_type,1,erts_internal_map_type_1,123)\nBIF_LIST(am_erts_internal,am_map_hashmap_children,1,erts_internal_map_hashmap_children_1,124)\nBIF_LIST(am_erts_internal,am_time_unit,0,erts_internal_time_unit_0,125)\nBIF_LIST(am_erts_internal,am_is_system_process,1,erts_internal_is_system_process_1,126)\nBIF_LIST(am_erlang,am_port_
set_data,2,port_set_data_2,127)\nBIF_LIST(am_erlang,am_port_get_data,1,port_get_data_1,128)\nBIF_LIST(am_erlang,am_trace_pattern,2,trace_pattern_2,129)\nBIF_LIST(am_erlang,am_trace_pattern,3,trace_pattern_3,130)\nBIF_LIST(am_erlang,am_trace,3,trace_3,131)\nBIF_LIST(am_erlang,am_trace_info,2,trace_info_2,132)\nBIF_LIST(am_erlang,am_trace_delivered,1,trace_delivered_1,133)\nBIF_LIST(am_erlang,am_seq_trace,2,seq_trace_2,134)\nBIF_LIST(am_erlang,am_seq_trace_info,1,seq_trace_info_1,135)\nBIF_LIST(am_erlang,am_seq_trace_print,1,seq_trace_print_1,136)\nBIF_LIST(am_erlang,am_seq_trace_print,2,seq_trace_print_2,137)\nBIF_LIST(am_erlang,am_suspend_process,2,suspend_process_2,138)\nBIF_LIST(am_erlang,am_resume_process,1,resume_process_1,139)\nBIF_LIST(am_erlang,am_process_display,2,process_display_2,140)\nBIF_LIST(am_erlang,am_bump_reductions,1,bump_reductions_1,141)\nBIF_LIST(am_math,am_cos,1,math_cos_1,142)\nBIF_LIST(am_math,am_cosh,1,math_cosh_1,143)\nBIF_LIST(am_math,am_sin,1,math_sin_1,144)\nBIF_LIST(am_math,am_sinh,1,math_sinh_1,145)\nBIF_LIST(am_math,am_tan,1,math_tan_1,146)\nBIF_LIST(am_math,am_tanh,1,math_tanh_1,147)\nBIF_LIST(am_math,am_acos,1,math_acos_1,148)\nBIF_LIST(am_math,am_acosh,1,math_acosh_1,149)\nBIF_LIST(am_math,am_asin,1,math_asin_1,150)\nBIF_LIST(am_math,am_asinh,1,math_asinh_1,151)\nBIF_LIST(am_math,am_atan,1,math_atan_1,152)\nBIF_LIST(am_math,am_atanh,1,math_atanh_1,153)\nBIF_LIST(am_math,am_erf,1,math_erf_1,154)\nBIF_LIST(am_math,am_erfc,1,math_erfc_1,155)\nBIF_LIST(am_math,am_exp,1,math_exp_1,156)\nBIF_LIST(am_math,am_log,1,math_log_1,157)\nBIF_LIST(am_math,am_log2,1,math_log2_1,158)\nBIF_LIST(am_math,am_log10,1,math_log10_1,159)\nBIF_LIST(am_math,am_sqrt,1,math_sqrt_1,160)\nBIF_LIST(am_math,am_atan2,2,math_atan2_2,161)\nBIF_LIST(am_math,am_pow,2,math_pow_2,162)\nBIF_LIST(am_erlang,am_start_timer,3,start_timer_3,163)\nBIF_LIST(am_erlang,am_start_timer,4,start_timer_4,164)\nBIF_LIST(am_erlang,am_send_after,3,send_after_3,165)\nBIF_LIST(am_erlang,am_send_after,4,send_after_4,166)\nBIF_LIST(am_erlang,am_cancel_timer,1,cancel_timer_1,167)\nBIF_LIST(am_erlang,am_cancel_timer,2,cancel_timer_2,168)\nBIF_LIST(am_erlang,am_read_timer,1,read_timer_1,169)\nBIF_LIST(am_erlang,am_read_timer,2,read_timer_2,170)\nBIF_LIST(am_erlang,am_make_tuple,2,make_tuple_2,171)\nBIF_LIST(am_erlang,am_append_element,2,append_element_2,172)\nBIF_LIST(am_erlang,am_make_tuple,3,make_tuple_3,173)\nBIF_LIST(am_erlang,am_system_flag,2,system_flag_2,174)\nBIF_LIST(am_erlang,am_system_info,1,system_info_1,175)\nBIF_LIST(am_erlang,am_system_monitor,0,system_monitor_0,176)\nBIF_LIST(am_erlang,am_system_monitor,1,system_monitor_1,177)\nBIF_LIST(am_erlang,am_system_monitor,2,system_monitor_2,178)\nBIF_LIST(am_erlang,am_system_profile,2,system_profile_2,179)\nBIF_LIST(am_erlang,am_system_profile,0,system_profile_0,180)\nBIF_LIST(am_erlang,am_ref_to_list,1,ref_to_list_1,181)\nBIF_LIST(am_erlang,am_port_to_list,1,port_to_list_1,182)\nBIF_LIST(am_erlang,am_fun_to_list,1,fun_to_list_1,183)\nBIF_LIST(am_erlang,am_monitor,2,monitor_2,184)\nBIF_LIST(am_erlang,am_demonitor,1,demonitor_1,185)\nBIF_LIST(am_erlang,am_demonitor,2,demonitor_2,186)\nBIF_LIST(am_erlang,am_is_process_alive,1,is_process_alive_1,187)\nBIF_LIST(am_erlang,am_error,1,error_1,188)\nBIF_LIST(am_erlang,am_error,2,error_2,189)\nBIF_LIST(am_erlang,am_raise,3,raise_3,190)\nBIF_LIST(am_erlang,am_get_stacktrace,0,get_stacktrace_0,191)\nBIF_LIST(am_erlang,am_is_builtin,3,is_builtin_3,192)\nBIF_LIST(am_erlang,am_and,2,and_2,193)\nBIF_LIST(am_erlang,am_or,2,or_2
,194)\nBIF_LIST(am_erlang,am_xor,2,xor_2,195)\nBIF_LIST(am_erlang,am_not,1,not_1,196)\nBIF_LIST(am_erlang,am_Gt,2,sgt_2,197)\nBIF_LIST(am_erlang,am_Ge,2,sge_2,198)\nBIF_LIST(am_erlang,am_Lt,2,slt_2,199)\nBIF_LIST(am_erlang,am_Le,2,sle_2,200)\nBIF_LIST(am_erlang,am_Eq,2,seq_2,201)\nBIF_LIST(am_erlang,am_Eqeq,2,seqeq_2,202)\nBIF_LIST(am_erlang,am_Neq,2,sneq_2,203)\nBIF_LIST(am_erlang,am_Neqeq,2,sneqeq_2,204)\nBIF_LIST(am_erlang,am_Plus,2,splus_2,205)\nBIF_LIST(am_erlang,am_Minus,2,sminus_2,206)\nBIF_LIST(am_erlang,am_Times,2,stimes_2,207)\nBIF_LIST(am_erlang,am_Div,2,div_2,208)\nBIF_LIST(am_erlang,am_div,2,intdiv_2,209)\nBIF_LIST(am_erlang,am_rem,2,rem_2,210)\nBIF_LIST(am_erlang,am_bor,2,bor_2,211)\nBIF_LIST(am_erlang,am_band,2,band_2,212)\nBIF_LIST(am_erlang,am_bxor,2,bxor_2,213)\nBIF_LIST(am_erlang,am_bsl,2,bsl_2,214)\nBIF_LIST(am_erlang,am_bsr,2,bsr_2,215)\nBIF_LIST(am_erlang,am_bnot,1,bnot_1,216)\nBIF_LIST(am_erlang,am_Minus,1,sminus_1,217)\nBIF_LIST(am_erlang,am_Plus,1,splus_1,218)\nBIF_LIST(am_erlang,am__AtomAlias26,2,ebif_bang_2,219)\nBIF_LIST(am_erlang,am_send,2,send_2,220)\nBIF_LIST(am_erlang,am_send,3,send_3,221)\nBIF_LIST(am_erlang,am__AtomAlias27,2,ebif_plusplus_2,222)\nBIF_LIST(am_erlang,am_append,2,append_2,223)\nBIF_LIST(am_erlang,am__AtomAlias28,2,ebif_minusminus_2,224)\nBIF_LIST(am_erlang,am_subtract,2,subtract_2,225)\nBIF_LIST(am_erlang,am_is_atom,1,is_atom_1,226)\nBIF_LIST(am_erlang,am_is_list,1,is_list_1,227)\nBIF_LIST(am_erlang,am_is_tuple,1,is_tuple_1,228)\nBIF_LIST(am_erlang,am_is_float,1,is_float_1,229)\nBIF_LIST(am_erlang,am_is_integer,1,is_integer_1,230)\nBIF_LIST(am_erlang,am_is_number,1,is_number_1,231)\nBIF_LIST(am_erlang,am_is_pid,1,is_pid_1,232)\nBIF_LIST(am_erlang,am_is_port,1,is_port_1,233)\nBIF_LIST(am_erlang,am_is_reference,1,is_reference_1,234)\nBIF_LIST(am_erlang,am_is_binary,1,is_binary_1,235)\nBIF_LIST(am_erlang,am_is_function,1,is_function_1,236)\nBIF_LIST(am_erlang,am_is_function,2,is_function_2,237)\nBIF_LIST(am_erlang,am_is_record,2,is_record_2,238)\nBIF_LIST(am_erlang,am_is_record,3,is_record_3,239)\nBIF_LIST(am_erlang,am_match_spec_test,3,match_spec_test_3,240)\nBIF_LIST(am_ets,am_all,0,ets_all_0,241)\nBIF_LIST(am_ets,am_new,2,ets_new_2,242)\nBIF_LIST(am_ets,am_delete,1,ets_delete_1,243)\nBIF_LIST(am_ets,am_delete,2,ets_delete_2,244)\nBIF_LIST(am_ets,am_delete_all_objects,1,ets_delete_all_objects_1,245)\nBIF_LIST(am_ets,am_delete_object,2,ets_delete_object_2,246)\nBIF_LIST(am_ets,am_first,1,ets_first_1,247)\nBIF_LIST(am_ets,am_is_compiled_ms,1,ets_is_compiled_ms_1,248)\nBIF_LIST(am_ets,am_lookup,2,ets_lookup_2,249)\nBIF_LIST(am_ets,am_lookup_element,3,ets_lookup_element_3,250)\nBIF_LIST(am_ets,am_info,1,ets_info_1,251)\nBIF_LIST(am_ets,am_info,2,ets_info_2,252)\nBIF_LIST(am_ets,am_last,1,ets_last_1,253)\nBIF_LIST(am_ets,am_match,1,ets_match_1,254)\nBIF_LIST(am_ets,am_match,2,ets_match_2,255)\nBIF_LIST(am_ets,am_match,3,ets_match_3,256)\nBIF_LIST(am_ets,am_match_object,1,ets_match_object_1,257)\nBIF_LIST(am_ets,am_match_object,2,ets_match_object_2,258)\nBIF_LIST(am_ets,am_match_object,3,ets_match_object_3,259)\nBIF_LIST(am_ets,am_member,2,ets_member_2,260)\nBIF_LIST(am_ets,am_next,2,ets_next_2,261)\nBIF_LIST(am_ets,am_prev,2,ets_prev_2,262)\nBIF_LIST(am_ets,am_insert,2,ets_insert_2,263)\nBIF_LIST(am_ets,am_insert_new,2,ets_insert_new_2,264)\nBIF_LIST(am_ets,am_rename,2,ets_rename_2,265)\nBIF_LIST(am_ets,am_safe_fixtable,2,ets_safe_fixtable_2,266)\nBIF_LIST(am_ets,am_slot,2,ets_slot_2,267)\nBIF_LIST(am_ets,am_update_counter,3,ets_update_counter_3,268
)\nBIF_LIST(am_ets,am_select,1,ets_select_1,269)\nBIF_LIST(am_ets,am_select,2,ets_select_2,270)\nBIF_LIST(am_ets,am_select,3,ets_select_3,271)\nBIF_LIST(am_ets,am_select_count,2,ets_select_count_2,272)\nBIF_LIST(am_ets,am_select_reverse,1,ets_select_reverse_1,273)\nBIF_LIST(am_ets,am_select_reverse,2,ets_select_reverse_2,274)\nBIF_LIST(am_ets,am_select_reverse,3,ets_select_reverse_3,275)\nBIF_LIST(am_ets,am_select_delete,2,ets_select_delete_2,276)\nBIF_LIST(am_ets,am_match_spec_compile,1,ets_match_spec_compile_1,277)\nBIF_LIST(am_ets,am_match_spec_run_r,3,ets_match_spec_run_r_3,278)\nBIF_LIST(am_os,am_putenv,2,os_putenv_2,279)\nBIF_LIST(am_os,am_getenv,0,os_getenv_0,280)\nBIF_LIST(am_os,am_getenv,1,os_getenv_1,281)\nBIF_LIST(am_os,am_getpid,0,os_getpid_0,282)\nBIF_LIST(am_os,am_timestamp,0,os_timestamp_0,283)\nBIF_LIST(am_os,am_system_time,0,os_system_time_0,284)\nBIF_LIST(am_os,am_system_time,1,os_system_time_1,285)\nBIF_LIST(am_erl_ddll,am_try_load,3,erl_ddll_try_load_3,286)\nBIF_LIST(am_erl_ddll,am_try_unload,2,erl_ddll_try_unload_2,287)\nBIF_LIST(am_erl_ddll,am_loaded_drivers,0,erl_ddll_loaded_drivers_0,288)\nBIF_LIST(am_erl_ddll,am_info,2,erl_ddll_info_2,289)\nBIF_LIST(am_erl_ddll,am_format_error_int,1,erl_ddll_format_error_int_1,290)\nBIF_LIST(am_erl_ddll,am_monitor,2,erl_ddll_monitor_2,291)\nBIF_LIST(am_erl_ddll,am_demonitor,1,erl_ddll_demonitor_1,292)\nBIF_LIST(am_re,am_compile,1,re_compile_1,293)\nBIF_LIST(am_re,am_compile,2,re_compile_2,294)\nBIF_LIST(am_re,am_run,2,re_run_2,295)\nBIF_LIST(am_re,am_run,3,re_run_3,296)\nBIF_LIST(am_lists,am_member,2,lists_member_2,297)\nBIF_LIST(am_lists,am_reverse,2,lists_reverse_2,298)\nBIF_LIST(am_lists,am_keymember,3,lists_keymember_3,299)\nBIF_LIST(am_lists,am_keysearch,3,lists_keysearch_3,300)\nBIF_LIST(am_lists,am_keyfind,3,lists_keyfind_3,301)\nBIF_LIST(am_erts_debug,am_disassemble,1,erts_debug_disassemble_1,302)\nBIF_LIST(am_erts_debug,am_breakpoint,2,erts_debug_breakpoint_2,303)\nBIF_LIST(am_erts_debug,am_same,2,erts_debug_same_2,304)\nBIF_LIST(am_erts_debug,am_flat_size,1,erts_debug_flat_size_1,305)\nBIF_LIST(am_erts_debug,am_get_internal_state,1,erts_debug_get_internal_state_1,306)\nBIF_LIST(am_erts_debug,am_set_internal_state,2,erts_debug_set_internal_state_2,307)\nBIF_LIST(am_erts_debug,am_display,1,erts_debug_display_1,308)\nBIF_LIST(am_erts_debug,am_dist_ext_to_term,2,erts_debug_dist_ext_to_term_2,309)\nBIF_LIST(am_erts_debug,am_instructions,0,erts_debug_instructions_0,310)\nBIF_LIST(am_erts_debug,am_dump_monitors,1,erts_debug_dump_monitors_1,311)\nBIF_LIST(am_erts_debug,am_dump_links,1,erts_debug_dump_links_1,312)\nBIF_LIST(am_erts_debug,am_lock_counters,1,erts_debug_lock_counters_1,313)\nBIF_LIST(am_code,am_get_chunk,2,code_get_chunk_2,314)\nBIF_LIST(am_code,am_module_md5,1,code_module_md5_1,315)\nBIF_LIST(am_code,am_make_stub_module,3,code_make_stub_module_3,316)\nBIF_LIST(am_code,am_is_module_native,1,code_is_module_native_1,317)\nBIF_LIST(am_erlang,am_hibernate,3,hibernate_3,318)\nBIF_LIST(am_error_logger,am_warning_map,0,error_logger_warning_map_0,319)\nBIF_LIST(am_erlang,am_get_module_info,1,get_module_info_1,320)\nBIF_LIST(am_erlang,am_get_module_info,2,get_module_info_2,321)\nBIF_LIST(am_erlang,am_is_boolean,1,is_boolean_1,322)\nBIF_LIST(am_string,am_to_integer,1,string_to_integer_1,323)\nBIF_LIST(am_string,am_to_float,1,string_to_float_1,324)\nBIF_LIST(am_erlang,am_make_fun,3,make_fun_3,325)\nBIF_LIST(am_erlang,am_iolist_size,1,iolist_size_1,326)\nBIF_LIST(am_erlang,am_iolist_to_binary,1,iolist_to_binary_1,327)\nBIF_LIST(a
m_erlang,am_list_to_existing_atom,1,list_to_existing_atom_1,328)\nBIF_LIST(am_erlang,am_is_bitstring,1,is_bitstring_1,329)\nBIF_LIST(am_erlang,am_tuple_size,1,tuple_size_1,330)\nBIF_LIST(am_erlang,am_byte_size,1,byte_size_1,331)\nBIF_LIST(am_erlang,am_bit_size,1,bit_size_1,332)\nBIF_LIST(am_erlang,am_list_to_bitstring,1,list_to_bitstring_1,333)\nBIF_LIST(am_erlang,am_bitstring_to_list,1,bitstring_to_list_1,334)\nBIF_LIST(am_ets,am_update_element,3,ets_update_element_3,335)\nBIF_LIST(am_erlang,am_decode_packet,3,decode_packet_3,336)\nBIF_LIST(am_unicode,am_characters_to_binary,2,unicode_characters_to_binary_2,337)\nBIF_LIST(am_unicode,am_characters_to_list,2,unicode_characters_to_list_2,338)\nBIF_LIST(am_unicode,am_bin_is_7bit,1,unicode_bin_is_7bit_1,339)\nBIF_LIST(am_erlang,am_atom_to_binary,2,atom_to_binary_2,340)\nBIF_LIST(am_erlang,am_binary_to_atom,2,binary_to_atom_2,341)\nBIF_LIST(am_erlang,am_binary_to_existing_atom,2,binary_to_existing_atom_2,342)\nBIF_LIST(am_net_kernel,am_dflag_unicode_io,1,net_kernel_dflag_unicode_io_1,343)\nBIF_LIST(am_ets,am_give_away,3,ets_give_away_3,344)\nBIF_LIST(am_ets,am_setopts,2,ets_setopts_2,345)\nBIF_LIST(am_erlang,am_load_nif,2,load_nif_2,346)\nBIF_LIST(am_erlang,am_call_on_load_function,1,call_on_load_function_1,347)\nBIF_LIST(am_erlang,am_finish_after_on_load,2,finish_after_on_load_2,348)\nBIF_LIST(am_erlang,am_binary_to_term,2,binary_to_term_2,349)\nBIF_LIST(am_erlang,am_binary_part,2,binary_part_2,350)\nBIF_LIST(am_erlang,am_binary_part,3,binary_part_3,351)\nBIF_LIST(am_binary,am_compile_pattern,1,binary_compile_pattern_1,352)\nBIF_LIST(am_binary,am_match,2,binary_match_2,353)\nBIF_LIST(am_binary,am_match,3,binary_match_3,354)\nBIF_LIST(am_binary,am_matches,2,binary_matches_2,355)\nBIF_LIST(am_binary,am_matches,3,binary_matches_3,356)\nBIF_LIST(am_binary,am_longest_common_prefix,1,binary_longest_common_prefix_1,357)\nBIF_LIST(am_binary,am_longest_common_suffix,1,binary_longest_common_suffix_1,358)\nBIF_LIST(am_binary,am_first,1,binary_first_1,359)\nBIF_LIST(am_binary,am_last,1,binary_last_1,360)\nBIF_LIST(am_binary,am_at,2,binary_at_2,361)\nBIF_LIST(am_binary,am_part,2,binary_binary_part_2,362)\nBIF_LIST(am_binary,am_part,3,binary_binary_part_3,363)\nBIF_LIST(am_binary,am_bin_to_list,1,binary_bin_to_list_1,364)\nBIF_LIST(am_binary,am_bin_to_list,2,binary_bin_to_list_2,365)\nBIF_LIST(am_binary,am_bin_to_list,3,binary_bin_to_list_3,366)\nBIF_LIST(am_binary,am_list_to_bin,1,binary_list_to_bin_1,367)\nBIF_LIST(am_binary,am_copy,1,binary_copy_1,368)\nBIF_LIST(am_binary,am_copy,2,binary_copy_2,369)\nBIF_LIST(am_binary,am_referenced_byte_size,1,binary_referenced_byte_size_1,370)\nBIF_LIST(am_binary,am_encode_unsigned,1,binary_encode_unsigned_1,371)\nBIF_LIST(am_binary,am_encode_unsigned,2,binary_encode_unsigned_2,372)\nBIF_LIST(am_binary,am_decode_unsigned,1,binary_decode_unsigned_1,373)\nBIF_LIST(am_binary,am_decode_unsigned,2,binary_decode_unsigned_2,374)\nBIF_LIST(am_erlang,am_nif_error,1,nif_error_1,375)\nBIF_LIST(am_erlang,am_nif_error,2,nif_error_2,376)\nBIF_LIST(am_prim_file,am_internal_name2native,1,prim_file_internal_name2native_1,377)\nBIF_LIST(am_prim_file,am_internal_native2name,1,prim_file_internal_native2name_1,378)\nBIF_LIST(am_prim_file,am_internal_normalize_utf8,1,prim_file_internal_normalize_utf8_1,379)\nBIF_LIST(am_prim_file,am_is_translatable,1,prim_file_is_translatable_1,380)\nBIF_LIST(am_file,am_native_name_encoding,0,file_native_name_encoding_0,381)\nBIF_LIST(am_erlang,am_check_old_code,1,check_old_code_1,382)\nBIF_LIST(am_erlang,a
m_universaltime_to_posixtime,1,universaltime_to_posixtime_1,383)\nBIF_LIST(am_erlang,am_posixtime_to_universaltime,1,posixtime_to_universaltime_1,384)\nBIF_LIST(am_erlang,am_dt_put_tag,1,dt_put_tag_1,385)\nBIF_LIST(am_erlang,am_dt_get_tag,0,dt_get_tag_0,386)\nBIF_LIST(am_erlang,am_dt_get_tag_data,0,dt_get_tag_data_0,387)\nBIF_LIST(am_erlang,am_dt_spread_tag,1,dt_spread_tag_1,388)\nBIF_LIST(am_erlang,am_dt_restore_tag,1,dt_restore_tag_1,389)\nBIF_LIST(am_erlang,am_dt_prepend_vm_tag_data,1,dt_prepend_vm_tag_data_1,390)\nBIF_LIST(am_erlang,am_dt_append_vm_tag_data,1,dt_append_vm_tag_data_1,391)\nBIF_LIST(am_erlang,am_prepare_loading,2,prepare_loading_2,392)\nBIF_LIST(am_erlang,am_finish_loading,1,finish_loading_1,393)\nBIF_LIST(am_erlang,am_insert_element,3,insert_element_3,394)\nBIF_LIST(am_erlang,am_delete_element,2,delete_element_2,395)\nBIF_LIST(am_erlang,am_binary_to_integer,1,binary_to_integer_1,396)\nBIF_LIST(am_erlang,am_binary_to_integer,2,binary_to_integer_2,397)\nBIF_LIST(am_erlang,am_integer_to_binary,1,integer_to_binary_1,398)\nBIF_LIST(am_erlang,am_list_to_integer,2,list_to_integer_2,399)\nBIF_LIST(am_erlang,am_float_to_binary,1,float_to_binary_1,400)\nBIF_LIST(am_erlang,am_float_to_binary,2,float_to_binary_2,401)\nBIF_LIST(am_erlang,am_binary_to_float,1,binary_to_float_1,402)\nBIF_LIST(am_io,am_printable_range,0,io_printable_range_0,403)\nBIF_LIST(am_os,am_unsetenv,1,os_unsetenv_1,404)\nBIF_LIST(am_re,am_inspect,2,re_inspect_2,405)\nBIF_LIST(am_erlang,am_is_map,1,is_map_1,406)\nBIF_LIST(am_erlang,am_map_size,1,map_size_1,407)\nBIF_LIST(am_maps,am_to_list,1,maps_to_list_1,408)\nBIF_LIST(am_maps,am_find,2,maps_find_2,409)\nBIF_LIST(am_maps,am_get,2,maps_get_2,410)\nBIF_LIST(am_maps,am_from_list,1,maps_from_list_1,411)\nBIF_LIST(am_maps,am_is_key,2,maps_is_key_2,412)\nBIF_LIST(am_maps,am_keys,1,maps_keys_1,413)\nBIF_LIST(am_maps,am_merge,2,maps_merge_2,414)\nBIF_LIST(am_maps,am_new,0,maps_new_0,415)\nBIF_LIST(am_maps,am_put,3,maps_put_3,416)\nBIF_LIST(am_maps,am_remove,2,maps_remove_2,417)\nBIF_LIST(am_maps,am_update,3,maps_update_3,418)\nBIF_LIST(am_maps,am_values,1,maps_values_1,419)\nBIF_LIST(am_erts_internal,am_cmp_term,2,erts_internal_cmp_term_2,420)\nBIF_LIST(am_ets,am_take,2,ets_take_2,421)\nBIF_LIST(am_erlang,am_fun_info_mfa,1,fun_info_mfa_1,422)\nBIF_LIST(am_erlang,am_get_keys,0,get_keys_0,423)\nBIF_LIST(am_ets,am_update_counter,4,ets_update_counter_4,424)\nBIF_LIST(am_erts_debug,am_map_info,1,erts_debug_map_info_1,425)\nBIF_LIST(am_erlang,am_hash,2,hash_2,426)\nBIF_LIST(am_hipe_bifs,am_write_u8,2,hipe_bifs_write_u8_2,427)\nBIF_LIST(am_hipe_bifs,am_write_u32,2,hipe_bifs_write_u32_2,428)\nBIF_LIST(am_hipe_bifs,am_bytearray,2,hipe_bifs_bytearray_2,429)\nBIF_LIST(am_hipe_bifs,am_bytearray_sub,2,hipe_bifs_bytearray_sub_2,430)\nBIF_LIST(am_hipe_bifs,am_bytearray_update,3,hipe_bifs_bytearray_update_3,431)\nBIF_LIST(am_hipe_bifs,am_bitarray,2,hipe_bifs_bitarray_2,432)\nBIF_LIST(am_hipe_bifs,am_bitarray_sub,2,hipe_bifs_bitarray_sub_2,433)\nBIF_LIST(am_hipe_bifs,am_bitarray_update,3,hipe_bifs_bitarray_update_3,434)\nBIF_LIST(am_hipe_bifs,am_array,2,hipe_bifs_array_2,435)\nBIF_LIST(am_hipe_bifs,am_array_length,1,hipe_bifs_array_length_1,436)\nBIF_LIST(am_hipe_bifs,am_array_sub,2,hipe_bifs_array_sub_2,437)\nBIF_LIST(am_hipe_bifs,am_array_update,3,hipe_bifs_array_update_3,438)\nBIF_LIST(am_hipe_bifs,am_ref,1,hipe_bifs_ref_1,439)\nBIF_LIST(am_hipe_bifs,am_ref_get,1,hipe_bifs_ref_get_1,440)\nBIF_LIST(am_hipe_bifs,am_ref_set,2,hipe_bifs_ref_set_2,441)\nBIF_LIST(am_hipe_bifs,am_enter_code
,2,hipe_bifs_enter_code_2,442)\nBIF_LIST(am_hipe_bifs,am_alloc_data,2,hipe_bifs_alloc_data_2,443)\nBIF_LIST(am_hipe_bifs,am_constants_size,0,hipe_bifs_constants_size_0,444)\nBIF_LIST(am_hipe_bifs,am_merge_term,1,hipe_bifs_merge_term_1,445)\nBIF_LIST(am_hipe_bifs,am_fun_to_address,1,hipe_bifs_fun_to_address_1,446)\nBIF_LIST(am_hipe_bifs,am_set_native_address,3,hipe_bifs_set_native_address_3,447)\nBIF_LIST(am_hipe_bifs,am_set_funinfo_native_address,3,hipe_bifs_set_funinfo_native_address_3,448)\nBIF_LIST(am_hipe_bifs,am_invalidate_funinfo_native_addresses,1,hipe_bifs_invalidate_funinfo_native_addresses_1,449)\nBIF_LIST(am_hipe_bifs,am_update_code_size,3,hipe_bifs_update_code_size_3,450)\nBIF_LIST(am_hipe_bifs,am_code_size,1,hipe_bifs_code_size_1,451)\nBIF_LIST(am_hipe_bifs,am_enter_sdesc,1,hipe_bifs_enter_sdesc_1,452)\nBIF_LIST(am_hipe_bifs,am_bif_address,3,hipe_bifs_bif_address_3,453)\nBIF_LIST(am_hipe_bifs,am_primop_address,1,hipe_bifs_primop_address_1,454)\nBIF_LIST(am_hipe_bifs,am_atom_to_word,1,hipe_bifs_atom_to_word_1,455)\nBIF_LIST(am_hipe_bifs,am_term_to_word,1,hipe_bifs_term_to_word_1,456)\nBIF_LIST(am_hipe_bifs,am_get_fe,2,hipe_bifs_get_fe_2,457)\nBIF_LIST(am_hipe_bifs,am_set_native_address_in_fe,2,hipe_bifs_set_native_address_in_fe_2,458)\nBIF_LIST(am_hipe_bifs,am_find_na_or_make_stub,2,hipe_bifs_find_na_or_make_stub_2,459)\nBIF_LIST(am_hipe_bifs,am_check_crc,1,hipe_bifs_check_crc_1,460)\nBIF_LIST(am_hipe_bifs,am_system_crc,0,hipe_bifs_system_crc_0,461)\nBIF_LIST(am_hipe_bifs,am_get_rts_param,1,hipe_bifs_get_rts_param_1,462)\nBIF_LIST(am_hipe_bifs,am_patch_insn,3,hipe_bifs_patch_insn_3,463)\nBIF_LIST(am_hipe_bifs,am_patch_call,3,hipe_bifs_patch_call_3,464)\nBIF_LIST(am_hipe_bifs,am_add_ref,2,hipe_bifs_add_ref_2,465)\nBIF_LIST(am_hipe_bifs,am_mark_referred_from,1,hipe_bifs_mark_referred_from_1,466)\nBIF_LIST(am_hipe_bifs,am_remove_refs_from,1,hipe_bifs_remove_refs_from_1,467)\nBIF_LIST(am_hipe_bifs,am_redirect_referred_from,1,hipe_bifs_redirect_referred_from_1,468)\nBIF_LIST(am_hipe_bifs,am_call_count_on,1,hipe_bifs_call_count_on_1,469)\nBIF_LIST(am_hipe_bifs,am_call_count_off,1,hipe_bifs_call_count_off_1,470)\nBIF_LIST(am_hipe_bifs,am_call_count_get,1,hipe_bifs_call_count_get_1,471)\nBIF_LIST(am_hipe_bifs,am_call_count_clear,1,hipe_bifs_call_count_clear_1,472)\nBIF_LIST(am_hipe_bifs,am_trap_count_get,0,hipe_bifs_trap_count_get_0,473)\nBIF_LIST(am_hipe_bifs,am_trap_count_clear,0,hipe_bifs_trap_count_clear_0,474)\nBIF_LIST(am_hipe_bifs,am_process_info,0,hipe_bifs_process_info_0,475)\nBIF_LIST(am_hipe_bifs,am_process_info_clear,0,hipe_bifs_process_info_clear_0,476)\nBIF_LIST(am_hipe_bifs,am_message_info,0,hipe_bifs_message_info_0,477)\nBIF_LIST(am_hipe_bifs,am_message_info_clear,0,hipe_bifs_message_info_clear_0,478)\nBIF_LIST(am_hipe_bifs,am_message_sizes,0,hipe_bifs_message_sizes_0,479)\nBIF_LIST(am_hipe_bifs,am_gc_info,0,hipe_bifs_gc_info_0,480)\nBIF_LIST(am_hipe_bifs,am_shared_gc_info,0,hipe_bifs_shared_gc_info_0,481)\nBIF_LIST(am_hipe_bifs,am_incremental_gc_info,0,hipe_bifs_incremental_gc_info_0,482)\nBIF_LIST(am_hipe_bifs,am_gc_info_clear,0,hipe_bifs_gc_info_clear_0,483)\nBIF_LIST(am_hipe_bifs,am_pause_times,0,hipe_bifs_pause_times_0,484)\nBIF_LIST(am_hipe_bifs,am_system_timer,0,hipe_bifs_system_timer_0,485)\nBIF_LIST(am_hipe_bifs,am_system_timer_clear,0,hipe_bifs_system_timer_clear_0,486)\nBIF_LIST(am_hipe_bifs,am_send_timer,0,hipe_bifs_send_timer_0,487)\nBIF_LIST(am_hipe_bifs,am_send_timer_clear,0,hipe_bifs_send_timer_clear_0,488)\nBIF_LIST(am_hipe_bifs,am_gc_timer,0,hipe_bifs_g
c_timer_0,489)\nBIF_LIST(am_hipe_bifs,am_shared_gc_timer,0,hipe_bifs_shared_gc_timer_0,490)\nBIF_LIST(am_hipe_bifs,am_gc_timer_clear,0,hipe_bifs_gc_timer_clear_0,491)\nBIF_LIST(am_hipe_bifs,am_misc_timer,0,hipe_bifs_misc_timer_0,492)\nBIF_LIST(am_hipe_bifs,am_misc_timer_clear,0,hipe_bifs_misc_timer_clear_0,493)\nBIF_LIST(am_hipe_bifs,am_get_hrvtime,0,hipe_bifs_get_hrvtime_0,494)\nBIF_LIST(am_hipe_bifs,am_stop_hrvtime,0,hipe_bifs_stop_hrvtime_0,495)\nBIF_LIST(am_hipe_bifs,am_show_estack,1,hipe_bifs_show_estack_1,496)\nBIF_LIST(am_hipe_bifs,am_show_heap,1,hipe_bifs_show_heap_1,497)\nBIF_LIST(am_hipe_bifs,am_show_nstack,1,hipe_bifs_show_nstack_1,498)\nBIF_LIST(am_hipe_bifs,am_nstack_used_size,0,hipe_bifs_nstack_used_size_0,499)\nBIF_LIST(am_hipe_bifs,am_show_pcb,1,hipe_bifs_show_pcb_1,500)\nBIF_LIST(am_hipe_bifs,am_show_term,1,hipe_bifs_show_term_1,501)\nBIF_LIST(am_hipe_bifs,am_in_native,0,hipe_bifs_in_native_0,502)\nBIF_LIST(am_hipe_bifs,am_modeswitch_debug_on,0,hipe_bifs_modeswitch_debug_on_0,503)\nBIF_LIST(am_hipe_bifs,am_modeswitch_debug_off,0,hipe_bifs_modeswitch_debug_off_0,504)\nBIF_LIST(am_hipe_bifs,am_debug_native_called,2,hipe_bifs_debug_native_called_2,505)\nBIF_LIST(am_hipe_bifs,am_llvm_fix_pinned_regs,0,hipe_bifs_llvm_fix_pinned_regs_0,506)\nBIF_LIST(am_hipe_bifs,am_write_u64,2,hipe_bifs_write_u64_2,507)\n"
},
{
"alpha_fraction": 0.5915789604187012,
"alphanum_fraction": 0.706315815448761,
"avg_line_length": 26.14285659790039,
"blob_id": "9caab00e4b26081cec608a9f152195723194c4cb",
"content_id": "0f4092ab0dda293664ffc0ec53abd926363e4d8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 35,
"path": "/bin/dvtplugin-uuid-update",
"repo_name": "users-tree/alex",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n#ID='A16FF353-8441-459E-A50C-B071F53F51B7' # Xcode 6.2\n#ID='992275C1-432A-4CF7-B659-D84ED6D42D3F' # Xcode 6.3\n#ID='7265231C-39B4-402C-89E1-16167C4CC990' # Xcode Version 7.1.1 (7B1005)\nID='F41BD31E-2683-44B8-AE7F-5F09E919790E' # Xcode 7.2\n\nPLIST_BUDDY=/usr/libexec/PlistBuddy\n\nID=`/usr/libexec/PlistBuddy -c 'Print DVTPlugInCompatibilityUUID' \"$(xcode-select -p)/../Info.plist\"`\n\nfunction add_compatibility() {\n \"$PLIST_BUDDY\" -c \"Add DVTPlugInCompatibilityUUIDs:10 string $2\" \\\n \"$1/Contents/Info.plist\"\n}\n\nfunction has_compatibility() {\n $PLIST_BUDDY -c 'Print DVTPlugInCompatibilityUUIDs' \\\n \"$1/Contents/Info.plist\"|grep -q \"$2\"\n return $?\n}\n\ncd \"$HOME/Library/Application Support/Developer/Shared/Xcode/Plug-ins\"\n\nfor file in `ls -d *`\ndo\n\n if `has_compatibility \"$file\" $ID` \n then\n true\n else\n echo \"Plugin $file is now compatible with the newest Xcode using UUID : $ID\"\n add_compatibility \"$file\" $ID\n fi\ndone\n"
}
] | 35 |
alexinfanger/distributionally_robust_optimization
|
https://github.com/alexinfanger/distributionally_robust_optimization
|
9362f41d57c838c5ffd1d3f7cc763128301f0077
|
cbf5b35704e560630419779f6359e77486a09438
|
99f399d1dcba4a020f58072b4f390275665bb7dd
|
refs/heads/master
| 2021-04-06T13:41:51.083975 | 2018-03-20T02:33:29 | 2018-03-20T02:33:29 | 124,680,282 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.3888888955116272,
"alphanum_fraction": 0.49074074625968933,
"avg_line_length": 11,
"blob_id": "cc123f7e501385d85e2aca7a3502e6faad77f155",
"content_id": "cae7b4e7caf59213a624a31856e10cead78ec7f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 9,
"path": "/config.py",
"repo_name": "alexinfanger/distributionally_robust_optimization",
"src_encoding": "UTF-8",
"text": "import numpy as np\nclass config():\n ndt = 1000\n nu = 1\n p = 2\n eta = .1\n dt = .1\n m_1 = 1\n b_var = 1\n"
},
{
"alpha_fraction": 0.5457016825675964,
"alphanum_fraction": 0.5691471099853516,
"avg_line_length": 21.105262756347656,
"blob_id": "52d5c9d60f8b888c705d0f89594bcfc4449de288",
"content_id": "8cbc1ef541ef9bc4e3e1bd89bf97dc396b22d4cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2943,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 133,
"path": "/Khoshnevisan_embedding.py",
"repo_name": "alexinfanger/distributionally_robust_optimization",
"src_encoding": "UTF-8",
"text": "from math import sqrt\nfrom scipy.stats import norm\nimport numpy as np\nfrom config import config\n\nimport matplotlib.pyplot as plt\nimport pickle\n\n\ndef simulate_coupling():\n\n X = np.ones(50)\n c_max = 0.0\n c_time = 0.0 \n c_sum = 0.0\n c = 0 # index\n tau=[0.0]\n B = np.zeros(config.ndt)\n S = np.zeros(config.ndt)\n N = np.zeros(config.ndt)\n A = np.zeros(config.ndt)\n Z = np.zeros(config.ndt)\n while True:\n x = np.random.pareto(2.2)\n found=False\n while not found:\n c+=1\n if c>=config.ndt-1:\n break\n c_time += config.dt\n B[c] = B[c-1]+np.random.normal(scale=np.sqrt(config.dt))\n if c_max < B[c]:\n c_max=B[c]\n elif B[c]<=c_max-x:\n tau.append(c_time)\n c_max = B[c]\n c_sum += x\n S[c] = c_max\n N[c] = c_sum\n break\n S[c] = c_max\n N[c] = c_sum\n if c>=config.ndt-1:\n break\n\n A = S+N\n\n\n t_last=0\n # sigma = np.zeros(len(B),dtype=np.int8)\n sigma = []\n for t1 in range(0,len(B)):\n for t2 in range(0,len(B)):\n if A[t2]>= config.m_1*t1*config.dt:\n sigma.append(t2)\n # sigma[t1]=int(t2)\n # t_last = t2\n break\n\n\n # print('len of sigma')\n # print(len(sigma))\n # print(sigma)\n\n theline = [i*config.dt*config.m_1 for i in range(0,len(B))]\n Z = np.array([S[i] for i in sigma])\n # Z = -Z\n # B = -np.array(B)\n\n\n\n B = -B\n Z=-Z\n t = np.linspace(0,len(B)*config.dt,num=len(B))\n\n # splot = plt.plot(t[0:len(Z)],S,label='S')\n bplot = plt.plot(t[0:len(Z)],B[0:len(Z)],label='B')\n zplot = plt.plot(t[0:len(Z)],Z[0:len(Z)],label='Z')\n # Aplot = plt.plot(t[0:len(Z)],A)\n # Nplot = plt.plot(t[0:len(Z)],N,label='N')\n # thelineplot = plt.plot(theline)\n # print('Is A non-decreasing')\n # print(np.all(np.diff(A) >= 0))\n\n plt.legend()\n plt.show()\n print(len(Z))\n return [B[0:len(Z)],Z[0:len(Z)]]\n\n\ndef expected_cost(nsamples):\n samples = np.zeros(nsamples)\n for i in range(0,nsamples):\n rtuple = simulate_coupling()\n samples[i] = max(abs(rtuple[0]-rtuple[1]))\n return np.mean(samples)\n\ndef clt(nsamples,nmeans):\n means = np.zeros(nmeans)\n samples = np.zeros(nmeans,nsamples)\n for i in range(0,nmeans):\n for j in range(0,nsamples):\n rtuple = simulate_coupling()\n samples[i,j] = max(abs(rtuple[0]-rtuple[1]))\n means[i] = np.mean(samples[i,:])\n print(np.shape(means))\n meansdict = {\"means\": means, \"samples\": samples}\n pickle_out = open(\"vals.pickle\",\"wb\")\n pickle.dump(meansdict,pickle_out)\n pickle_out.close()\n\n# def make_data(nsamples):\n # samples = np.zeros(nsamples,)\n # for i in range(0,nsamples):\n\n\nif __name__ == '__main__':\n rtuple = simulate_coupling()\n\n # ctuple = expected_cost(100)\n # \n # clt(1000,1000)\n\n # pickle_in = open(\"vals.pickle\",\"rb\")\n # test = pickle.load(pickle_in)\n # means = test[\"means\"]\n # print(test[\"samples\"])\n # pickle_in.close()\n\n # plt.hist(means,10)\n # plt.show()\n # A = [1,2,3]\n # print([A[i] for i in (1,2)])\n\n\n\n"
}
] | 2 |
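The expected_cost and clt functions in Khoshnevisan_embedding.py above follow a standard Monte Carlo pattern: average many independent runs of the simulation, then use the spread of the samples to judge the error of the estimate. A minimal, self-contained sketch of that pattern, with a toy simulate() standing in for the repo's simulate_coupling() (everything here is a hypothetical stand-in, not the repo's code):

```python
import numpy as np

def simulate(rng):
    # Hypothetical stand-in for simulate_coupling(): returns one sample
    # of the quantity being averaged (max of 10 absolute normal draws).
    return np.abs(rng.standard_normal(10)).max()

def monte_carlo_mean(n_samples=1000, seed=0):
    rng = np.random.default_rng(seed)
    samples = np.array([simulate(rng) for _ in range(n_samples)])
    mean = samples.mean()
    # CLT-based 95% confidence half-width: 1.96 * s / sqrt(n)
    half_width = 1.96 * samples.std(ddof=1) / np.sqrt(n_samples)
    return mean, half_width

print(monte_carlo_mean())
```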
siorin/udacity-data1-project2-explore-bikeshare-data
|
https://github.com/siorin/udacity-data1-project2-explore-bikeshare-data
|
9c9f19929919d13b05589248c58cf05ac28a1feb
|
d2a917d36eacbb3a3e7d0e998eeb5fbd1b364a5b
|
c27e2d60255e3c52f833ca8c3d0ecafbd7015c83
|
refs/heads/master
| 2020-04-24T07:22:32.590718 | 2019-02-21T06:13:00 | 2019-02-21T06:13:00 | 171,797,222 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5871074795722961,
"alphanum_fraction": 0.5912529826164246,
"avg_line_length": 36.54474639892578,
"blob_id": "53b2aba3606034b3a07c71fd9d6e6daf5ac57577",
"content_id": "83d09f2f8ec11b41853e919e0baf404c0b05a12d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9649,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 257,
"path": "/bikeshare_2.py",
"repo_name": "siorin/udacity-data1-project2-explore-bikeshare-data",
"src_encoding": "UTF-8",
"text": "import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n cities = ['chicago', 'new york city', 'washington']\n while True:\n city = input(\"Enter a city - chicago, new york city, or washington : \").lower()\n if city in cities:\n break\n print('Invalid Input. Enter one of : chicago, new york city, washington')\n\n # get user input for month (all, january, february, ... , june)\n months = ['all', 'january', 'february', 'march', 'april', 'may', 'june', \n 'july', 'august', 'september', 'october', 'november', 'december']\n while True: \n month = input(\"Enter a month - all, january, february, march, april, ... : \").lower()\n if month in months:\n break\n print('Invalid Input. Enter one of: all, january, february, march, april, may, june, july, august, september, october, november, december')\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n while True:\n day = input(\"Enter a day of week - all, monday, tuesday, wednesday, thursday, friday, saturday, sunday : \").lower()\n if day in days:\n break\n print('Invalid Input. Enter one of: all, monday, tuesday, wednesday, thursday, friday, saturday, sunday')\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n \n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # df['day_of_week'] = df['Start Time'].dt.weekday \n # days = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', \n # 5: 'Saturday', 6: 'Sunday'}\n # df['day_of_week'] = df['day_of_week'].apply(lambda x: days[x])\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june', \n 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n # df = df.loc[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n # df = df.loc[df['day_of_week']==day.title()]\n \n 
return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n months = ['january', 'february', 'march', 'april', 'may', 'june', \n 'july', 'august', 'september', 'october', 'november', 'december']\n mode_month = df['month'].mode()[0]\n print('Most common month: {}'.format(months[mode_month - 1]))\n\n # display the most common day of week\n mode_day = df['day_of_week'].mode()[0]\n print('Most common day of week: {}'.format(mode_day))\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n mode_hour = df['hour'].mode()[0]\n print('Most common start hour: {}'.format(mode_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n mode_start_st = df['Start Station'].mode()[0]\n print('Most common start station: {}'.format(mode_start_st))\n\n # display most commonly used end station\n mode_end_st = df['End Station'].mode()[0]\n print('Most common end station: {}'.format(mode_end_st))\n\n # display most frequent combination of start station and end station trip\n df['Start_End'] = df['Start Station'] + ' -> ' + df['End Station']\n mode_start_end = df['Start_End'].mode()[0]\n print('Most common combination of start and end stations: {}'.format(mode_start_end))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_time = df['Trip Duration'].sum()\n print('Total travel time: {} seconds'.format(total_time))\n\n # display mean travel time\n mean_time = df['Trip Duration'].mean()\n print('Mean travel time: {} seconds'.format(mean_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Counts of user types: ')\n print(df['User Type'].value_counts())\n\n # Display counts of gender\n # only available for NYC and Chicago *****\n try:\n count_gender = df['Gender'].value_counts()\n except: \n print('Selected city has no gender data')\n else: \n print('Counts of gender: ')\n print(count_gender)\n\n # Display earliest, most recent, and most common year of birth\n # only available for NYC and Chicago *****\n try: \n earliest_birth = int(df['Birth Year'].min())\n recent_birth = int(df['Birth Year'].max())\n common_birth = int(df['Birth Year'].mode()[0])\n except: \n print('Selected city has no birth year data')\n else: \n print('Earliest year of birth: {}'.format(earliest_birth))\n print('Most recent year of birth: {}'.format(recent_birth))\n print('Most common year of birth: {}'.format(common_birth))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n# edit 1 : added display raw data function\ndef display_data(df):\n \"\"\"\n responses = ['yes', 'no']\n while True:\n display = input(\"Do you want to view the raw data? 
Enter yes or no : \").lower()\n if display in responses:\n break\n print('Invalid Input. Enter one of : yes, no') \n # https://stackoverflow.com/questions/19124601/is-there-a-way-to-pretty-print-the-entire-pandas-series-dataframe\n if display == 'yes':\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n print(df.iloc[])\n \"\"\"\n # edit 2: printing additional raw data based on user input\n while True:\n display = input(\"Do you want to view the raw data? Enter yes or no : \").lower()\n # display first few rows\n if display == 'yes':\n current = 0 # current index\n counter = 5 # number of rows to display\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n print(df.iloc[current:counter, ])\n current += counter\n while current < df.shape[0]:\n additional = input(\"Do you want to view additional raw data? Enter yes or no : \").lower()\n if additional == 'yes':\n # print(\"current: {}\".format(current))\n # print(\"counter: {}\".format(counter))\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n print(df.iloc[current:current+counter, ])\n current += counter\n elif additional == 'no': \n break\n else: \n print('Invalid Input. Enter one of : yes, no')\n break\n if display == 'no': \n break\n print('Invalid Input. Enter one of : yes, no')\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n # edit 1 : added display raw data function\n display_data(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n"
}
] | 1 |
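bikeshare_2.py above derives its month and weekday columns from the Series.dt datetime accessors. Note that dt.weekday_name, which the script uses, was deprecated and then removed in pandas 1.0; dt.day_name() is the current equivalent. A small sketch of the same filtering with the modern accessor, using two hypothetical rows in place of the city CSVs:

```python
import pandas as pd

# Two toy rows standing in for the bikeshare CSVs (hypothetical data).
df = pd.DataFrame({"Start Time": ["2017-01-01 09:07:57", "2017-06-15 18:30:00"]})
df["Start Time"] = pd.to_datetime(df["Start Time"])

df["month"] = df["Start Time"].dt.month
# dt.weekday_name (used in bikeshare_2.py) was removed in pandas 1.0;
# dt.day_name() returns the same "Monday"/"Tuesday"/... strings.
df["day_of_week"] = df["Start Time"].dt.day_name()

print(df[df["day_of_week"] == "Thursday"])
```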
imanoracle/dummydjango
|
https://github.com/imanoracle/dummydjango
|
010fc8d7d489ddb8ee7a0667dab12c141109a1cf
|
965d88d7b8de2d5e8f9e0fb9106a9f02fbfbf52a
|
de6caee15f63574a7d49b98b68e6f093f737506c
|
refs/heads/master
| 2022-04-24T14:35:21.768261 | 2020-04-22T12:13:16 | 2020-04-22T12:13:16 | 257,885,831 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.48441246151924133,
"alphanum_fraction": 0.5587530136108398,
"avg_line_length": 21.16666603088379,
"blob_id": "437b6fc09c2e2eb826921883c0d6fb5702cce6aa",
"content_id": "7c587da205517376c5af2f8e3312ba177b0b2f38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 18,
"path": "/demo/migrations/0004_auto_20200417_1623.py",
"repo_name": "imanoracle/dummydjango",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.5 on 2020-04-17 19:23\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('demo', '0003_auto_20200417_1621'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='book',\r\n name='cover',\r\n field=models.ImageField(blank=True, upload_to='covers/'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.7030567526817322,
"alphanum_fraction": 0.7030567526817322,
"avg_line_length": 23.66666603088379,
"blob_id": "86ac673df745c8fd5e7bfba654f16451445515eb",
"content_id": "f70e5cec43ab75747440f3541e91c10d39de0f4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/demo/admin.py",
"repo_name": "imanoracle/dummydjango",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\r\nfrom .models import Book \r\n \r\n#admin.site.register(Book)\r\n\r\[email protected](Book)\r\nclass BookAdmin(admin.ModelAdmin):\r\n list_display = ['title', 'description']\r\n list_filter = ['published']"
},
{
"alpha_fraction": 0.6696751117706299,
"alphanum_fraction": 0.6841155290603638,
"avg_line_length": 32.75,
"blob_id": "8ab15b109ec7ba0114d16f83ad7df6c5b4773f85",
"content_id": "9fdf32cce40a858e134194649017d9ee67cc8d68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 16,
"path": "/demo/models.py",
"repo_name": "imanoracle/dummydjango",
"src_encoding": "UTF-8",
"text": "from django.db import models\r\n\r\n# Create your models here.\r\n\r\n\r\nclass Book(models.Model):\r\n title = models.CharField(max_length=36, blank=False, unique = True)\r\n description = models.TextField(max_length=256, blank= True)\r\n price = models.DecimalField(default=0, max_digits= 3, decimal_places= 0)\r\n published = models.DateField(blank= True, null= True, default = None)\r\n is_published = models.BooleanField(default=False)\r\n\r\n cover = models.ImageField(upload_to='covers/', blank=True)\r\n\r\n def __str__(self):\r\n return self.title"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 30.66666603088379,
"blob_id": "8b3a57f0506225de314a5a2174a4bf892e1362b7",
"content_id": "b446f67cdf116dcf6ada70fb650a28dc3dd86b07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 12,
"path": "/demo/views.py",
"repo_name": "imanoracle/dummydjango",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django.views import View\r\nfrom .models import Book\r\nfrom django.shortcuts import render\r\nfrom rest_framework import viewsets\r\nfrom .serializers import BookSerializer\r\nfrom .models import Book\r\n\r\nclass BookViewSet(viewsets.ModelViewSet):\r\n serializer_class = BookSerializer\r\n queryset = Book.objects.all()"
}
] | 4 |
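The BookViewSet in demo/views.py above only becomes reachable over HTTP once it is registered with a router. A minimal wiring sketch, assuming a 'books' route prefix and an 'api/' mount point (neither is specified in the repo, so both are illustrative choices):

```python
# urls.py (hypothetical wiring, not part of the repo shown above)
from django.urls import include, path
from rest_framework import routers

from demo.views import BookViewSet

router = routers.DefaultRouter()
router.register(r'books', BookViewSet)  # exposes list/retrieve/create/update/delete

urlpatterns = [
    path('api/', include(router.urls)),
]
```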
Shandelier/Telegram2Text
|
https://github.com/Shandelier/Telegram2Text
|
24e1241c10f5f8f3b8e2a80519febb9470674e75
|
33684cd9d486e76e0ebbad6be1340a2886750faa
|
c51cfdad2a59d1eba3b26ebb858c993824be37cd
|
refs/heads/master
| 2020-09-12T17:12:30.476517 | 2019-11-18T18:39:12 | 2019-11-18T18:39:12 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5221518874168396,
"alphanum_fraction": 0.5395569801330566,
"avg_line_length": 21.571428298950195,
"blob_id": "9923244d11b9887f2b838cc75d62d6b65f3cac02",
"content_id": "428a75caf73079b318b07a6e34ef5d444018a4d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 28,
"path": "/main.py",
"repo_name": "Shandelier/Telegram2Text",
"src_encoding": "UTF-8",
"text": "import re\n\ntext, author1, content, author2 = \"\", \"\", \"\", \"\"\n\ninput_file = open('input.txt', 'r')\noutput_file = open('output.txt', 'w')\n\nfor line in input_file:\n line.rstrip()\n if line.isspace():\n continue\n if (re.search(\"\\[In reply\", line)):\n text += \"[reply] \"\n continue\n if (re.search(\".*\\[.*\\]\", line)):\n author2 = author1\n author1 = re.split(\",.\\[\", line, 1)[0]\n if author1 == author2 or not author1 and not author2:\n continue\n text += \"\\n\" + line\n else:\n text += line\n\noutput_file.write(text)\nprint(text)\n\ninput_file.close()\noutput_file.close()\n"
}
] | 1 |
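main.py above detects "Name, [date time]" header lines with re.search and extracts the author with re.split. A short, self-contained illustration of those two regexes on hypothetical export lines:

```python
import re

# Hypothetical sample lines in the "Name, [date time]" format main.py parses.
lines = [
    "Alice, [18.11.19 17:02]",
    "hello there",
    "Bob, [18.11.19 17:03]",
    "hi!",
]

for line in lines:
    if re.search(r".*\[.*\]", line):
        # ",.\[" matches the comma, one separator character, and the opening bracket
        author = re.split(r",.\[", line, maxsplit=1)[0]
        print("header ->", author)
    else:
        print("body   ->", line)
```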
spencerbraun/pytorch_hackathon
|
https://github.com/spencerbraun/pytorch_hackathon
|
9db75d684dc5e4de8e390dd886f89f5b0b54e0ab
|
9f2edac7a784ac2c16d2578a6e0f6a1ef305a817
|
5b4da68f9f039540848a41b5f72797d493a7d6e3
|
refs/heads/master
| 2022-12-16T12:17:10.028770 | 2020-09-02T02:47:40 | 2020-09-02T02:47:40 | 282,516,550 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6167904138565063,
"alphanum_fraction": 0.625356912612915,
"avg_line_length": 28.436975479125977,
"blob_id": "122fbfa25a203fcdd3dbc053db9815fb1a32b1d2",
"content_id": "3a1a6dd72e8f639b51f60eea6e2c9ad77e2d3ec1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3502,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 119,
"path": "/app/app.py",
"repo_name": "spencerbraun/pytorch_hackathon",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCentral script for running Dash app.\ndate: 20200823\nauthor: spencerbraun\n\"\"\"\n\nimport pickle\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dt\nimport plotly.express as px\nfrom dash.dependencies import Input, Output\n\nimport pandas as pd\n\nexternal_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\ncolors = {\"background\": \"#E0E0E2\", \"text\": \"#050949\"}\n\nwith open(\"tsne_labeled.pkl\", 'rb') as f:\n tsne = pickle.load(f)\n\ntsne_fig = px.scatter(\n tsne, x=\"x\", y=\"y\",\n color=\"Package\", hover_name=\"PageName\",\n size_max=60)\n\nwith open(\"recommendation_table.pkl\", 'rb') as f:\n table = pickle.load(f)\n\nstates = table.PageName.unique().tolist()\ntable_cols = [\n 'Package', \n 'Language', \n 'Section', \n 'PageName', \n 'Similarity Score',\n 'Link'\n]\n\napp.layout = html.Div(children=[\n html.H1(\n children='Machine Learning Package Lookup',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }\n ),\n\n dcc.Markdown('''\n\n Starting to work in a new machine learning framework or package can be confusing. \n Instead of wading into the documentation from the beginning, \n it can be helpful to find a familiar use case. The documentation for sklearn, caret, numpy, and\n scipy were used to train a doc2vec model implemented in pytorch, giving each module of the documentation\n a dense embedding vector representation. Pairwise similarities between vectors were computed and ranked,\n giving the closest match to each section of the documentation. Below is a [t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) \n low dimensional representation of the embedding vectors and a lookup table for the most similar documentation sections for the one selected. \n\n Written by [Spencer Braun](https://github.com/spencerbraun). \n To view the code used in this project, click [here](https://github.com/spencerbraun/pytorch_hackathon).\n ''',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n html.H4(\n children='t-SNE Plot',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n dcc.Graph(\n id='tsne-plot',\n figure=tsne_fig\n ),\n html.H4(\n children='Related Documentation Lookup',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n dcc.Dropdown(\n id='filter_dropdown',\n options=[{'label':st, 'value':st} for st in states],\n value = states[0]\n ),\n dt.DataTable(\n id='table-container', \n columns=[{'id': c, 'name': c} for c in table_cols]\n )\n])\n\[email protected](\n Output('table-container', 'data'),\n [Input('filter_dropdown', 'value') ] )\ndef display_table(state):\n idx = table.loc[table.PageName == state].index[0]\n outTable = processTable(table, idx)\n return outTable.to_dict('records')\n\ndef processTable(table, idx):\n probs = [\"{:.2f}\".format(x * 100) for x in table.iloc[idx].Rec_Probs]\n locs = table.iloc[idx].Rec_Index\n\n outputTable = (\n table\n .loc[locs]\n .join(pd.DataFrame(probs, index=locs, columns=['Similarity Score']))\n )\n \n return outputTable\n\nif __name__ == '__main__':\n app.run_server()"
},
{
"alpha_fraction": 0.6238581538200378,
"alphanum_fraction": 0.6458892822265625,
"avg_line_length": 24.50684928894043,
"blob_id": "07359b81a41ff79b49090c4f2fdf7ad7ce36778a",
"content_id": "573efe6f1f8fe72845d65bdb771cb369bf7b6e41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1861,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 73,
"path": "/app/data_functions.py",
"repo_name": "spencerbraun/pytorch_hackathon",
"src_encoding": "UTF-8",
"text": "import re\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.spatial import distance\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\n\nMODEL_PATH = 'all_data2_model.dbow_numnoisewords.2_vecdim.100_batchsize.32_lr.0.001000_epoch.95_loss.0.6850212.csv'\n\n\ndef readDocMatrix(path):\n input_lines = []\n with open('all_data2.csv') as f:\n for line in f:\n input_lines.append(line)\n\n output_lines = []\n with open(path) as f:\n for line in f:\n output_lines.append(line)\n\n matched = []\n for line_idx in range(len(output_lines)):\n matched.append((input_lines[line_idx], output_lines[line_idx]))\n \n matched_clean = [x for x in matched if not re.match(\".*[A-Z].*\", x[1])]\n\n return matched_clean\n\n\ndef toNumpyMatrix(matched):\n\n matched_clean_vectors = [x[1].split(\",\") for x in matched[1:]]\n vec_matrix = np.array(matched_clean_vectors)\n\n return vec_matrix\n\n\ndef topNMatches(idx, matched, n=5):\n \n matrix = toNumpyMatrix(matched)\n distances = distance.cdist([matrix[idx]], matrix, \"cosine\")[0]\n\n ind = np.argpartition(distances, n+1)[:(n+1)]\n sorted_ind = ind[np.argsort(distances[ind])][1:]\n min_distances = distances[sorted_ind]\n max_similarity = [1 - x for x in min_distances]\n \n return (max_similarity, sorted_ind)\n\n\ndef tsneMatrix(matched):\n\n doc_matrix = toNumpyMatrix(matched)\n doc_pca = PCA(n_components=5).fit_transform(doc_matrix)\n tsne = TSNE(n_components=2, perplexity=5).fit_transform(doc_pca)\n \n return tsne\n\n\ndef processTable(table, idx):\n probs = [\"{:.2f}\".format(x * 100) for x in table.iloc[idx].Rec_Probs]\n locs = table.iloc[idx].Rec_Index\n\n outputTable = (\n table\n .loc[locs]\n .join(pd.DataFrame(probs, index=locs, columns=['Similarity Score']))\n )\n \n return outputTable"
},
{
"alpha_fraction": 0.8025122284889221,
"alphanum_fraction": 0.8094905614852905,
"avg_line_length": 78.66666412353516,
"blob_id": "b9592ab6a8c973b5afcc45224f263a4cfea0958b",
"content_id": "8d7325501a157115792edd5b8845421966c29a3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1433,
"license_type": "no_license",
"max_line_length": 470,
"num_lines": 18,
"path": "/README.md",
"repo_name": "spencerbraun/pytorch_hackathon",
"src_encoding": "UTF-8",
"text": "# PyTorch Summer 2020 Hackathon\n\nThis project was built for the PyTorch Summer 2020 Hackathon. It has a few pieces that are still a work in progress, but the general outline is below. The submission can be viewed [here](https://devpost.com/software/math-notetaker).\n\n## Documentation Similarity\n\nI scraped the documentation for machine learning packages like Scipy, Numpy, Caret, and Sklearn and built embedding vectors for each section using a doc2vec / paragraph vectors model. I then matched the top 5 most similar vectors to each section to enable recommendations between packages. A more constructed version of this project could serve as an easy way to onboard to new general machine learing packages and find relevant resources to flatten the learning curve. \n\nThe recommendations and t-SNE projections of the vectors can be viewed on [this dash app](http://spencerbraun.pythonanywhere.com/), hosted on pythonanywhere. \n\n## arXiv Article recommendations\n\nI started on a second direct, embedding arXiv abstracts for machine learning papers using a BERT transformer. This work is ongoing but could similarly serve to relate packages to relevant papers that provide the theory behind the algorithms. \n\n\n## Future Directions\n\nI started building a CNN model as another way to create document embeddings. While it wasn't finished in time for the competition, language CNNs are an interesting way to maintain structure in embeddings."
}
] | 3 |
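data_functions.py above ranks documentation sections by cosine similarity using scipy's cdist followed by argpartition. A self-contained sketch of that top-N lookup on a toy embedding matrix (random values standing in for the repo's doc2vec vectors):

```python
import numpy as np
from scipy.spatial import distance

# Toy embedding matrix: rows play the role of documentation sections
# (hypothetical values, not the trained doc2vec vectors).
rng = np.random.default_rng(42)
matrix = rng.standard_normal((20, 100))

def top_n_matches(idx, matrix, n=5):
    # Cosine distance from row `idx` to every row, as in topNMatches above.
    dists = distance.cdist([matrix[idx]], matrix, "cosine")[0]
    ind = np.argpartition(dists, n + 1)[: n + 1]            # cheap candidate set
    sorted_ind = ind[np.argsort(dists[ind])][1:]            # drop the self-match
    return [(int(i), 1.0 - dists[i]) for i in sorted_ind]   # (index, similarity)

print(top_n_matches(0, matrix))
```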
CantusLupus/Python_messenger
|
https://github.com/CantusLupus/Python_messenger
|
1a5264276738e9fba8945cdc78f6f3d5cd471160
|
f6436aa1152231fda6a111bc78ed29bd4826e333
|
faab78b9d8b91888c373b25dbca5a75ceb1a8ec4
|
refs/heads/main
| 2023-04-18T04:08:42.317400 | 2021-05-07T14:01:36 | 2021-05-07T14:01:36 | 365,247,984 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4784946143627167,
"alphanum_fraction": 0.4940262734889984,
"avg_line_length": 20.931507110595703,
"blob_id": "031af6842111d27a38a09b7f0194d95b76f8b73c",
"content_id": "f207429a791e2f600d4efc540e9422579df7f510",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1674,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 73,
"path": "/server.py",
"repo_name": "CantusLupus/Python_messenger",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\r\nfrom flask import Flask, request, abort\r\nimport time\r\n\r\napp = Flask(__name__)\r\n\r\ndb = []\r\n\r\[email protected](\"/status\", methods=['get'])\r\n\r\ndef status():\r\n return {\r\n 'status': True,\r\n 'name': 'Messanger',\r\n 'time1': time.asctime(),\r\n 'time2': time.time(),\r\n 'time3': datetime.now(),\r\n 'time4': str(datetime.now()),\r\n 'time5': datetime.now().strftime('%Y/%m/%d time: %H/%M/%S'),\r\n 'time6': datetime.now().isoformat(),\r\n 'users': len(set([db[i]['name'] for i in range(len(db))]))\r\n }\r\n\r\n\r\[email protected](\"/send\", methods=['POST'])\r\ndef send_message():\r\n data = request.json\r\n\r\n if not isinstance(data, dict):\r\n return abort(400)\r\n # if set(data.keys()) != {'name', 'text'}:\r\n # return abort(400)\r\n if 'name' not in data or 'text' not in data:\r\n return abort(400)\r\n if len(data) != 2:\r\n return abort(400)\r\n\r\n name = data['name']\r\n text = data['text']\r\n\r\n if not isinstance(name, str) or \\\r\n not isinstance(text, str) or \\\r\n name == '' or \\\r\n text == '':\r\n return abort(400)\r\n\r\n message = {\r\n 'time': time.time(),\r\n 'name': name,\r\n 'text': text,\r\n }\r\n db.append(message)\r\n return {'ok': True}\r\n\r\n\r\[email protected](\"/messages\")\r\ndef get_message():\r\n try:\r\n after = float(request.args['after'])\r\n except:\r\n return abort(400)\r\n\r\n result = []\r\n for message in db:\r\n if message['time'] > after:\r\n result.append(message)\r\n if len(result) >= 1:\r\n break\r\n\r\n return {'messages': result}\r\n\r\n\r\napp.run()\r\n"
},
{
"alpha_fraction": 0.7389557957649231,
"alphanum_fraction": 0.7469879388809204,
"avg_line_length": 48.400001525878906,
"blob_id": "9352a05fcb9d4292ada2931de3fcab8424b0f566",
"content_id": "7fad925301ddaed858f33ca051fce478b058ca30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 5,
"path": "/README.md",
"repo_name": "CantusLupus/Python_messenger",
"src_encoding": "UTF-8",
"text": "# Python_messenger\nSimple Python Messenger created by using framework Flask and UI toolkit PyQt6\n\n### You can create your own UI with PyQt6 and convert it to .py file\n### You can also check how to do this [here](https://youtu.be/TiHOQwzBOOc) (ru)\n\n\n"
}
] | 2 |
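server.py above exposes /send and /messages, with /messages filtered by an 'after' timestamp so a client can fetch only messages newer than the last one it saw. A minimal polling client sketch, assuming the dev server is running locally on Flask's default port 5000:

```python
import time
import requests

BASE = "http://127.0.0.1:5000"  # assumes the Flask dev server's default address

def send(name, text):
    requests.post(f"{BASE}/send", json={"name": name, "text": text}).raise_for_status()

def poll(after=0.0):
    while True:
        r = requests.get(f"{BASE}/messages", params={"after": after})
        r.raise_for_status()
        for m in r.json()["messages"]:
            print(f"{m['name']}: {m['text']}")
            after = m["time"]  # only request newer messages on the next pass
        time.sleep(1)

if __name__ == "__main__":
    send("tester", "hello")
    poll()
```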
qlindazm/coinCollector
|
https://github.com/qlindazm/coinCollector
|
1f2e161259a0f24f4a3c80a83394ed8e394530f3
|
29a00c53b38a3eaceda62ef8f3702bbc27a4ecf4
|
667d1174d3513c457c9ead03823ab8eedb3b7840
|
refs/heads/master
| 2020-03-27T07:23:48.714939 | 2018-08-26T15:44:45 | 2018-08-26T15:44:45 | 146,187,914 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6386010646820068,
"alphanum_fraction": 0.6761658191680908,
"avg_line_length": 21.676469802856445,
"blob_id": "2d4a98b888b81ba1b725c27e62bcd8e32ebb78a6",
"content_id": "8874163ff3768a2723107cf78b0c41e20995ceab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 34,
"path": "/README.md",
"repo_name": "qlindazm/coinCollector",
"src_encoding": "UTF-8",
"text": "## King of Glory刷金币脚本\n\n忽然发现了写好的[轮子](https://github.com/xyxsns/King-of-glory)有兴趣可以看这个,有更详细的介绍及相关资料,代码也比我的好看hhh\n\n### 需要\n\n- **adb调试**\n- **开启usb调试并允许模拟点击**\n- **需要可以打冒险模式的通天塔**\n- **狄仁杰专用(其他英雄不保证可行)**\n- **效率大概在1分17秒19~29金币(取决于铭文等级,等级高可以打大师级,29金币/次)**\n- **需要调整参数:位置参数(和屏幕有关),时间参数(和设备与铭文加的移速有关)**\n- **中途被提醒你下线的小妲己或者对战邀请之类的对话框打断可能会产生问题,需要重启**\n- **需要把“自动”关掉,会影响操作**\n- **注意方向问题,测试默认是听筒朝左的方向**\n\n\n\n\n\n以下是示例图,根据自己手机实际的按钮位置调整参数 \n\n<img src=\"./1.png\" width = \"400\" align=center />\n\n- 这里只用一个狄仁杰,记得关掉“自动”选项 \n\n<img src=\"./2.png\" width = \"400\" align=center />\n<img src=\"./3.png\" width = \"400\" align=center />\n<img src=\"./4.png\" width = \"400\" align=center />\n\n- 两个小红点就是用来确定方向的mark point \n同时根据上图确定left joystick, basic attack, 以及两个ability的位置\n\n<img src=\"./5.png\" width = \"400\" align=left />\n\n"
},
{
"alpha_fraction": 0.6168145537376404,
"alphanum_fraction": 0.6662442088127136,
"avg_line_length": 61.31578826904297,
"blob_id": "261052294c239a488771031e17dd7dcde9fbeb19",
"content_id": "c1f09130b35806e188dca9d9297af22a9e3d746b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2423,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 38,
"path": "/coinCollector.py",
"repo_name": "qlindazm/coinCollector",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n# device: Mi8SE\n# screen: 2244x1080\n# adb support\n# open USB debugging and allow simulating input via USB debugging\nimport os\nimport time\n\nfir_in_button_x, fir_in_button_y = 1485, 870 # first button, click to start\ncheck_in_button_x, check_in_button_y = 1400, 750 # dialog:英雄不足三个是否继续\ntime_waiting = 13 # loading time (second)\nskip_x, skip_y = 2060, 65 # skip useless story\ndirec_x1, direc_y1, direc_x2, direc_y2 = 900, 364, 995, 485 # two mark points, calculate the direction \ndirec_tan = (direc_y2 - direc_y1) / (direc_x2 - direc_x1)\nmagic_num1 = 35 # magic number to fix the direction so that you can walking straight\nmove_x, move_y = 450, 850 # the center of the left joystick\ntime_move1 = 32000 # time from the very begining to the boss, fixing according to your own moving speed (millisecond)\nult_ability_x, ult_ability_y = 1860, 620 # the position of the ultimate ability joystick(就是大招,反正之前打dota大招叫ultimate ability)\nsec_ability_x, sec_ability_y = 1650, 750 # second ability \nA_x, A_y = 1850, 900 # basic attack\nagain_x, again_y = 1800, 980 # button(再次挑战)\n\nwhile(True):\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(fir_in_button_x, fir_in_button_y, fir_in_button_x, fir_in_button_y, 10)) # start the game (see: 1.png)\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(check_in_button_x, check_in_button_y, check_in_button_x, check_in_button_y, 10)) # check dialog (see: 2.png)\n\ttime.sleep(time_waiting) # wait for loading\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(skip_x, skip_y, skip_x, skip_y, 10)) # skip story\n\tprint('move')\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(move_x, move_y, move_x-50, move_y-50*direc_tan-magic_num1, time_move1)) # move to the boss\n\tprint('attack')\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(ult_ability_x, ult_ability_y, ult_ability_x, ult_ability_y, 3)) # ultimate ability\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(sec_ability_x, sec_ability_y, sec_ability_x, sec_ability_y, 3)) # second ability\n\tprint('A')\n\tfor i in range(40):\n\t\tos.system('adb shell input swipe {} {} {} {} {}'.format(A_x, A_y, A_x, A_y, 1)) # basic attack and skip following story\n\tprint('done')\n\tos.system('adb shell input swipe {} {} {} {} {}'.format(again_x, again_y, again_x, again_y, 10)) # play again?\n\ttime.sleep(3)"
}
] | 2 |
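coinCollector.py above drives every tap through `adb shell input swipe` with identical start and end coordinates, which behaves like a timed tap. A sketch of the same call via subprocess instead of os.system, which sidesteps shell string-interpolation mistakes (coordinates are placeholders to be tuned per device, as the README notes):

```python
import subprocess

def tap(x, y, duration_ms=10):
    # "input swipe x y x y t" with identical endpoints acts as a tap held for t ms.
    subprocess.run(
        ["adb", "shell", "input", "swipe",
         str(x), str(y), str(x), str(y), str(duration_ms)],
        check=True,
    )

tap(1485, 870)  # e.g. the "start" button position used by coinCollector.py
```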
LieonShelly/PythonDemo
|
https://github.com/LieonShelly/PythonDemo
|
47fa9f8af480c4baca27c95632c6fec8ba379a04
|
39ebd41074671c59297adfeb86adf733261ad6d4
|
be071e6027a118240523b0730e9464a4bed3c706
|
refs/heads/master
| 2020-03-18T23:20:25.637198 | 2018-06-04T15:07:07 | 2018-06-04T15:07:07 | 135,396,824 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5585106611251831,
"alphanum_fraction": 0.5585106611251831,
"avg_line_length": 24.133333206176758,
"blob_id": "f71b52de63d86758f09253317b1109d5f95bf165",
"content_id": "7bc67607ef02cf33c02fc75a9dd804a99dab2482",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 392,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 15,
"path": "/urllib_test08.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\nfrom urllib import error\n\nif __name__ == \"__main__\":\n #一个不存在的连接\n url = \"asdf\"\n req = request.Request(url)\n try:\n response = request.urlopen(req)\n except error.URLError as e:\n if hasattr(e, 'code'):\n print('HTTPERROR')\n elif hasattr(e, 'reason'):\n print('URLERROR')\n print(e.reason)"
},
{
"alpha_fraction": 0.6238341927528381,
"alphanum_fraction": 0.6849740743637085,
"avg_line_length": 29.1875,
"blob_id": "b3b93ea6a8c04d403f353ab491c59f3d0b63a7fd",
"content_id": "8a765c3211381905bfe050215dea4fa790eab512",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 983,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 32,
"path": "/translate_test.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\nfrom urllib import parse\nimport json\n\nif __name__ == \"__main__\":\n\tRequest_URL = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'\n\tForm_Data = {}\n\tForm_Data['from'] = 'zh-CHS'\n\tForm_Data['i'] = '引导'\n\tForm_Data['to'] = 'en'\n\tForm_Data['smartresult'] = 'dict'\n\tForm_Data['client'] = 'fanyideskweb'\n\tForm_Data['salt'] = '1527695103127'\n\tForm_Data['sign'] = '5a68db9031869c618d014ab5d4cdfe05'\n\n\n\tForm_Data['doctype'] = 'json'\n\tForm_Data['sign'] = '5a68db9031869c618d014ab5d4cdfe05'\n\n\n\tForm_Data['version'] = '2.1'\n\tForm_Data['keyfrom'] = 'fanyi.web'\n\n\tForm_Data['action'] = 'FY_BY_CLICKBUTTION'\n\tForm_Data['typoResult'] = 'false'\n\n\tdata = parse.urlencode(Form_Data).encode('utf-8')\n\tresponse = request.urlopen(Request_URL, data)\n\thtml = response.read().decode('utf-8')\n\ttranslate_results = json.loads(html)\n\t# translate_results = translate_results['translateResult'][0][0]['tgt']\n\tprint(\"翻译的结果是:%s\" % translate_results)"
},
{
"alpha_fraction": 0.65864098072052,
"alphanum_fraction": 0.6730872392654419,
"avg_line_length": 25.685714721679688,
"blob_id": "3d72427966a53cb2a71da6e8d221f42fafcda78e",
"content_id": "e82866d829bb3b284dd6ee8834d23e1423a95257",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1917,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 70,
"path": "/msql_connect.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Enum, DATE\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import func\nimport time\n\nengine = create_engine('mysql+pymysql://root:lieon1992316@localhost:3306/demo', echo=False)\nBase = declarative_base()\n\nclass User(Base):\n __tablename__ = 'test_user'\n id = Column(Integer, primary_key=True)\n name = Column(String(64))\n fullname = Column(String(64))\n password = Column(String(64))\n\n # 打印该对象\n def __repr__(self):\n return \"<User(name='%s', fullname='%s', password='%s')>\" % (\n self.name, self.fullname, self.password)\n\n\n\nclass Student(Base):\n __tablename__ = 'student'\n id = Column(Integer, primary_key=True)\n name = Column(String(64))\n register_date = Column(DATE)\n sex = Column(Enum(\"M\", \"F\"))\n\n # 打印该对象\n def __repr__(self):\n return \"<User(name='%s', register_date='%s', sex='%s')>\" % (\n self.name, self.register_date, self.sex)\n\nBase.metadata.create_all(engine)\nSession = sessionmaker()\nSession.configure(bind=engine)\nsession = Session()\n\n# 增加数据\ned_user = User()\ned_user.name = \"lieon\"\ned_user.fullname = \"lieon lee\"\ned_user.password = \"lieon1992316\"\n# session.add(ed_user)\n\nstudent = Student()\nstudent.name = \"test\"\nstudent.register_date = \"2018-09-09\"\nstudent.sex = \"M\"\n# session.add(student)\n\n# 查数据\nquryUser = session.query(User).filter_by(name='lieon').first()\nprint(quryUser)\n# Querying with Joins\nprint(session.query(User, Student).filter(User.id == Student.id).all())\n#统计\nprint(session.query(User).filter(User.name.like('li%')).count())\n#分组\nprint(session.query(func.count(User.name), User.name).group_by(User.name).all())\n# 改数据\nquryUser.password = \"21321qw\";\n\nsession.commit()\n\n# | id | name | register_date | sex |\n\n"
},
{
"alpha_fraction": 0.682692289352417,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 19.899999618530273,
"blob_id": "70e8b3110254f526d1d9ca9192e2658685f1fdeb",
"content_id": "08fac984a517aa50c8e09e458c163dfe97cf2df4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 208,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 10,
"path": "/urllib_test07.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\nfrom urllib import error\n\nif __name__ == \"__main__\":\n\turl = \"asdf\"\n\treq = request.Request(url)\n\ttry:\n\t\tresponse = request.urlopen(req)\n\texcept error.HTTPError as e:\n\t\tprint(e.code)"
},
{
"alpha_fraction": 0.6133005023002625,
"alphanum_fraction": 0.6847290396690369,
"avg_line_length": 30.269229888916016,
"blob_id": "4c9b5db89586705bf890c1c68da3f81900e89f5d",
"content_id": "9a4c9533a937540065505de23c4633cb75a64697",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 896,
"license_type": "permissive",
"max_line_length": 151,
"num_lines": 26,
"path": "/urllib_test09.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\n\nif __name__ == \"__main__\":\n\turl = 'http://www.csdn.net/'\n\thead = {\n\t\t'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19'\n\t}\n\treq = request.Request(url, headers=head)\n\tresponse = request.urlopen(req)\n\thtml = response.read().decode('utf-8')\n\tprint(html)\n\n\t# 以CSDN为例,CSDN不更改User Agent是无法访问的\n\turl = 'http://www.csdn.net/'\n\thead = {}\n\t# 写入User Agent信息\n\thead[\n\t\t'User-Agent'] = 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19'\n\t# 创建Request对象\n\treq = request.Request(url, headers=head)\n\t# 传入创建好的Request对象\n\tresponse = request.urlopen(req)\n\t# 读取响应信息并解码\n\thtml = response.read().decode('utf-8')\n\t# 打印信息\n\tprint(html)"
},
{
"alpha_fraction": 0.7116104960441589,
"alphanum_fraction": 0.7490636706352234,
"avg_line_length": 30.47058868408203,
"blob_id": "ce1b9b81588dee4cf6bc41877fba7583365271d2",
"content_id": "177baa3086a90e8b4307daf446083c3bb714c432",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 534,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 17,
"path": "/InnerModule.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import time\nimport datetime\nimport random\n\nstring_2_struct = time.strptime(\"2016/05/22\",\"%Y/%m/%d\")\ntime.mktime(string_2_struct)\ntime.strftime(\"%Y-%m-%d %H:%M:%S\",time.gmtime())\ndatetime.datetime.now()\ndatetime.date.fromtimestamp(time.time())\ndatetime.datetime.now()\ndatetime.datetime.now() + datetime.timedelta(3)\ndatetime.datetime.now() + datetime.timedelta(-3)\ndatetime.datetime.now() + datetime.timedelta(hours=3)\ndatetime.datetime.now() + datetime.timedelta(minutes=30)\nrandom.random()\nrandom.randint(1,2)\nrandom.randrange(1,10)"
},
{
"alpha_fraction": 0.6086398363113403,
"alphanum_fraction": 0.6544054746627808,
"avg_line_length": 29.776315689086914,
"blob_id": "344d4989f3905e16b0ee66ca73a6b39c41d5406e",
"content_id": "6a812225d65170c13288b21759e4ceb316443d7c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2352,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 76,
"path": "/picture.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport requests\nimport os\nfrom urllib.request import urlretrieve\nimport time\n\nif __name__ == '__main__':\n\tlist_url = []\n\tfor num in range(1, 2):\n\t\tif num == 1:\n\t\t\turl = 'http://www.shuaia.net/index.html'\n\t\telse:\n\t\t\turl = 'http://www.shuaia.net/index_%d.html' % num\n\t\theaders = {\n\t\t\t\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n\t\t}\n\t\treq = requests.get(url=url, headers=headers)\n\t\treq.encoding = 'utf-8'\n\t\thtml = req.text\n\t\tbf = BeautifulSoup(html, 'lxml')\n\t\ttargets_url = bf.find_all(class_='item-img')\n\n\n\tfor each in targets_url:\n\t\tlist_url.append(each.img.get('alt') + '=' + each.get('href'))\n\tfor each_img in list_url:\n\t\timg_info = each_img.split('=')\n\t\ttarget_url = img_info[1]\n\t\tfilename = img_info[0] + '.jpg'\n\t\theaders = {\n\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'\n\t\t}\n\t\timg_req = requests.get(url=target_url, headers = headers)\n\t\timg_req.encoding = 'utf-8'\n\t\timg_html = img_req.text\n\t\timg_html_bf = BeautifulSoup(img_html, 'lxml')\n\t\tdiv_content_list = img_html_bf.find_all('div', class_ = 'wr-single-content-list')\n\t\tdiv_content_list_bf = BeautifulSoup(str(div_content_list), 'lxml')\n\t\timg_url = 'http://www.shuaia.net' + div_content_list_bf.div.img.get('src')\n\t\tif 'images' not in os.listdir():\n\t\t\tos.makedirs('images')\n\t\turlretrieve(url=img_url, filename='images/' + filename)\n\t\ttime.sleep(1)\n\t\tprint('done')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t# target_url = 'http://www.shuaia.net/oumei/2018-05-03/14954.html'\n\t# filename = '痞气十足的酷男' + '.jpg'\n\t# headers = {\n\t# \t\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n\t# }\n\t# img_req = requests.get(url=target_url, headers=headers)\n\t# img_req.encoding = 'utf-8'\n\t# img_html = img_req.text\n\t# img_html_bf = BeautifulSoup(img_html, 'lxml')\n\t# div_content_list = img_html_bf.find_all('div', class_ = 'wr-single-content-list')\n\t# div_content_list_bf = BeautifulSoup(str(div_content_list), 'lxml')\n\t# img_url = 'http://www.shuaia.net' + div_content_list_bf.div.img.get('src')\n\t# if 'images' not in os.listdir():\n\t# \tos.makedirs('images')\n\t# urlretrieve(url=img_url, filename='images/' + filename)\n\t# print('done')"
},
{
"alpha_fraction": 0.5751252174377441,
"alphanum_fraction": 0.5843071937561035,
"avg_line_length": 22.0256404876709,
"blob_id": "0d2916895aa86b8ff47c19049b35f35317cb271d",
"content_id": "8498f955f8a928b16195231f9eae8645dbd72c47",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3714,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 156,
"path": "/var.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "# python是动态语言\nprint(\"hello world\")\nname = \"lieon\"\nprint(name)\nname1 = name\nname = \"pao\"\nprint(name1)\nif name == name1:\n print(\"equal\")\nelif name != name1:\n print(\"....\")\nelse:\n print(\"not equal\")\n\ncount = 0\nmy_integers = [5,2,123,1231,123]\nprint(my_integers[0])\nreleatives_names = [\n \"Toshaasf\",\n \"asdf\",\n \"Yuji\",\n \"Bruno\",\n \"Kaio\"\n]\nbookself = []\nbookself.append(\"asf\")\nbookself.append(\"sfa\")\n# Dictionary\ndictionary_example = {\n \"key1\": \"value1\",\n \"key2\": \"value2\",\n \"key3\": \"value3\"\n}\ndictionary_tk = {\n \"name\": \"Leandro\",\n \"nickiname\": \"TK\",\n \"nationality\": \"Brazilian\"\n}\nprint(\"my name is %s\" %(dictionary_tk[\"name\"]))\ndictionary_tk[\"age\"] = 24\nfor book in bookself:\n print(book)\n\nfor key in dictionary_tk:\n print(\"%s --> %s\" % (key, dictionary_tk[key]))\n\nfor key, value in dictionary_tk.items():\n print(\"%s -----> %s\" % (key, value))\n\n################ CLASS ###########################\nclass Vehicle:\n pass\nclass Vehicle:\n def __init__(self, number_of_wheels, type_of_trunk, seating_capacity):\n self.number_of_wheels = number_of_wheels\n self.type_of_tank = type_of_trunk\n self.seating_capacity = seating_capacity\n\n def number_of_wheels(self):\n return 4\n\n def set_number_of_wheels(self, number):\n self.number_of_wheels = number\n\n @property\n def number_of_wheels(self):\n return self.number_of_wheels\n\n @number_of_wheels.setter\n def number_of_wheels(self, number):\n self.number_of_wheels = number\n\n def make_noise(self):\n print('VARRRRRR')\ntesla_model_s = Vehicle(4,\"electric\",5)\nprint(tesla_model_s.number_of_wheels)\ntesla_model_s.make_noise()\n\nclass Person:\n first_name = 'TK'\n # non-public\n _email = 'private email'\n\n def show_age(self):\n return self._email\n #non-public method\n def _show_emali(self):\n return self._email\n\nclass Car:\n def __init__(self, number_of_wheels, seating_capacity, maximum_velocity):\n self.number_of_wheels = number_of_wheels\n self.seating_capacity = seating_capacity\n self.maximum_velocity = maximum_velocity\n\nclass ElectriCar(Car):\n def __init__(self, number_of_wheels, seating_capacity, maximum_velocity):\n Car.__init__(self, number_of_wheels, seating_capacity, maximum_velocity)\n\nclass Dog:\n\n def __init__(self): #构造方法\n\n def __del__(self): #析构方法\n\n def __call__(self, *args, **kwargs):# 对象后加括号时调用\n\n @staticmethod\n def eat(self):\n print(\"asdf\")\n\n @classmethod\n def bark(self):\n print (\"asdfas\")\n @property\n def flight_status(self):\n print ('drik')\n @flight_status.setter\n def flight_status(self,status):\n print ('drik')\n @flight_status.deleter\n def flight_status(self):\n print ('drik')\n\nd = Dog()\nd.eat(d)\nd.flight_status = 2;\nprint(Dog.__doc__) #输出类的描述信息\nprint(d.__module__) # 输出改动对象所在的模块\nprint(d.__class__) # 输出该对象的类\n\nclass Foo:\n def __getitem__(self, key):\n print('__getitem__', key)\n\n def __setitem__(self, key, value):\n print('__setitem__', key, value)\n\n def __delitem__(self, key):\n print('__delitem__',key)\n\n def __init__(self,name):\n self.name = name\n\nobj = Foo()\nresult = obj['k1'] # # 自动触发执行 __getitem__\nobj['k2'] = 'lieon' # 自动触发执行 __setitem__\ndel obj['k1']\n\nclass MyType(type):\n def __init__(self, *args, **kwargs):\n print(\"Mytype __init___\", *args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n print(\"Mytype __call__\", *args, ** kwargs)\n obj = self.__new__(self)\n\n\n"
},
{
"alpha_fraction": 0.6912280917167664,
"alphanum_fraction": 0.7043859362602234,
"avg_line_length": 32.55882263183594,
"blob_id": "4be93c2c7a814a3eb41ef3959b1e9806b71b02e5",
"content_id": "0b20e0df8e9033c8468651f8413374d3e11d045b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1150,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 34,
"path": "/orm_mfk.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "# 多外键关联\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Enum, DATE, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy import func\nimport time\n\nengine = create_engine('mysql+pymysql://root:lieon1992316@localhost:3306/demo', echo=False)\nBase = declarative_base()\n\nclass Customer(Base):\n __tablename__ = 'customer'\n id = Column(Integer, primary_key=True)\n name = Column(String(64))\n bill_address_id = Column(Integer, ForeignKey('address.id'))\n ship_address_id = Column(Integer, ForeignKey('address.id'))\n\n billing_address = relationship('Address', foreign_keys = [bill_address_id])\n shiping_address = relationship('Address', foreign_keys = [ship_address_id])\n\n def __repr__(self):\n return \"<Customer(name='%s')>\" % (self.name)\n\nclass Address(Base):\n __tablename__ = 'address'\n id = Column(Integer, primary_key=True)\n street = Column(String(64))\n\n def __repr__(self):\n return \"<Address(street='%s')>\" % (self.street)\n\nBase.metadata.create_all(engine)"
},
{
"alpha_fraction": 0.6936416029930115,
"alphanum_fraction": 0.6994219422340393,
"avg_line_length": 26.263158798217773,
"blob_id": "6bc76f95d8e7ee3811ee35415ad61f17eff0f7c6",
"content_id": "9df788ee8e586f814bbe92fcfe733a2552aeddf7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 19,
"path": "/my_novel.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\nfrom bs4 import BeautifulSoup\n\n\nif __name__ == \"__main__\":\n\n\n\n\nclass MyNovelDownloader(object):\n\tdef get_chapter_content(self, chapter_url):\n\t target_url = chapter_url\n\t\treq = request.Request(target_url)\n\t\tresponse = request.urlopen(req)\n\t\thtml = response.read().decode('utf-8')\n\t\thtml_soup = BeautifulSoup(html, 'lxml')\n\t\tdiv_text = html_soup.find_all('div', id='TextContent')\n\t\ttext_content_soup = BeautifulSoup(str(div_text), 'lxml')\n\t\treturn text_content_soup.contents[0].div.text\n\n"
},
{
"alpha_fraction": 0.380952388048172,
"alphanum_fraction": 0.5476190447807312,
"avg_line_length": 13,
"blob_id": "0410fba05c081ef4e4c39bdc50dcb1a5b41fe0e2",
"content_id": "45490ffd36663991d37fab13b8af2cafc28afa56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 42,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 3,
"path": "/common.js",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "/**\n * Created by lieon on 2018/5/25.\n */\n"
},
{
"alpha_fraction": 0.5570341944694519,
"alphanum_fraction": 0.5807985067367554,
"avg_line_length": 19.25,
"blob_id": "0871af320608a6762d1bddf66598f4c4c19027e5",
"content_id": "87074216d408e47f71cd655fbc068120b0e75f6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1148,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 52,
"path": "/Thread.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import threading\nimport time\n\ndef saygi(num):\n print(\"running on number:%s\" %num)\n time.sleep(3)\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=saygi, args=(1,))\n t2 = threading.Thread(target=saygi, args=(2,))\n t1.start()\n t2.start()\n print(t1.getName())\n print(t2.getName())\n\nclass MyThread(threading.Thread):\n def __init__(self, num):\n threading.Thread.__init__(self)\n self.num = num\n\n def run(self):\n print(\"running on number of :%s\" %self.num)\n time.sleep(3)\n\nif __name__ == '__main__':\n t1 = MyThread(1)\n t2 = MyThread(2)\n t1.start()\n t2.start()\n\n#线程锁\n\ndef addNum():\n global num\n print('___get num:',num)\n time.sleep(1)\n lock.acquire() # 修改数据前加锁\n num -= 1 # 对此公共变量进行-1操作\n lock.release() # 修改后释放\n\nnum = 100 # 设定一个共享变量\nthread_list = []\nlock = threading.Lock() # 生成全局锁\nfor i in range(100):\n t = threading.Thread(target=addNum)\n t.start()\n thread_list.append(t)\n\nfor t in thread_list: # 等待所有线程执行完毕\n t.join()\n\nprint('final num:', num)"
},
{
"alpha_fraction": 0.653789758682251,
"alphanum_fraction": 0.6953545212745667,
"avg_line_length": 35.53571319580078,
"blob_id": "a9be58ea99f7aa5a23cb16fb09c7872478adc00e",
"content_id": "22acbc419fb1bf2b313a6c6cc42c74401fe58bdc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2055,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 56,
"path": "/orm_fk.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Enum, DATE, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy import func\nimport time\n\nengine = create_engine('mysql+pymysql://root:lieon1992316@localhost:3306/demo', echo=False)\nBase = declarative_base()\n\nclass Student(Base):\n __tablename__ = 'student'\n id = Column(Integer, primary_key=True)\n name = Column(String(64))\n register_date = Column(DATE)\n\n # 打印该对象\n def __repr__(self):\n return \"<User(name='%s', register_date='%s')>\" % (\n self.name, self.register_date)\n\nclass StudentRecord(Base):\n __tablename__ = \"study_record\"\n id = Column(Integer, primary_key=True)\n day = Column(Integer, nullable=False)\n status = Column(String (32), nullable=False)\n stu_id = Column(Integer, ForeignKey('student.id'))\n\n student = relationship(\"Student\", backref=\"my_study_record\")\n\n def __repr__(self):\n return \"<StudentRecord(name='%s', day='%s', status='%s', status='%d')>\" % (self.student.name,self.day, self.status, self.stu_id)\n\nBase.metadata.create_all(engine)\n\nSession = sessionmaker()\nSession.configure(bind=engine)\nsession = Session()\n\n# s1 = Student(name=\"lieon\", register_date=\"2018-09-01\")\n# s2 = Student(name=\"lieo1\", register_date=\"2018-09-02\")\n# s3 = Student(name=\"lieon2\", register_date=\"2018-09-03\")\n# s4 = Student(name=\"lieon3\", register_date=\"2018-09-04\")\n# s5 = Student(name=\"lieon4\", register_date=\"2018-09-05\")\n#\n# studyRecord0 = StudentRecord(day=1, status=\"yes\", stu_id=2)\n# studyRecord1 = StudentRecord(day=2, status=\"yes\", stu_id=1)\n# studyRecord2 = StudentRecord(day=4, status=\"false\", stu_id=3)\n# studyRecord3 = StudentRecord(day=5, status=\"yes\", stu_id=4)\n#\n# session.add_all([s1, s2, s3, s4, s5, studyRecord0, studyRecord1, studyRecord2, studyRecord3])\n\nstu_obj = session.query(Student).filter(Student.name==\"lieon\").first()\nprint(stu_obj.my_study_record)\nsession.commit()"
},
{
"alpha_fraction": 0.668912410736084,
"alphanum_fraction": 0.6862367391586304,
"avg_line_length": 31.5,
"blob_id": "30230eecdcd1f98a67c96519ac86b008a56fd000",
"content_id": "21bef67e750f3042e6e7a1c967b47cc12cd559bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1045,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 32,
"path": "/orm_m2m.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "# 多对多\n\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Enum, DATE, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy import func, Table\nimport time\n\n\nengine = create_engine('mysql+pymysql://root:lieon1992316@localhost:3306/demo', echo=False)\nBase = declarative_base()\n\nbook_m2m_author = Table('book_m2m_author', Base.metadata,\n Column('book_id', Integer, ForeignKey('book.id')),\n Column('author_id', Integer, ForeignKey('author.id')),\n )\n\nclass Book(Base):\n __tablename__ = \"book\"\n id = Column(Integer, primary_key=True)\n name = Column(String(64), nullable=False)\n authors = relationship('Author', secondary=book_m2m_author, backref='books')\n\n\nclass Author(Base):\n __tablename__ = \"author\"\n id = Column(Integer, primary_key=True)\n name = Column(String(64), nullable=False)\n\nBase.metadata.create_all(engine)"
},
{
"alpha_fraction": 0.6469939351081848,
"alphanum_fraction": 0.6938775777816772,
"avg_line_length": 44.29999923706055,
"blob_id": "745ae04afee832ee4377441ec099d4c65ad088ef",
"content_id": "661bb448727c7cdbd59bd45664ae14dd9cfea577",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1875,
"license_type": "permissive",
"max_line_length": 429,
"num_lines": 40,
"path": "/cookie.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\nfrom urllib import error\nfrom urllib import parse\nfrom http import cookiejar\n\nif __name__ == \"__main__\":\n\tlogin_url = 'http://www.jobbole.com/wp-admin/admin-ajax.php'\n\tuser_agent = \"Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19 Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30Mozilla/5.0 (Linux; U; Android 2.2; en-gb; GT-P1000 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.12.Firefox\"\n\thead = {\n\t\t'User-Agent': user_agent\n\t}\n\tLogin_Data = {}\n\tLogin_Data['action'] = 'user_login'\n\tLogin_Data['redirect_url'] = 'http://www.jobbole.com/'\n\tLogin_Data['remember_me'] = '0' # 是否一个月内自动登陆\n\tLogin_Data['user_login'] = '********' # 改成你自己的用户名\n\tLogin_Data['user_pass'] = '********' # 改成你自己的密码\n\tlogingpostdata = parse.urlencode(Login_Data).encode('utf-8')\n\tcookie = cookiejar.CookieJar()\n\tcookie_support = request.HTTPCookieProcessor(cookie)\n\topener = request.build_opener(cookie_support)\n\treq1 = request.Request(url=login_url, data=logingpostdata, headers=head)\n\tdate_url = 'http://date.jobbole.com/wp-admin/admin-ajax.php'\n\tDate_Data = {}\n\tDate_Data['action'] = 'get_date_contact'\n\tDate_Data['postId'] = '4128'\n\tdatapostdata = parse.urlencode(Date_Data).encode('utf-8')\n\treq2 = request.Request(url=date_url, data=datapostdata, headers=head)\n\n\ttry:\n\t\tresponse1 = opener.open(req1)\n\t\tresponse2 = opener.open(req2)\n\t\thtml = response2.read().decode('utf-8')\n\t\tindex = html.find('jb_contact_email')\n\t\tprint('联系邮箱:%s' % html[index])\n\texcept error.URLError as e:\n\t\tif hasattr(e, 'code'):\n\t\t\tprint(\"HTTPError:%d\" % e.code)\n\t\telif hasattr(e, 'reason'):\n\t\t\tprint(\"URLError:%s\" % e.reason)\n\n"
},
{
"alpha_fraction": 0.601547360420227,
"alphanum_fraction": 0.6141199469566345,
"avg_line_length": 26.236841201782227,
"blob_id": "dac8acbb38981e9bfadaffd3a992d429ae93b89d",
"content_id": "4c4b5e2769db1797d46244b8740c90b34f4e47cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1034,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 38,
"path": "/video_spider.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import requests, re, json\nfrom bs4 import BeautifulSoup\n\nclass video_downloader():\n\tdef __init__(self, url):\n\t\tself.server = 'http://api.xfsub.com'\n\t\tself.api = 'http://api.xfsub.com/xfsub_api/?url='\n\t\tself.get_url_api = 'http://api.xfsub.com/xfsub_api/url.php'\n\t\tself.url = url.split('#')[0]\n\t\tself.target = self.api + self.url\n\t\tself.s = requests.session()\n\n\n\tdef get_key(self):\n\t\treq = self.s.get(url=self.target)\n\t\treq.encoding = 'utf-8'\n\t\tself.info = json.loads(re.findall('\"url.php\",\\ (.+),', req.text)[0])\n\n\n\tdef get_url(self):\n\t\tdata = {\n\t\t\t\t'time': self.info['time'],\n\t\t\t\t'key': self.info['key'],\n\t\t\t\t'url': self.info['url'],\n\t\t\t\t'type': ''\n\t\t }\n\t\treq = self.s.post(url=self.get_url_api, data=data)\n\t\turl = self.server + json.loads(req.text)['url']\n\t\treq = self.s.get(url)\n\t\tbf = BeautifulSoup(req.text, 'xml')\n\t\tvideo_url = bf.find('file').string\n\t\treturn video_url\n\nif __name__ == '__main__':\n\turl = 'http://www.iqiyi.com/v_19rr7qhfg0.html#vfrm=19-9-0-1'\n\tvd = video_downloader(url)\n\tvd.get_key()\n\tprint(vd.get_url())"
},
{
"alpha_fraction": 0.5974025726318359,
"alphanum_fraction": 0.600649356842041,
"avg_line_length": 24.75,
"blob_id": "88b41046f346ebe0cf5847c6e040d259e8eefa97",
"content_id": "7b44c2c8bba2357d2aea773ff19fe211e45d1ce9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 308,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 12,
"path": "/urllib_test06.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\nfrom urllib import error\n\nif __name__ == \"__main__\":\n url = \"htasdfdsafd\";\n req = request.Request(url)\n try:\n respomse = request.urlopen(req)\n html = respomse.read().decode('utf-8')\n print(html)\n except error.URLError as e:\n print(e.reason)"
},
{
"alpha_fraction": 0.7084019780158997,
"alphanum_fraction": 0.7429983615875244,
"avg_line_length": 31,
"blob_id": "594d4d0dea751b699b9ab0d0888744eb1282d136",
"content_id": "2422fcce69423290149fe05f8e82f124c8c8656c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 607,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 19,
"path": "/orm_api.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import orm_mfk\nfrom sqlalchemy.orm import sessionmaker\n\nSession = sessionmaker(bind=orm_mfk.engine)\nsession = Session()\n\naddr1 = orm_mfk.Address(street=\"adsfad\")\naddr2 = orm_mfk.Address(street=\"123\")\naddr3 = orm_mfk.Address(street=\"2321asd\")\n\nc1 = orm_mfk.Customer(name=\"jack\", billing_address = addr1, shiping_address=addr2)\nc2 = orm_mfk.Customer(name=\"lieon\", billing_address = addr2, shiping_address=addr3)\n\n\n# session.add_all([addr1, addr2, addr3, c1, c2])\n\nobj = session.query(orm_mfk.Customer).filter_by(name=\"lieon\").first()\nprint(obj.name, obj.billing_address, obj.shiping_address)\nsession.commit()"
},
{
"alpha_fraction": 0.6888068914413452,
"alphanum_fraction": 0.7343173623085022,
"avg_line_length": 31.559999465942383,
"blob_id": "73db29db24920ca392ab521d0c62238941e2a7c9",
"content_id": "5b4e9a594a330bcf297ba967de5e9eb115fc302f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 813,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 25,
"path": "/orm_m2m_api.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import orm_m2m\nfrom sqlalchemy.orm import sessionmaker\n\nSession = sessionmaker(bind=orm_m2m.engine)\nsession = Session()\n\nauthor0 = orm_m2m.Author(name=\"lieon0\")\nauthor1 = orm_m2m.Author(name=\"lieon1\")\nauthor2 = orm_m2m.Author(name=\"lieon2\")\nauthor3 = orm_m2m.Author(name=\"lieon3\")\n\nbook0 = orm_m2m.Book(name=\"asdf\",authors=[author0, author1])\nbook1 = orm_m2m.Book(name=\"we\",authors=[author1, author2])\nbook2 = orm_m2m.Book(name=\"ew\",authors=[author1, author3])\n\n# session.add_all([author0, author1, author2, author3])\n# session.add_all([book0, book1, book2])\n\nauthor_obj = session.query(orm_m2m.Author).filter_by(name=\"lieon0\").first()\nprint(author_obj, author_obj.books)\n\nbook_obj = session.query(orm_m2m.Book).filter_by(id=3).first()\nprint(book_obj, book_obj.authors)\nbook_obj.authors.remove()\nsession.commit()"
},
{
"alpha_fraction": 0.4878048896789551,
"alphanum_fraction": 0.49024391174316406,
"avg_line_length": 33.25,
"blob_id": "d933bf4b3258923df700af25e23e06b7d60d0098",
"content_id": "5c151184c3c773b66544a7b42189a1360fc59798",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 12,
"path": "/urllib_test01.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "from urllib import request\n\nif __name__ == '__main__':\n\tresponse = request.urlopen('http://fanyi.baidu.com')\n\thtml = response.read()\n\thtml = html.decode('utf-8')\n\tprint(html)\n\tprint('geturl: %s' % response.geturl())\n\tprint('**********************************************')\n\tprint('info: %s' % response.info())\n\tprint('**********************************************')\n\tprint('getcode: %s' % response.getcode())"
},
{
"alpha_fraction": 0.6766110062599182,
"alphanum_fraction": 0.7112171649932861,
"avg_line_length": 37.09090805053711,
"blob_id": "b283095aa429b0790383d6fb8dce4abb20a5ca92",
"content_id": "630bb7a2e60ad2d97da81132065a433909053796",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 844,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 22,
"path": "/biqukan.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import re\nimport os\nimport time\nimport sys\nimport types\nfrom urllib import request\nfrom bs4 import BeautifulSoup\n\nclass Donload(object):\n\tdef __init__(self, target):\n\t\tself.__target_url = target\n\t\tself.__head = {'User-Agent':'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',}\n\n\tdef get_download_url(self):\n\t\tcharter = re.compile(u'[第第](.+)章]', re.IGNORECASE)\n\t\ttarget_req = request.Request(url=self.__target_url, headers=self.__head)\n\t\ttarget_response = request.urlopen(target_req)\n\t\ttarget_html = target_req.read().decode('gbk', 'ignore')\n\t\tlistmain_suop = BeautifulSoup(target_html, 'lxml')\n\t\tchapters = listmain_suop.find_all('div', classs_ = 'listmain')\n\t\tdownload_soup = BeautifulSoup(str(chapters), 'lxml')\n\t\tnovl_name = str(download_soup.dl.dt)\n"
},
{
"alpha_fraction": 0.6375121474266052,
"alphanum_fraction": 0.6433430314064026,
"avg_line_length": 26.83783721923828,
"blob_id": "bfd0579fed70720fbaf0c6aee5fe2c4d7c67b06a",
"content_id": "bec0bf94862b38d7ed7e27a5d91ed3ea635a6f44",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1041,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 37,
"path": "/image_spider.py",
"repo_name": "LieonShelly/PythonDemo",
"src_encoding": "UTF-8",
"text": "import requests, json, time\nfrom contextlib import closing\n\n\nclass Get_phots(object):\n\tdef __init__(self):\n\t\tself.photos_id = []\n\t\tself.download_server = 'https://unsplash.com/photos/xxx/download?force=trues'\n\t\tself.target = 'http://unsplash.com/napi/feeds/home'\n\n\tdef get_ids(self):\n\t\treq = requests.get(url=self.target)\n\t\thtml = json.loads(req.text)\n\t\tnext_page = html['next_page']\n\t\tprint('下一页地址:', next_page)\n\t\tfor each in html['photos']:\n\t\t\tself.photos_id.append(each['id'])\n\n\n\tdef download(self, photo_id, filename):\n\t\ttarget = self.download_server.replace('xxx', photo_id)\n\t\twith closing(requests.get(url=target, stream=True)) as r:\n\t\t\twith open('%d.jpg' % filename, 'ab+') as f:\n\t\t\t\tfor chunk in r.iter_content(chunk_size=1024):\n\t\t\t\t\tif chunk:\n\t\t\t\t\t\tf.write(chunk)\n\t\t\t\t\t\tf.flush()\n\n\nif __name__ == '__main__':\n\tgp = Get_phots()\n\tprint('link image --------')\n\tgp.get_ids()\n\tprint('downloding------')\n\tfor i in range(len(gp.photos_id)):\n\t\tprint('downloading picture NO.%d' % (i + 1))\n\t\tgp.download(gp.photos_id[i], (i + 1))"
}
] | 22 |
mharyam/UUID-Generator
|
https://github.com/mharyam/UUID-Generator
|
3a3a240e6e9414f57772bd17103535a4b0e68a9f
|
b237f10e3bdcb6201653e7e4781e7c951df4c9fd
|
9158f2cdbd4566a47ad6ae5f9a250e2240623d7b
|
refs/heads/master
| 2023-05-13T03:17:40.038281 | 2021-05-25T10:32:50 | 2021-05-25T10:32:50 | 370,636,884 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7594339847564697,
"alphanum_fraction": 0.7594339847564697,
"avg_line_length": 22.66666603088379,
"blob_id": "5eb9983566a87b7fa5a77c659db0393bab5d4df6",
"content_id": "05f0b999b7e4639afbec1e15898a87032bd115aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 9,
"path": "/api/admin.py",
"repo_name": "mharyam/UUID-Generator",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom .models import UUIDRequest\n# Register your models here.\n\n\[email protected](UUIDRequest)\nclass UUIDRequestAdmin(admin.ModelAdmin):\n list_display = ['uuid', 'created', 'id']"
},
{
"alpha_fraction": 0.683748185634613,
"alphanum_fraction": 0.6852123141288757,
"avg_line_length": 28.69565200805664,
"blob_id": "40a1e6ac215c2ca20e8a3240f3db9759ffee082b",
"content_id": "3d4765110aeff41f2573ff4d565a862a42e9fd99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 23,
"path": "/api/views.py",
"repo_name": "mharyam/UUID-Generator",
"src_encoding": "UTF-8",
"text": "import uuid\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom api.models import UUIDRequest\nfrom api.serializers import UUIDSerializer\n\n\nclass UUIDAPIVIEW(APIView):\n \"\"\"\n List all uuid, and create a new uuid.\n \"\"\"\n\n def get(self, request):\n UUIDRequest.objects.create(uuid=str(uuid.uuid4()))\n uuid_dict = {}\n uuid_list = UUIDRequest.objects.all().order_by('-created').values('created', 'uuid')\n for uuid_value in uuid_list:\n uuid_dict[str(uuid_value.get('created'))] = uuid_value.get('uuid')\n return Response(uuid_dict)\n"
},
{
"alpha_fraction": 0.7244094610214233,
"alphanum_fraction": 0.7244094610214233,
"avg_line_length": 17.285715103149414,
"blob_id": "3ff54631f0893ddb561a131f6ef758f854558625",
"content_id": "f6b83c9c9aa0f649b98a9afb279a084c3f6b6819",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 7,
"path": "/api/urls.py",
"repo_name": "mharyam/UUID-Generator",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom api.views import UUIDAPIVIEW\n\nurlpatterns = [\n path('get-uuid', UUIDAPIVIEW.as_view()),\n]"
},
{
"alpha_fraction": 0.659375011920929,
"alphanum_fraction": 0.668749988079071,
"avg_line_length": 20.33333396911621,
"blob_id": "e051f04d9171da8399e2c2a1fe9f547a96b15eeb",
"content_id": "4a52d22ff5b1a156013c6fc5285e275c186e7dcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 15,
"path": "/api/models.py",
"repo_name": "mharyam/UUID-Generator",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\n\nclass UUIDRequest(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n uuid = models.CharField(max_length=500)\n\n def __str__(self):\n return self.uuid\n\n @property\n def key_value(self):\n return {self.created: self.uuid}\n"
},
{
"alpha_fraction": 0.7080745100975037,
"alphanum_fraction": 0.7080745100975037,
"avg_line_length": 23.846153259277344,
"blob_id": "4e0edb2eedf5862885eba00440428198852eb373",
"content_id": "f121f9dfff7fbbc26c82ee3c8e5550136012efb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 322,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 13,
"path": "/api/serializers.py",
"repo_name": "mharyam/UUID-Generator",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\n\nfrom api.models import UUIDRequest\n\n\nclass UUIDSerializer(serializers.ModelSerializer):\n key_value = serializers.SerializerMethodField\n class Meta:\n model = UUIDRequest\n fields = ['key_value', 'uuid', ]\n\n def get_key_value(self):\n return self.key_value"
},
{
"alpha_fraction": 0.6852589845657349,
"alphanum_fraction": 0.7649402618408203,
"avg_line_length": 40.83333206176758,
"blob_id": "3aa67f1ca56c2689e279a6c96a52ee61597014f6",
"content_id": "753348b4ea411399c8ccd23fb4e55415e24466de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 6,
"path": "/README.md",
"repo_name": "mharyam/UUID-Generator",
"src_encoding": "UTF-8",
"text": "# UUID-Generator\nThis app generates a unique id for you with the time it was generated\n\nEndpoint for getting UUID is http://127.0.0.1:8000/api/get-uuid\n\nIf you are not running this on your local server replace http://127.0.0.1:8000 with the right URL\n"
}
] | 6 |
pokyu/SuperDBA
|
https://github.com/pokyu/SuperDBA
|
449fa65047305aa28d5a71afe5827d20f515b92e
|
a76e4e40ccb91d63beba216c559511c9ad91e73f
|
4b4ff10773f9e78fd6197c15d4b633403c35c839
|
refs/heads/master
| 2018-10-23T05:42:17.018048 | 2018-09-13T11:52:37 | 2018-09-13T11:52:37 | 145,412,852 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.43814027309417725,
"alphanum_fraction": 0.4672970771789551,
"avg_line_length": 25.4375,
"blob_id": "c2e15cbf7e034bc5f543972adab43d06ef0d2607",
"content_id": "ddafcda5ac6a9603d4ef5e75ee959f8882b6dbeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1331,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 48,
"path": "/superexplain.py",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n##########################################################################################\n# - Copyright(C), 2018-2020, Chris Tung\n# - File name: superexplain.py\n# - Author: Chris Tung Version: 1.0 Date: 2018-08-23\n# - Description: //mysql的sql执行计划分析\n#\t\t\t\t \n# - Others: \n# - Function List: //主要函数列表,每条记录应包括函数名及功能简要说明\n# - 1. ...\n# - 2. ... \n# - History: \n# - 1. Date: ...\n# Author: ...\n# Modification: ...\n# - 2. ...\n##########################################################################################\n\n\nimport pymysql\n\nclass superexplain(object):\n def __init__(self):\n self.host='10.0.8.197'\n self.username='dbmgr'\n self.password='Muli1234'\n self.port=3306\n \n def main(self,sql):\n sql='explain '+sql\n conn = pymysql.connect(host=self.host,user=self.username,passwd=self.password,port=self.port)\n cursor = conn.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n print (\"SQL Explain : %s \" % data)\n conn.close()\n\n def test(self):\n print(\"test\")\n \n\nif __name__ == '__main__':\n sql ='select * from wesure_blitzcrank.apps limit 10'\n db=superexplain()\n db.test()\n db.main(sql)\n"
},
{
"alpha_fraction": 0.4953271150588989,
"alphanum_fraction": 0.512950599193573,
"avg_line_length": 30.982906341552734,
"blob_id": "7ee057d46a146c3ddbf99fd18a67b016558baa67",
"content_id": "c1e2c59f962cfde8ea955806405216526b267c3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3795,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 117,
"path": "/getlongsql",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n########################################################################################################\n# File name : getlongsql.py\n# Author : Jessetong\n# Date : 08/29/2018\n#\n# Description : Get the Long SQL and record\n#\n# Usage : ./getlongsql.py\n#\n# Prerequisite : None\n# Function List: //主要函数列表,每条记录应包括函数名及功能简要说明\n# 1. kill(): get long sql for more than 30 minutes\n# 2. storge_log_sql(data): store the result from the function 'kill()' return \n#\n# Modifications :\n#\n# When Who What\n# ========== =========== ================================================================\n# 08/29/2018 tongboyu V 1.0 Record the Long SQL for more than 30 minutes.\n########################################################################################################\n\nimport pymysql\nimport argparse\nimport datetime\n\nclass killsql(object):\n\n def __init__(self,kw):\n self.host=kw['host']\n self.username=kw['user']\n self.password=kw['passwd']\n self.port=kw['port']\n\n self.dhost='10.95.32.165'\n self.dusername='dba'\n self.dpassword='wesure@dba2016'\n self.ddatabase='wesure_dba'\n self.dport=3306\n\n\n def kill(self):\n sql = 'SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO FROM information_schema.PROCESSLIST WHERE COMMAND<>\\'Sleep\\' AND TIME >1800'\n conn = pymysql.connect(host=self.host,user=self.username,passwd=self.password,port=self.port)\n cursor=conn.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n #print(data)\n conn.close()\n return data\n \n def storge_log_sql(self,data):\n #sql2 = 'show tables'\n sql = 'INSERT INTO WESURE_PROCESSLIST (PROCESSLIST_ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,CREATE_DATE) VALUE'\n i= 0\n for x in data:\n if i != 0:\n sql += ','\n sql = sql + '('\n for y in x:\n sql += '\"'+str(y) + '\"'+ ','\n sql += 'now())' \n i += 1\n\n print (sql)\n try:\n dconn = pymysql.connect(host=self.dhost,user=self.dusername,passwd=self.dpassword,port=self.dport,database=self.ddatabase)\n print (\"db connect !\")\n dcursor=dconn.cursor()\n dcursor.execute(sql)\n dcursor.execute('commit')\n ddata = dcursor.fetchall()\n print(\"data commit complite!\")\n except Execption as e:\n print (\"error:\",e)\n dconn.close()\n\ndef get_parse_argument():\n function_argument = {\n 'port': 3306,\n 'host': '10.0.8.2',\n 'user':'dbmgr',\n 'passwd':'M4',\n 'charset':'utf8'\n }\n \n parser = argparse.ArgumentParser(description='Created By [email protected].') \n \n parser.add_argument(\"-u\",\"--user\", help=\"db username\")\n parser.add_argument(\"-p\",\"--passwd\", help=\"db password\")\n parser.add_argument(\"-H\",\"--host\", help=\"db host ip\")\n parser.add_argument(\"-P\",\"--port\", help=\"Port number to use for mysql connection(default 3306).\", type=int,default=3306)\n\n args = parser.parse_args() \n\n if args.user :\n function_argument['user']=args.user\n\n if args.passwd :\n function_argument['passwd']=args.passwd\n \n if args.host :\n function_argument['host']=args.host\n\n if args.port :\n function_argument['port']=args.port\n \n return function_argument\n\n\nif __name__ == '__main__':\n print (\"Execute Time : \",datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n param=get_parse_argument()\n db=killsql(param)\n data=db.kill()\n db.storge_log_sql(data)\n\n\n\n"
},
{
"alpha_fraction": 0.5116777420043945,
"alphanum_fraction": 0.541252076625824,
"avg_line_length": 47,
"blob_id": "66dc5fcc1c859160e40d421ec6c3e1ec6939fc37",
"content_id": "b12ffd10eed34794a61245a18e04290b46db8648",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29198,
"license_type": "no_license",
"max_line_length": 623,
"num_lines": 603,
"path": "/supermysql.py",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n##########################################################################################\n# - Copyright(C), 2017-2020, Chris Tung\n# - File name: hdfs_reblance.sh\n# - Author: Chris Tung Version: 1.0 Date: 2018-08-23\n# - Description: //用于详细说明程序文件完成的主要功能,与其它模块或函接口,输出值,取值范\n#\t\t\t\t 围,含意及参数间的控制、顺序、独立或依赖等关系\n# - Others: \n# - Function List: //主要函数列表,每条记录应包括函数名及功能简要说明\n# - 1. ...\n# - 2. ... \n# - History: \n# - 1. Date: ...\n# Author: ...\n# Modification: ...\n# - 2. ...\n##########################################################################################\n\nimport pymysql\nimport argparse\nimport time\nimport re\n\nmysql_headline1 = ''\nmysql_headline2 = ''\n\n\nclass SuperMySQL(object):\n def __init__(self,kw):\n\n self.host=kw['host']\n self.username=kw['user']\n self.password=kw['passwd']\n self.port=kw['port']\n self.charset=kw['charset']\n self.dbconn = pymysql.connect(host=self.host,user=self.username,passwd=self.password,port=self.port,charset=self.charset)\n self.showtype = kw['showtype']\n #self.showtype = ['current_time','com', 'innodb_hit', 'innodb_rows', 'innodb_pages', 'innodb_data', 'innodb_log', 'innodb_status', 'threads', 'bytes']\n\n self.innodbstatscount = 0\n self.not_first = 0\n self.interval = kw['interval']\n self.innodb_stats_predata = {\n 'Com_select':0,\n 'Com_delete':0 ,\n 'Com_update':0 ,\n 'Com_insert': 0,\n 'Innodb_buffer_pool_read_requests': 0,\n 'Innodb_rows_inserted': 0 ,\n 'Innodb_rows_updated': 0 ,\n 'Innodb_rows_deleted': 0 ,\n 'Innodb_rows_read': 0,\n 'Threads_created': 0,\n 'Bytes_received': 0,\n 'Bytes_sent':0,\n 'Innodb_buffer_pool_pages_flushed': 0,\n 'Innodb_data_read':0,\n 'Innodb_data_reads': 0,\n 'Innodb_data_writes': 0,\n 'Innodb_data_written': 0,\n 'Innodb_os_log_fsyncs': 0,\n 'Innodb_os_log_written': 0\n }\n\n def select(self):\n #conn = pymysql.connect(self.host,self.username,self.password,self.databases)\n conn = pymysql.connect(host=self.host,user=self.username,passwd=self.password,port=self.port,charset=self.charset)\n cursor = conn.cursor()\n cursor.execute(\"SELECT VERSION()\")\n data = cursor.fetchall()\n print (\"Database version : %s \" % data)\n conn.close()\n \n def get_innodb_stats(self):\n interval=self.interval\n cursor = self.dbconn.cursor()\n showtype=self.showtype\n sql = 'show global status where Variable_name in (\"Com_select\",\"Com_insert\",\"Com_update\",\"Com_delete\",\"Innodb_buffer_pool_read_requests\",\"Innodb_buffer_pool_reads\",\"Innodb_rows_inserted\",\"Innodb_rows_updated\",\"Innodb_rows_deleted\",\"Innodb_rows_read\",\"Threads_running\",\"Threads_connected\",\"Threads_cached\",\"Threads_created\",\"Bytes_received\",\"Bytes_sent\",\"Innodb_buffer_pool_pages_data\",\"Innodb_buffer_pool_pages_free\",\"Innodb_buffer_pool_pages_dirty\",\"Innodb_buffer_pool_pages_flushed\",\"Innodb_data_reads\",\"Innodb_data_writes\",\"Innodb_data_read\",\"Innodb_data_written\",\"Innodb_os_log_fsyncs\",\"Innodb_os_log_written\")'\n cursor.execute(sql)\n innodb_stats_curdata = dict(cursor.fetchall())\n #print (data)\n output = ''\n if self.not_first == 0:\n if 'current_time' in self.showtype:\n output += time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) +' |'\n if 'com' in self.showtype:\n output += \" {0:^8}{1:^8}{2:^8}{3:^8}{4:^8}|\".format(0, 0, 0, 0, 0)\n if 'innodb_hit' in self.showtype:\n output += \"{0:^8}{1:^8,.2f}|\".format(0,100)\n\n if 'innodb_rows' in self.showtype:\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}|\" .format(0,0,0,0)\n\n if 'innodb_pages' in 
showtype:\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}|\".format(0,0,0,0)\n\n if 'innodb_data' in showtype:\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}|\".format(0,0,0,0)\n\n if 'innodb_log' in showtype:\n output += \"{0:^8}{1:^8}|\".format(0,0)\n\n if 'innodb_status' in showtype:\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}{4:^8}{5:^8}|\".format(0,0,0,0,0,0)\n\n if 'threads' in showtype:\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}|\".format(0,0,0,0)\n\n if 'bytes' in showtype:\n output += \"{0:^8}{1:^8}\".format(0,0)\n \n self.not_first = 1\n self.innodb_stats_predata = innodb_stats_curdata\n return\n\n else:\n insert_diff = round(((int(innodb_stats_curdata['Com_insert']) - int(self.innodb_stats_predata['Com_insert'])) / interval),2)\n update_diff = round(((int(innodb_stats_curdata['Com_update']) - int(self.innodb_stats_predata['Com_update'])) / interval),2)\n delete_diff = round(((int(innodb_stats_curdata['Com_delete']) - int(self.innodb_stats_predata['Com_delete'])) / interval),2)\n select_diff = round(((int(innodb_stats_curdata['Com_select']) - int(self.innodb_stats_predata['Com_select'])) / interval),2)\n read_request = round(((int(innodb_stats_curdata['Innodb_buffer_pool_read_requests']) - int(self.innodb_stats_predata['Innodb_buffer_pool_read_requests'])) / interval),2)\n read = round(((int(innodb_stats_curdata['Innodb_buffer_pool_reads']) -int( self.innodb_stats_predata['Innodb_buffer_pool_reads'])) / interval),2)\n innodb_rows_inserted_diff = round(((int(innodb_stats_curdata['Innodb_rows_inserted']) - int(self.innodb_stats_predata['Innodb_rows_inserted'])) / interval),2)\n innodb_rows_updated_diff = round(((int(innodb_stats_curdata['Innodb_rows_updated']) - int(self.innodb_stats_predata['Innodb_rows_updated'])) / interval),2)\n innodb_rows_deleted_diff = round(((int(innodb_stats_curdata['Innodb_rows_deleted']) - int(self.innodb_stats_predata['Innodb_rows_deleted'])) / interval),2)\n innodb_rows_read_diff = round(((int(innodb_stats_curdata['Innodb_rows_read']) - int(self.innodb_stats_predata['Innodb_rows_read'])) / interval),2)\n innodb_bp_pages_flushed_diff = round(((int(innodb_stats_curdata['Innodb_buffer_pool_pages_flushed']) - int(self.innodb_stats_predata['Innodb_buffer_pool_pages_flushed']) ) / interval),2)\n innodb_data_reads_diff = round(((int(innodb_stats_curdata['Innodb_data_reads']) - int(self.innodb_stats_predata['Innodb_data_reads'])) / interval),2)\n innodb_data_writes_diff = round(((int(innodb_stats_curdata['Innodb_data_writes']) - int(self.innodb_stats_predata['Innodb_data_writes'])) / interval),2)\n innodb_data_read_diff = round(((int(innodb_stats_curdata['Innodb_data_read']) - int(self.innodb_stats_predata['Innodb_data_read'])) / interval),2)\n innodb_data_written_diff = round(((int(innodb_stats_curdata['Innodb_data_written']) - int(self.innodb_stats_predata['Innodb_data_written'])) / interval),2)\n innodb_os_log_fsyncs_diff =round(((int(innodb_stats_curdata['Innodb_os_log_fsyncs']) - int(self.innodb_stats_predata['Innodb_os_log_fsyncs'])) / interval),2)\n innodb_os_log_written_diff = round(((int(innodb_stats_curdata['Innodb_os_log_written']) - int(self.innodb_stats_predata['Innodb_os_log_written'])) / interval),2)\n threads_created_diff = round(((int(innodb_stats_curdata['Threads_created']) - int(self.innodb_stats_predata['Threads_created'])) / interval),2)\n bytes_received_diff = round(((int(innodb_stats_curdata['Bytes_received']) - int(self.innodb_stats_predata['Bytes_received'])) / interval),2)\n bytes_sent_diff = round(((int(innodb_stats_curdata['Bytes_sent']) - 
int(self.innodb_stats_predata['Bytes_sent'])) / interval),2)\n\n if 'current_time' in self.showtype:\n output += time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) +' |'\n\n if 'com' in self.showtype:\n output += \"\\33[37m\"\n output += \" {0:^8}{1:^8}{2:^8}\".format(insert_diff,update_diff,delete_diff)\n output += \"\\33[33m\"\n output += \"{0:^8}{1:^8}|\".format(select_diff,insert_diff+update_diff+delete_diff)\n output += \"\\33[0m\"\n\n if 'innodb_hit' in self.showtype:\n output += \"\\33[37m\"\n output += \"{0:^8}\".format(read_request)\n if read_request:\n hit = (read_request-read)/read_request*100\n if hit > 99:\n output += \"\\33[32m\"\n else:\n output += \"\\33[31m\"\n output += \"{0:^8,.2f}|\".format(hit)\n else:\n hit = 100.00\n output += \"\\33[32m\"\n output += \"{0:^8,.2f}|\".format(hit)\n output += \"\\33[0m\"\n\n if 'innodb_rows' in self.showtype:\n output += \"\\33[37m\"\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}|\".format(innodb_rows_inserted_diff,innodb_rows_updated_diff,innodb_rows_deleted_diff,innodb_rows_read_diff)\n output += \"\\33[0m\"\n\n if 'innodb_pages' in showtype:\n output += \"\\33[37m\"\n output += \"{0:^8}{1:^8}\".format(innodb_stats_curdata['Innodb_buffer_pool_pages_data'],innodb_stats_curdata['Innodb_buffer_pool_pages_free'])\n output += \"\\33[33m\"\n output += \"{0:^8}{1:^8}|\".format(innodb_stats_curdata['Innodb_buffer_pool_pages_dirty'],innodb_bp_pages_flushed_diff)\n output += \"\\33[0m\"\n \n if 'innodb_data' in showtype:\n output += \"\\33[37m\"\n output += \"{0:^8}{1:^8}\".format(innodb_data_reads_diff,innodb_data_writes_diff)\n if (innodb_data_read_diff/1024/1024) > 9:\n output += \"\\33[31m\"\n else:\n output += \"\\33[37m\"\n\n if (innodb_data_read_diff/1024/1024) > 1:\n output += \"{0:^8}\".format(str(round((innodb_data_read_diff/1024/1024),2))+'m')\n elif (innodb_data_read_diff/1024) > 1 :\n output += \"{0:^8}\".format(str(round(((innodb_data_read_diff/1024)+0.5),2))+'k')\n else:\n output += \"{0:^8}\".format(str(innodb_data_read_diff))\n\n if (innodb_data_written_diff/1024/1024) > 9:\n output += \"\\33[31m\"\n else:\n output += \"\\33[37m\"\n\n if (innodb_data_written_diff/1024/1024) > 1:\n output += \"{0:^8}|\".format(str(round((innodb_data_written_diff/1024/1024),2))+'m')\n elif (innodb_data_written_diff/1024) > 1 :\n output += \"{0:^8}|\".format(str(round(((innodb_data_written_diff/1024)+0.5),2))+'k')\n else:\n output += \"{0:^8}|\".format(str(innodb_data_written_diff))\n output += \"\\33[0m\"\n\n if 'innodb_log' in showtype:\n output += \"\\33[37m\"\n output += \"{0:^8}\".format(innodb_os_log_fsyncs_diff)\n\n if (innodb_os_log_written_diff/1024/1024) > 1:\n output += \"\\33[31m\"\n else:\n output += \"\\33[33m\"\n\n if (innodb_os_log_written_diff/1024/1024) > 1:\n output += \"{0:^8}|\".format(str(round((innodb_os_log_written_diff/1024/1024),2))+'m')\n elif (innodb_data_written_diff/1024) > 1 :\n output += \"{0:^8}|\".format(str(round(((innodb_os_log_written_diff/1024)+0.5),2))+'k')\n else:\n output += \"{0:^8}|\".format(str(innodb_os_log_written_diff))\n output += \"\\33[0m\"\n\n \n if 'innodb_status' in showtype:\n innodb_status = self.get_innodb_status(self.dbconn)\n output += \"\\33[37m\"\n innodb_status['history_list'] = innodb_status['history_list']\n output += \"{0:^8}\" .format(innodb_status['history_list'])\n output += \"\\33[33m\"\n if (int(innodb_status['unflushed_log'])/1024/1024) > 1:\n innodb_status['unflushed_log'] = int(innodb_status['unflushed_log'])/1024/1024\n output += 
\"{0:^8}\".format(str(round(innodb_status['unflushed_log'],2))+'m')\n elif (int(innodb_status[\"unflushed_log\"])/1024) > 1:\n innodb_status['unflushed_log'] = str(round(int(innodb_status['unflushed_log'])/1024 + 0.5,2))\n output += \"{0:^8}\".format(str(round(innodb_status['unflushed_log'],2))+'k')\n else:\n output += \"{0:^8}\".format(innodb_status['unflushed_log'])\n\n if (int(innodb_status['uncheckpointed_bytes'])/1024/1024) > 1:\n innodb_status['uncheckpointed_bytes'] = int(innodb_status['uncheckpointed_bytes'])/1024/1024\n output += \"{0:^8,.1f}m\".format(innodb_status['uncheckpointed_bytes'])\n elif (int(innodb_status['uncheckpointed_bytes'])/1024) > 1:\n innodb_status['uncheckpointed_bytes'] = int(innodb_status['uncheckpointed_bytes'])/1024 + 0.5\n output += \"{0:^8,.1f}k\".format(innodb_status['uncheckpointed_bytes'])\n else:\n innodb_status['uncheckpointed_bytes'] = str(innodb_status['uncheckpointed_bytes'])\n output += \"{0:^8}\".format(innodb_status['uncheckpointed_bytes'])\n output += \"{0:^8}{1:^8}{2:^8}|\".format(innodb_status['read_views'],innodb_status['queries_inside'],innodb_status['queries_queued'])\n output += \"\\33[0m\"\n \n if 'threads' in showtype :\n output += \"\\33[37m\"\n output += \"{0:^8}{1:^8}{2:^8}{3:^8}|\".format(innodb_stats_curdata['Threads_running'],innodb_stats_curdata['Threads_connected'],threads_created_diff,innodb_stats_curdata['Threads_cached'])\n output += \"\\33[0m\"\t \n\n if 'bytes' in showtype:\n output += \"\\33[37m\"\n if (bytes_received_diff/1024/1024) > 1:\n output += \"{0:^8}\".format(str(bytes_received_diff/1024/1024)+'m')\n elif (bytes_received_diff/1024) > 1:\n output += \"{0:^8}\".format(str(bytes_received_diff/1024 + 0.5)+'k')\n else:\n output += \"{0:^8}\".format(str(bytes_received_diff))\n\n if (bytes_sent_diff/1024/1024) > 1:\n output += \"{0:^8}\".format(str(bytes_sent_diff/1024/1024)+'m')\n elif (bytes_sent_diff/1024) > 1:\n output += \"{0:^8}\".format(str(bytes_sent_diff/1024 + 0.5)+'k')\n else:\n output += \"{0:^8}\".format(str(bytes_sent_diff))\n output += \"\\33[0m\"\n\n print(output)\n self.innodb_stats_predata = innodb_stats_curdata\n return\n \n def get_title(self):\n cursor = self.dbconn.cursor()\n cursor.execute(\"show databases\")\n res = cursor.fetchall()\n rel = self.dealWithData1(res)\n #print (\"DB : %s\" % rel)\n\n sql = 'show variables where Variable_name in (\"sync_binlog\",\"max_connections\",\"max_user_connections\",\"max_connect_errors\",\"table_open_cache\",\"table_definition_cache\",\"thread_cache_size\",\"binlog_format\",\"open_files_limit\",\"max_binlog_size\",\"max_binlog_cache_size\")'\n cursor.execute(sql)\n res = cursor.fetchall()\n rel = self.dealWithData2(res)\n print (\"Var : %s\" % rel)\n sql = 'show variables where Variable_name in (\"innodb_flush_log_at_trx_commit\",\"innodb_flush_method\",\"innodb_buffer_pool_size\",\"innodb_max_dirty_pages_pct\",\"innodb_log_buffer_size\",\"innodb_log_file_size\",\"innodb_log_files_in_group\",\"innodb_thread_concurrency\",\"innodb_file_per_table\",\"innodb_adaptive_hash_index\",\"innodb_open_files\",\"innodb_io_capacity\",\"innodb_read_io_threads\",\"innodb_write_io_threads\",\"innodb_adaptive_flushing\",\"innodb_lock_wait_timeout\",\"innodb_log_files_in_group\")'\n cursor.execute(sql)\n res = cursor.fetchall()\n outputs=[]\n for val in res:\n if val[0] == 'innodb_buffer_pool_size' or val[0] == 'innodb_log_file_size' or val[0] == 'innodb_log_buffer_size' or val[0] == 'max_binlog_cache_size' or val[0] == 'max_binlog_size':\n if (int(val[1])/1024/1024/1024) >= 1 
:\n r = \"%d G\" % (int(val[1])/1024/1024/1024)\n elif (int(val[1])/1024/1024)>=1:\n r = \"%d M\" % (int(val[1])/1024/1024)\n else:\n r = val[1]\n else:\n r = val[1]\n rel = \"%s:[%s]\" % (val[0],r)\n outputs.append(\"%s\" % rel)\n outputs = ','.join(outputs)\n print (outputs)\n return\n\n def dealWithData1(self,res):\n outputs=[]\n for i in res:\n r = i[0]\n outputs.append(\"%s\" % r)\n outputs = ','.join(outputs)\n return outputs\n \n def dealWithData2(self,res):\n outputs=[]\n for i in res:\n r = \"%s:[\\033[1;31;40m%s\\033[0m]\" % (i[0],i[1])\n outputs.append(\"\\n%s\" % r)\n outputs = ','.join(outputs)\n return outputs\n\n\n\n def get_innodb_status(self,conn):\n sql = 'show engine innodb status'\n cursor = conn.cursor()\n cursor.execute(sql)\n res = cursor.fetchone()\n result = res[2].split('\\n')\n innodb_status = {}\n for i in result:\n try:\n if i.index(\"History list length\") == 0:\n r = re.compile(\"\\s+\")\n rel = r.split(i)\n innodb_status['history_list'] = rel[-1]\n except Exception as e:\n #print e\n pass\n try:\n if i.index(\"Log sequence number\") == 0:\n r = re.compile(\"\\s+\")\n rel = r.split(i)\n innodb_status['log_bytes_written'] = rel[-1]\n except Exception as e:\n pass\n try:\n if i.index(\"Log flushed up to\") == 0:\n r = re.compile(\"\\s+\")\n rel = r.split(i)\n innodb_status['log_bytes_flushed'] = rel[-1]\n except Exception as e:\n pass\n try:\n if i.index(\"Last checkpoint at\") == 0:\n r = re.compile(\"\\s+\")\n rel = r.split(i)\n innodb_status['last_checkpoint'] = rel[-1]\n except Exception as e:\n pass\n\n try:\n if i.index(\"queries inside InnoDB\") == 2:\n #print i\n r = re.compile(\"\\s+\")\n rel = r.split(i)\n innodb_status['queries_inside'] = rel[0]\n innodb_status['queries_queued'] = rel[4]\n except Exception as e:\n pass\n\n try:\n if i.index(\"read views open inside InnoDB\") == 2:\n #print i\n r = re.compile(\"\\s+\")\n rel = r.split(i)\n #print rel\n innodb_status['read_views'] = rel[0]\n except Exception as e:\n pass\n\n innodb_status[\"unflushed_log\"] = int(innodb_status['log_bytes_written']) - int(innodb_status['log_bytes_flushed'])\n innodb_status[\"uncheckpointed_bytes\"] = int(innodb_status['log_bytes_written']) - int(innodb_status['last_checkpoint'])\n return innodb_status\n \n def close_db_connect(self):\n self.dbconn.close()\n\n def get_options(self):\n\n global mysql_headline1\n global mysql_headline2\n showtype=self.showtype\n if 'current_time' in self.showtype:\n mysql_headline1 +=\" Date \"\n mysql_headline2 +=\"{0:^20}|\".format('date')\n\n if 'com' in self.showtype:\n mysql_headline1 +=\"| -QPS- -TPS- \"\n mysql_headline2 +=\"{0:^8}{1:^8}{2:^8}{3:^8}{4:^8} |\".format('ins','upd','del','sel','iud')\n \n if 'innodb_hit' in self.showtype:\n mysql_headline1 += \"| -Hit%- \"\n mysql_headline2 += \"{0:^8}{1:^8} |\".format('lor','hit')\n\n if 'innodb_rows' in self.showtype:\n mysql_headline1 += \"| -innodb rows status- \"\n mysql_headline2 += \"{0:^8}{1:^8}{2:^8}{3:^8} |\".format('ins', 'upd','del','read')\n \n if 'innodb_pages' in showtype:\n mysql_headline1 += \"| -innodb bp pages status- \"\n mysql_headline2 += \"{0:^8}{1:^8}{2:^8}{3:^8} |\".format('data','free','dirty','flush')\n\n if 'innodb_data' in showtype:\n mysql_headline1 += \"| -innodb data status- \"\n mysql_headline2 += \"{0:^8}{1:^8}{2:^8}{3:^8} |\".format('reads', 'writes', 'readed', 'written')\n\n if 'innodb_log' in showtype:\n mysql_headline1 += \"| --innodb log-- \"\n mysql_headline2 += \"{0:^8}{1:^8}|\".format('fsyncs', 'written')\n\n if 'innodb_status' in showtype:\n 
mysql_headline1 += \"| his --log(byte)-- read --query-- \"\n mysql_headline2 += \"{0:^8}{1:^8}{2:^8}{3:^8}{4:^8}{5:^8} |\".format('list', 'uflush', 'uckpt','view','inside', 'que')\n\n if 'threads' in showtype:\n mysql_headline1 += \"| --threads-- \"\n mysql_headline2 += \"{0:^8}{1:^8}{2:^8}{3:^8} |\".format('run','con','cre','cac')\n\n if 'bytes' in showtype:\n mysql_headline1 += \"| --bytes-- |\"\n mysql_headline2 += \"{0:^8}{1:^8} |\".format('recv','send')\ndef get_parse_argument():\n innodb_argument = {\n 'interval':1,\n 'count':-2,\n 'time':True ,\n 'nocolor': False,\n 'load': False,\n 'cpu': False,\n 'swap': False,\n 'disk': False,\n 'net': False,\n 'port': 3306,\n 'host': '10.0.8.197',\n 'user':'dbmgr',\n 'passwd':'Muli1234',\n 'charset':'utf8',\n 'showtype':[]\n }\n \n parser = argparse.ArgumentParser(description='Created By [email protected].') \n\n parser.add_argument(\"-i\",\"--interval\", help=\"Time(second) Interval(default 1). \", type=int,default=1)\n parser.add_argument(\"-C\",\"--count\", help=\"Times. Type : int\", type=int)\n parser.add_argument(\"-t\", \"--time\", help=\"Print The Current Time.\", action=\"store_true\")\n parser.add_argument(\"--nocolor\", help=\"Print NO Color.\", action=\"store_true\") # 未实现\n \n\n parser.add_argument(\"-l\", \"--load\", help=\"Print Load Info.\", action=\"store_true\")# 未实现\n parser.add_argument(\"-c\", \"--cpu\", help=\"Print Cpu Info.\", action=\"store_true\")# 未实现\n parser.add_argument(\"-s\", \"--swap\", help=\"Print Swap Info.\", action=\"store_true\")# 未实现\n parser.add_argument(\"-d\", \"--disk\", help=\"Print Disk Info.\", action=\"store_true\")# 未实现\n parser.add_argument(\"-n\", \"--net\", help=\"Print Net Info.\", action=\"store_true\") # 未实现\n\n parser.add_argument(\"-S\",\"--socket\", help=\"Socket file to use for mysql connection.\") # 未实现\n parser.add_argument(\"-u\",\"--user\", help=\"db username\")\n parser.add_argument(\"-p\",\"--passwd\", help=\"db password\")\n parser.add_argument(\"-H\",\"--host\", help=\"db host ip\")\n parser.add_argument(\"-P\",\"--port\", help=\"Port number to use for mysql connection(default 3306).\", type=int,default=3306)\n\n parser.add_argument(\"-com\", help=\"Print MySQL Status(Com_select,Com_insert,Com_update,Com_delete).\", action=\"store_true\")\n parser.add_argument(\"-hit\", help=\"Print Innodb Hit%%.\", action=\"store_true\")\n parser.add_argument(\"-innodb_rows\", help=\"Print Innodb Rows Status(Innodb_rows_inserted/updated/deleted/read).\", action=\"store_true\")\n parser.add_argument(\"-innodb_pages\", help=\"Print Innodb Buffer Pool Pages Status(Innodb_buffer_pool_pages_data/free/dirty/flushed).\", action=\"store_true\")\n parser.add_argument(\"-innodb_data\", help=\"Print Innodb Data Status(Innodb_data_reads/writes/read/written)\", action=\"store_true\")\n parser.add_argument(\"-innodb_log\", help=\"Print Innodb Log Status(Innodb_os_log_fsyncs/written).\", action=\"store_true\")\n parser.add_argument(\"-innodb_status\", help=\"Print Innodb Status from Command: 'Show Engine Innodb Status'.\\\n \t(history list/ log unflushed/uncheckpointed bytes/ read views/ queries inside/queued)\", action=\"store_true\") \n\n parser.add_argument(\"-T\", \"--threads\", help=\"Print Threads Status(Threads_running,Threads_connected,Threads_created,Threads_cached).\", action=\"store_true\")# 未实现\n parser.add_argument(\"-rt\", help=\"Print MySQL DB RT(us).\", action=\"store_true\")# 未实现\n parser.add_argument(\"-B\", \"--bytes\", help=\"Print Bytes received from/send to 
MySQL(Bytes_received,Bytes_sent).\", action=\"store_true\") # 未实现 \n\n parser.add_argument(\"-mysql\", help=\"Print MySQLInfo (include -t,-com,-hit,-T,-B).\", action=\"store_true\") # 未实现\n parser.add_argument(\"-innodb\", help=\"Print InnodbInfo(include -t,-innodb_pages,-innodb_data,-innodb_log,-innodb_status).\", action=\"store_true\") \n parser.add_argument(\"-sys\", help=\"Print SysInfo (include -t,-l,-c,-s).\", action=\"store_true\") # 未实现\n parser.add_argument(\"-lazy\", help=\"Print Info (include -t,-l,-c,-s,-com,-hit).\", action=\"store_true\") # 未实现\n\n parser.add_argument(\"-L\", \"--logfile\", help=\"Print to Logfile..\", action=\"store_true\") # 未实现\n parser.add_argument(\"-logfile_by_day\", help=\"One day a logfile,the suffix of logfile is 'yyyy-mm-dd';and is valid with -L.\", action=\"store_true\") # 未实现\n\n args = parser.parse_args() \n\n if args.com:\n \tinnodb_argument['showtype'].append('com') \n\n if args.hit:\n \tinnodb_argument['showtype'].append('hit')\t \n\n if args.innodb_rows:\n \tinnodb_argument['showtype'].append('innodb_rows')\t \n\n if args.innodb_pages:\n \tinnodb_argument['showtype'].append('innodb_pages')\t \n\n if args.innodb_data:\n \tinnodb_argument['showtype'].append('innodb_data')\t \n\n if args.innodb_log:\n \tinnodb_argument['showtype'].append('innodb_log')\t \n \n if args.innodb_status:\n \tinnodb_argument['showtype'].append('innodb_status')\n \n if args.innodb:\n innodb_argument['showtype'].append('current_time')\n innodb_argument['showtype'].append('innodb_pages')\n innodb_argument['showtype'].append('innodb_data')\n innodb_argument['showtype'].append('innodb_log')\n innodb_argument['showtype'].append('innodb_status')\n\n if len(innodb_argument['showtype'])==0:\n innodb_argument['showtype'].append('com')\n\n if args.time:\n \tinnodb_argument['showtype'].append('current_time') \n\n if args.interval > 0 :\n innodb_argument['interval']=args.interval\n \n if args.count:\n if args.count > 0:\n innodb_argument['count']=args.count\n\n if args.user :\n innodb_argument['user']=args.user\n\n if args.passwd :\n innodb_argument['passwd']=args.passwd\n \n if args.host :\n innodb_argument['host']=args.host\n\n if args.port :\n innodb_argument['port']=args.port\n \n return innodb_argument\n\n\n\ndef main():\n #host='10.0.8.197'\n #username='dbmgr'\n #password='Muli1234'\n #port=3306\n #charset='utf8'\n #databases='information_schema'\n \n try:\n innodb_argument=get_parse_argument()\n db=SuperMySQL(innodb_argument)\n db.get_title()\n db.get_options()\n print(mysql_headline1)\n print(mysql_headline2)\n count =innodb_argument['count']+1\n while count != 0:\n if count%10 == 0:\n print(mysql_headline1)\n print(mysql_headline2)\n db.get_innodb_stats()\n time.sleep(innodb_argument['interval'])\n count =count-1\n db.close_db_connect()\n #db.get_innodb_status(db.dbconn)\n except KeyboardInterrupt as identifier:\n print(\"Exit\")\n db.close_db_connect()\n #finally:\n # db.close_db_connect()\n\n\"\"\"\n innodb_argument=get_parse_argument()\n #self.showtype = ['current_time','com', 'innodb_hit', 'innodb_rows', 'innodb_pages', 'innodb_data', 'innodb_log', 'innodb_status', 'threads', 'bytes']\n #db=SuperMySQL(host=host,user=username,passwd=password,port=port,charset=charset)\n db=SuperMySQL(innodb_argument)\n db.get_options()\n print(mysql_headline1)\n print(mysql_headline2)\n count =innodb_argument['count']+1\n while count != 0:\n db.get_innodb_stats()\n time.sleep(innodb_argument['interval'])\n count =count-1\n #db.get_innodb_status(db.dbconn)\n 
db.close_db_connect()\n\"\"\"\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.495953768491745,
"alphanum_fraction": 0.5141618251800537,
"avg_line_length": 30.436363220214844,
"blob_id": "5c65ec4a4a3f66b56746e04ad066f0225cbb6d0c",
"content_id": "871ebcc4f0c6979bbdef7a6f66e0d3ac27ddbdad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3510,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 110,
"path": "/killsql.py",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n########################################################################################################\n# File name : killsql.py\n# Author : Jessetong\n# Date : 08/29/2018\n#\n# Description : Get the Long SQL and record\n#\n# Usage : ./killsql.py\n#\n# Prerequisite : None\n# Function List: //主要函数列表,每条记录应包括函数名及功能简要说明\n# 1. kill(): get long sql for more than 30 minutes\n# 2. storge_log_sql(data): store the result from the function 'kill()' return \n#\n# Modifications :\n#\n# When Who What\n# ========== =========== ================================================================\n# 08/29/2018 tongboyu V 1.0 Record the Long SQL for more than 30 minutes.\n########################################################################################################\n\nimport pymysql\nimport argparse\n\nclass killsql(object):\n\n def __init__(self,kw):\n self.host=kw['host']\n self.username=kw['user']\n self.password=kw['passwd']\n self.port=kw['port']\n\n self.dhost='10.0.8.197'\n self.dusername='dbmgr'\n self.dpassword='Muli1234'\n self.ddatabase='test'\n self.dport=3306\n\n\n def kill(self):\n sql = 'SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO FROM information_schema.PROCESSLIST WHERE COMMAND<>\\'Sleep\\' AND TIME >1800'\n conn = pymysql.connect(host=self.host,user=self.username,passwd=self.password,port=self.port)\n cursor=conn.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n #print(data)\n conn.close()\n return data\n \n def storge_log_sql(self,data):\n #sql2 = 'show tables'\n sql = 'INSERT INTO WESURE_PROCESSLIST (PROCESSLIST_ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,CREATE_DATE) VALUE'\n i= 0\n for x in data:\n if i != 0:\n sql += ','\n sql = sql + '('\n for y in x:\n sql += \"'\"+str(y) +\"'\"+ ','\n sql += 'now())' \n i += 1\n\n print (sql)\n dconn = pymysql.connect(host=self.dhost,user=self.dusername,passwd=self.dpassword,port=self.dport,database=self.ddatabase)\n dcursor=dconn.cursor()\n dcursor.execute(sql)\n ddata = dcursor.fetchall()\n #print(ddata)\n dconn.close()\n\ndef get_parse_argument():\n function_argument = {\n 'port': 3306,\n 'host': '10.0.8.2',\n 'user':'dbmgr',\n 'passwd':'Muli1234',\n 'charset':'utf8'\n }\n \n parser = argparse.ArgumentParser(description='Created By [email protected].') \n \n parser.add_argument(\"-u\",\"--user\", help=\"db username\")\n parser.add_argument(\"-p\",\"--passwd\", help=\"db password\")\n parser.add_argument(\"-H\",\"--host\", help=\"db host ip\")\n parser.add_argument(\"-P\",\"--port\", help=\"Port number to use for mysql connection(default 3306).\", type=int,default=3306)\n\n args = parser.parse_args() \n\n if args.user :\n function_argument['user']=args.user\n\n if args.passwd :\n function_argument['passwd']=args.passwd\n \n if args.host :\n function_argument['host']=args.host\n\n if args.port :\n function_argument['port']=args.port\n \n return function_argument\n\n\nif __name__ == '__main__':\n param=get_parse_argument()\n db=killsql(param)\n data=db.kill()\n db.storge_log_sql(data)\n\n\n"
},
{
"alpha_fraction": 0.506681501865387,
"alphanum_fraction": 0.525612473487854,
"avg_line_length": 22.05128288269043,
"blob_id": "e781703645080a48e7042f76c55d460bdd8e4ef6",
"content_id": "a8e0ef8187ee06d7860e7e17320f46d433377fa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 910,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 39,
"path": "/superdba.py",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport time\n\n__version__ = '1.0.0'\n__author__ = 'Chris Tong'\n\ndef command_main():\n while 1:\n command = input('sdba >>> ')\n command = command.strip() #去掉前后空格\n print (command ,\"x\")\n if command == 'exit':\n break\n\n #print(command.split())\n \n #command = input('TMULY > ')\n print (\"End the SuperDBA!\")\n\n\ndef start_logo():\n #_starttime = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n #__starttime = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n __starttime = time.asctime(time.localtime(time.time())) # Sub Jul 2 10:34:34 2018\n\n logo_out = '''\nSuperDBA : Release %s Production on %s\n ''' % (__version__ , __starttime)\n print (logo_out)\n\n\ndef main():\n start_logo()\n command_main()\n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.5282959342002869,
"alphanum_fraction": 0.5447360277175903,
"avg_line_length": 30.929292678833008,
"blob_id": "643fc9f2cfd6d0a0612da8feb8129e6f64e051bd",
"content_id": "3b701681efcba26f7ff2b16249f651535e002266",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3163,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 99,
"path": "/getsnapshot",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport pymysql\nimport argparse\nimport datetime\nimport time\n\nclass Exec_every_mysql(object):\n\n def __init__(self):\n self.host='10.95.32.165'\n self.username='dba_admin'\n self.password='k18d@Lmnis'\n self.port=3306\n self.database='wesure_blitzcrank_cmdb'\n\n self.dhost='10.95.32.165'\n self.dusername='dba'\n self.dpassword='wesure@dba2016'\n self.ddatabase='wesure_dba'\n self.dport=3306\n \n self.iplist=['10.0.24.2']\n\n def getinstalllist(self):\n sql = 'select endpoint from blc_cmdb_cdb'\n conn = pymysql.connect(host=self.host,user=self.username,passwd=self.password,port=self.port,database=self.database)\n cursor=conn.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n conn.close()\n return data\n\n def changeiptolist(self):\n data=self.getinstalllist()\n rel=[]\n for mylist in data:\n if mylist[0] not in self.iplist:\n rel.append(mylist[0])\n return rel\n\n def execsql(self):\n data=self.changeiptolist()\n print(data)\n for myip in data:\n print(myip)\n self.getsnapshot(myip)\n return data\n\n def getsnapshot(self,ip):\n sql = 'SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO FROM information_schema.PROCESSLIST WHERE COMMAND<>\\'Sleep\\' AND COMMAND <>\\'Binlog Dump\\' and info not like \\'%information_schema.PROCESSLIST%\\''\n conn = pymysql.connect(host=ip,user=self.username,passwd=self.password,port=self.port)\n cursor=conn.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n #print(data)\n conn.close()\n if len(data)>0:\n self.storge_log_sql(data,ip)\n return data\n \n def storge_log_sql(self,data,ip):\n #sql2 = 'show tables'\n sql = 'INSERT INTO WESURE_PROCESSLIST (SOURCE_IP,PROCESSLIST_ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,CREATE_DATE) VALUE'\n i= 0\n for x in data:\n if i != 0:\n sql += ','\n sql = sql + '('+'\"'+ip+'\"'+ ','\n for y in x:\n if i == 7:\n sql += '\"'+str(y)[0:999] + '\"'+ ','\n else:\n sql += '\"'+str(y) + '\"'+ ','\n sql += 'now())' \n i += 1\n print (sql)\n try:\n dconn = pymysql.connect(host=self.dhost,user=self.dusername,passwd=self.dpassword,port=self.dport,database=self.ddatabase)\n print (\"db connect !\")\n dcursor=dconn.cursor()\n dcursor.execute(sql)\n dcursor.execute('commit')\n ddata = dcursor.fetchall()\n print(\"data commit complite!\")\n except Exception as e:\n print (\"error:\",e)\n dconn.close()\n\nif __name__ == '__main__':\n while True:\n print (\"Execute Time : \",datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n #param=get_parse_argument()\n db=Exec_every_mysql()\n data=db.execsql()\n time.sleep(1)\n #db.getsnapshot('10.0.8.2')\n #data=db.getinstalllist()\n\n\n"
},
{
"alpha_fraction": 0.5814850330352783,
"alphanum_fraction": 0.586306631565094,
"avg_line_length": 22.545454025268555,
"blob_id": "8855e6611a14b4af1555e08d301896d4e4146c7b",
"content_id": "c9a3694e5e824733ac86b1aa1f8b6fe3effacab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2508,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 88,
"path": "/README.md",
"repo_name": "pokyu/SuperDBA",
"src_encoding": "UTF-8",
"text": "# SuperDBA说明文档 By Chris\n\n## 设置python环境变量\n```\n. /Users/jessetong/Venv/ChisOS/bin/activate \n```\n\n\n## SuperMySQL\n\n### 参数说明\n\n+ -h, --help\n\n```\n -h, --help show this help message and exit\n```\n \n查看帮助。\n程序不执行仅仅执行参数部分就退出。 \n\n+ -i INTERVAL, --interval INTERVAL\n\n```\n -i INTERVAL, --interval INTERVAL\n Time(second) Interval(default 1).\n```\n\nINTERVAL是int型,统计的时间间隔,单位是:秒,如果不设置,默认值:1秒\n\n+ -C COUNT, --count COUNT\n\n```\n -C COUNT, --count COUNT\n Times. Type : int\n```\n\nCOUNT为int型,会根据些此值打印COUNT次,如果不设置,程序默认无限次,直到接受到Ctrl+C结束程序\n\n+ -t, --time\n\n```\n -t, --time Print The Current Time.\n```\n\n是否在输出结果的第一列打印当前时间,不代此项,代表不打印\n\n+ 数据库连接配置\n\n```\n -u USER, --user USER db username\n -p PASSWD, --passwd PASSWD\n db password\n -H HOST, --host HOST db host ip\n -P PORT, --port PORT Port number to use for mysql connection(default 3306).\n```\n\n这组参数与MySQL的参数相同,PORT可以不输入,默认值为3306(注意:-u 与 USER之间必须有空格)\n\n+ 实时监控内容控制参数\n\n```\n -com Print MySQL\n Status(Com_select,Com_insert,Com_update,Com_delete).\n -hit Print Innodb Hit%.\n -innodb_rows Print Innodb Rows\n Status(Innodb_rows_inserted/updated/deleted/read).\n -innodb_pages Print Innodb Buffer Pool Pages Status(Innodb_buffer_po\n ol_pages_data/free/dirty/flushed).\n -innodb_data Print Innodb Data\n Status(Innodb_data_reads/writes/read/written)\n -innodb_log Print Innodb Log Status(Innodb_os_log_fsyncs/written).\n -innodb_status Print Innodb Status from Command: 'Show Engine Innodb\n Status'. (history list/ log unflushed/uncheckpointed\n bytes/ read views/ queries inside/queued)\n```\n\n监控内容选项。\n(每个选项内容待补充)\n\n+ 实时监控内容控制参数汇总 -innodb\n\n```\n -innodb Print InnodbInfo(include -t,-innodb_pages,-innodb_data\n ,-innodb_log,-innodb_status).\n```\n\n相当与-t,-innodb_pages,-innodb_data,-innodb_log,-innodb_status,算是简写\n\n\n"
}
] | 7 |
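
The supermysql tool in the SuperDBA record above boils down to taking successive `SHOW GLOBAL STATUS` snapshots at a fixed interval (`-i`) for a fixed number of rounds (`-C`) and printing the per-interval counter deltas. A minimal sketch of that sampling loop, assuming `pymysql` is installed; the host and credentials below are placeholders, not the repo's defaults:

```python
# Minimal sketch of the diff loop behind supermysql: snapshot
# SHOW GLOBAL STATUS, sleep, snapshot again, print counter deltas.
import time
import pymysql

COUNTERS = ("Com_insert", "Com_update", "Com_delete", "Com_select")

def snapshot(conn):
    # SHOW GLOBAL STATUS returns (Variable_name, Value) rows
    with conn.cursor() as cur:
        cur.execute("SHOW GLOBAL STATUS")
        return {name: value for name, value in cur.fetchall()}

conn = pymysql.connect(host="127.0.0.1", user="dbmgr", passwd="secret", port=3306)
prev = snapshot(conn)
for _ in range(5):                 # like -C 5
    time.sleep(1)                  # like -i 1
    curr = snapshot(conn)
    print({k: int(curr[k]) - int(prev[k]) for k in COUNTERS})
    prev = curr
conn.close()
```

The real tool layers fixed-width column formatting and ANSI colouring on top of exactly this loop.
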
siddeshas07/assigments_10_2_part4
|
https://github.com/siddeshas07/assigments_10_2_part4
|
406ffa588a4752c40aceb1101c4d41cba1cca15c
|
89f00475d013c5db30eee3a84a07b499e0c847a5
|
3a9d38d1d99fed0a601014b19b0aa1187874dac7
|
refs/heads/main
| 2023-06-15T17:49:51.269643 | 2021-07-09T17:07:28 | 2021-07-09T17:07:28 | 384,501,416 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5753064751625061,
"alphanum_fraction": 0.5831874012947083,
"avg_line_length": 24.604650497436523,
"blob_id": "8b1a8fb84f6a6f1793e6103b8a02dc48699b1ec6",
"content_id": "9575aecc7491db4f0d0b470cf25c8ad6e2ff0092",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1142,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 43,
"path": "/as.py",
"repo_name": "siddeshas07/assigments_10_2_part4",
"src_encoding": "UTF-8",
"text": "import time\r\nfrom selenium import webdriver\r\nimport json\r\nfrom xlwt import Workbook\r\n\r\ndriver = webdriver.Chrome(r'C:\\Users\\ASSiddesh\\Downloads\\chromedriver_win32 (1)\\chromedriver')\r\ntry:\r\n driver.get(\"https://www.imdb.com/?ref_=nv_home\")\r\n links = driver.find_elements_by_tag_name('a')\r\n links_arr = []\r\n for link in links:\r\n links_arr.append(link.get_attribute('href'))\r\n\r\n link_dict = {}\r\n l = len(links_arr)\r\n for i in range(l):\r\n if (links_arr[i] == None):\r\n continue\r\n link_dict[i] = links_arr[i]\r\n\r\n # json file handling\r\n with open(\"link_json.json\", \"w\") as write_file:\r\n json.dump(link_dict, write_file, indent=4)\r\n print(link_dict)\r\n time.sleep(3)\r\n\r\n # excel file handling\r\n wb = Workbook()\r\n sheet1 = wb.add_sheet('Sheet 1')\r\n for i in range(l):\r\n if (links_arr == None):\r\n continue\r\n sheet1.write(i, 0, links_arr[i])\r\n wb.save('xlwt example.xls')\r\n\r\n # txt file handling\r\n file = open('Text.txt', 'w')\r\n file.write(str(links_arr))\r\n file.close()\r\n\r\n driver.close()\r\nexcept Exception as e:\r\n print(e)"
},
{
"alpha_fraction": 0.6105769276618958,
"alphanum_fraction": 0.6189903616905212,
"avg_line_length": 19.104839324951172,
"blob_id": "e0d28893b86aafdfb76d2630911f22da34abd042",
"content_id": "c6dfdbceb4f3735e9aea6228aedc871dcb3763be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2496,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 124,
"path": "/README.md",
"repo_name": "siddeshas07/assigments_10_2_part4",
"src_encoding": "UTF-8",
"text": "# assigments_10_2_part4\n\n\n4. Understand and implement Test data maintenance.\n<br></br>\n\n External file collaboration for test data or other aspects\n .config\n .xlsx\n .txt\n other files\n \n \n \nexporting to differnt files\n\nfilename as.py\n\n\n import time\nfrom selenium import webdriver\nimport json\nfrom xlwt import Workbook\n\ndriver = webdriver.Chrome(r'C:\\Users\\ASSiddesh\\Downloads\\chromedriver_win32 (1)\\chromedriver')\ntry:\n driver.get(\"https://www.imdb.com/?ref_=nv_home\")\n links = driver.find_elements_by_tag_name('a')\n links_arr = []\n for link in links:\n links_arr.append(link.get_attribute('href'))\n\n link_dict = {}\n l = len(links_arr)\n for i in range(l):\n if (links_arr[i] == None):\n continue\n link_dict[i] = links_arr[i]\n\n # json file handling\n with open(\"link_json.json\", \"w\") as write_file:\n json.dump(link_dict, write_file, indent=4)\n print(link_dict)\n time.sleep(3)\n\n # excel file handling\n wb = Workbook()\n sheet1 = wb.add_sheet('Sheet 1')\n for i in range(l):\n if (links_arr == None):\n continue\n sheet1.write(i, 0, links_arr[i])\n wb.save('xlwt example.xls')\n\n # txt file handling\n file = open('Text.txt', 'w')\n file.write(str(links_arr))\n file.close()\n\n driver.close()\nexcept Exception as e:\n print(e)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n importing differnt file to robot \n \n \n filename file.robot \n \n * Settings *\nDocumentation Simple example using SeleniumLibrary.\nLibrary SeleniumLibrary\nResource ..keka\\file.robot\nLibrary ..keka\\search.xlsx\nLibaray ..keak\\search.json\nLibaray ..keak\\search.csv\n\n\n* Variables *\n${LOGIN URL} https://www.imdb.com/\n${BROWSER} Firefox\n\n* Test Cases *\nValid Open\n Open Browser To imdb page\nValid search\n Input search1 ${search1}\nvalid searchquery\n Submit SearchQuery\n Search\n Navigate\n# [Teardown] Close Browser\n\n* Keywords *\nOpen Browser To imdb page\n Open Browser ${LOGIN URL} ${BROWSER}\n Title Should Be IMDb: Ratings, Reviews, and Where to Watch the Best Movies & TV Shows\n\nScroll Down\n Scroll Elements Into View //footer[@class='imdb-footer VUGIPjGgHtzvbHiU19iTQ']\n\nInput search1\n [Arguments] ${search1}\n Input Text //input[@id='suggestion-search'] ${search1}\n\n\n\n\nSubmit SearchQuery\n Click Button //button[@id='suggestion-search-button']\n\n\n\n"
}
] | 2 |
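
The README in the record above is about maintaining the same test data in several external file formats. A minimal standard-library sketch of that multi-format export pattern (file names here are illustrative, not taken from the repo, and the repo's .xls export via xlwt is omitted):

```python
# One list of scraped links persisted three ways: JSON, plain text, CSV.
import csv
import json

links = ["https://www.imdb.com/chart/top", "https://www.imdb.com/calendar"]

with open("links.json", "w") as f:
    json.dump({i: link for i, link in enumerate(links)}, f, indent=4)

with open("links.txt", "w") as f:
    f.write("\n".join(links))

with open("links.csv", "w", newline="") as f:
    csv.writer(f).writerows([link] for link in links)
```
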
z1pti3/jimiPlugin-googlechat
|
https://github.com/z1pti3/jimiPlugin-googlechat
|
b5305f1e510b6ef03d55f4ba41b6b302dc5889e6
|
00663470397ff2420db8e16fee780976d6961a9c
|
e7342ada85f38c533be0f1b0cbc64f0c7310df1c
|
refs/heads/master
| 2023-03-10T04:36:37.026925 | 2021-02-26T21:55:02 | 2021-02-26T21:55:02 | 301,509,107 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6757851839065552,
"alphanum_fraction": 0.6798378825187683,
"avg_line_length": 24.30769157409668,
"blob_id": "17c6cad172bf60a4075288cf20cfb56f15a6d59a",
"content_id": "e88c2d46fa160391faa89e3e7acb36b9e8de97d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 987,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 39,
"path": "/models/action.py",
"repo_name": "z1pti3/jimiPlugin-googlechat",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\nfrom pathlib import Path\n\nfrom core.models import action\nfrom core import helpers\n\n# Based on jimi API plugin\nclass _googlechatWebHook(action._action):\n\turl = str()\n\tbody = str()\n\ttimeout = int()\n\tproxy = dict()\n\tca = str()\n\n\tdef run(self,data,persistentData,actionResult):\n\t\theaders = { \"Content-Type\": \"application/json; charset=UTF-8\" }\n\t\turl = helpers.evalString(self.url,{\"data\" : data})\n\t\tbody = helpers.evalString(self.body,{\"data\" : data})\n\n\t\ttimeout = 60\n\t\tif self.timeout > 0:\n\t\t\ttimeout = self.timeout\n\n\t\tkwargs={}\n\t\tkwargs[\"headers\"] = headers\n\t\tkwargs[\"timeout\"] = timeout\n\t\tkwargs[\"data\"] = json.dumps({\"text\" : body })\n\t\tif self.ca:\n\t\t\tkwargs[\"verify\"] == Path(self.ca)\n\t\tif self.proxy:\n\t\t\tkwargs[\"proxies\"] = self.proxy\n\n\t\tresponse = requests.post(url,**kwargs)\n\n\t\tactionResult[\"result\"] = True\n\t\tactionResult[\"rc\"] = response.status_code\n\t\tactionResult[\"data\"] = { \"headers\" : response.headers, \"text\" : response.text }\n\t\treturn actionResult\n"
},
{
"alpha_fraction": 0.8222222328186035,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 29,
"blob_id": "3124bf5b061209052fcac59d98715f587d9ec914",
"content_id": "9167e459aefaa05b1e346dcc1396cb49c925f8db",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 3,
"path": "/README.md",
"repo_name": "z1pti3/jimiPlugin-googlechat",
"src_encoding": "UTF-8",
"text": "# jimiPlugin-googlechat\n\nBased on jimiPlugin-api enables api calls to google chat webhook\n"
}
] | 2 |
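
The action in the record above reduces to a single HTTP call: POST a JSON payload of the form `{"text": ...}` to a Google Chat incoming-webhook URL. A minimal sketch of that call; the webhook URL below is a placeholder (real URLs are generated per space in Google Chat):

```python
# Post a text message to a Google Chat incoming webhook.
import json
import requests

WEBHOOK_URL = "https://chat.googleapis.com/v1/spaces/.../messages?key=...&token=..."

response = requests.post(
    WEBHOOK_URL,
    headers={"Content-Type": "application/json; charset=UTF-8"},
    data=json.dumps({"text": "jimi flow finished"}),
    timeout=60,
)
print(response.status_code, response.text)
```
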
jyotsnab/gym-jsbsim
|
https://github.com/jyotsnab/gym-jsbsim
|
f1c491c0e0b5224b918fe55a7eed40fd35cb46d2
|
66e5bea3b0123557f0bf9d1092d19816b5045eea
|
6280ae19dd8d9b4ff789e8c64c083bb040a41460
|
refs/heads/master
| 2020-04-23T03:38:18.368333 | 2019-02-15T16:36:07 | 2019-02-15T16:36:07 | 170,884,004 | 0 | 0 | null | 2019-02-15T15:14:03 | 2019-02-15T13:52:28 | 2019-02-15T13:52:26 | null |
[
{
"alpha_fraction": 0.5328964591026306,
"alphanum_fraction": 0.5599929094314575,
"avg_line_length": 44.9065055847168,
"blob_id": "027d1cfc3d6d0a6d1ba366905136a38e000c0015",
"content_id": "175ee0e8fb51411526b626b7dade37132bea1408",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 11293,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 246,
"path": "/gym_jsbsim/config-state-action.ini",
"repo_name": "jyotsnab/gym-jsbsim",
"src_encoding": "UTF-8",
"text": "# config-state-action.ini\n# config simulation runtime\n[SIMULATION]\n#'simulation/dt', 'JSBSim simulation timestep [s]')\nsim_dt = 0\n#'simulation/sim-time-sec', 'Simulation time [s]') \nsim_time_s = 0 \n\n# Setting initial conditions\n[HEADING_CONTROL_TASK_CONDITION]\n# initial conditions\n#'ic/h-sl-ft', 'initial altitude MSL [ft]')\ninitial_altitude_ft = 5000 \n#'ic/terrain-elevation-ft', 'initial terrain alt [ft]')\ninitial_terrain_altitude_ft = 0 \n#'ic/long-gc-deg', 'initial geocentric longitude [deg]')\ninitial_longitude_geoc_deg = 0 \n#'ic/lat-geod-deg', 'initial geodesic latitude [deg]')\ninitial_latitude_geod_deg = 0 \n#'ic/u-fps', 'body frame x-axis velocity; positive forward [ft/s]')\ninitial_u_fps = 0 \n#'ic/v-fps', 'body frame y-axis velocity; positive right [ft/s]')\ninitial_v_fps = 0 \n#'ic/w-fps', 'body frame z-axis velocity; positive down [ft/s]')\ninitial_w_fps = 0 \n#'ic/p-rad_sec', 'roll rate [rad/s]')\ninitial_p_radps = 0 \n#'ic/q-rad_sec', 'pitch rate [rad/s]')\ninitial_q_radps = 0 \n#'ic/r-rad_sec', 'yaw rate [rad/s]')\ninitial_r_radps = 0 \n#'ic/roc-fpm', 'initial rate of climb [ft/min]')\ninitial_roc_fpm = 0 \n#'ic/psi-true-deg', 'initial (true) heading [deg]')\ninitial_heading_deg = 100\n# target heading deg\ntarget_heading_deg = 100 \n# controls command\n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)\nthrottle_cmd = 0.8 \n#'fcs/mixture-cmd-norm', 'engine mixture setting, normalised', 0., 1.)\nmixture_cmd = 0.8 \n#target time\ntarget_time = 400\n#target waypoint latitude\ntarget_latitude_geod_deg = 49.0447\n#target waypoint longitude\ntarget_longitude_geod_deg = -120.3206 \n\n# All possible state action variable\n[SA_ALL]\n# position and attitude\n# 'position/h-sl-ft', 'altitude above mean sea level [ft]', -1400, 85000\nstates = altitude_sl_ft \n# ('attitude/pitch-rad', 'pitch [rad]', -0.5 * math.pi, 0.5 * math.pi)\n pitch_rad \n# 'attitude/roll-rad', 'roll [rad]', -math.pi, math.pi\n roll_rad \n#'attitude/psi-deg', 'heading [deg]', 0, 360) \n heading_deg\n#'aero/beta-deg', 'sideslip [deg]', -180, +180) \n sideslip_deg \n#'position/lat-geod-deg', 'geocentric latitude [deg]', -90, 90) \n lat_geod_deg \n#'position/long-gc-deg', 'geodesic longitude [deg]', -180, 180)\n lng_geoc_deg \n#'position/distance-from-start-mag-mt', 'distance travelled from starting position [m]')\n dist_travel_m \n# velocities\n#'velocities/u-fps', 'body frame x-axis velocity [ft/s]', -2200, 2200)\n u_fps \n#'velocities/v-fps', 'body frame y-axis velocity [ft/s]', -2200, 2200)\n v_fps \n#'velocities/w-fps', 'body frame z-axis velocity [ft/s]', -2200, 2200)\n w_fps \n#'velocities/v-north-fps', 'velocity true north [ft/s]', float('-inf'), float('+inf'))\n v_north_fps \n#'velocities/v-east-fps', 'velocity east [ft/s]', float('-inf'), float('+inf'))\n v_east_fps \n#'velocities/v-down-fps', 'velocity downwards [ft/s]', float('-inf'), float('+inf'))\n v_down_fps \n#'velocities/p-rad_sec', 'roll rate [rad/s]', -2 * math.pi, 2 * math.pi)\n p_radps \n#'velocities/q-rad_sec', 'pitch rate [rad/s]', -2 * math.pi, 2 * math.pi)\n q_radps \n#'velocities/r-rad_sec', 'yaw rate [rad/s]', -2 * math.pi, 2 * math.pi)\n r_radps \n#'velocities/h-dot-fps', 'rate of altitude change [ft/s]')\n altitude_rate_fps \n# controls state\n#'fcs/left-aileron-pos-norm', 'left aileron position, normalised', -1, 1)\n aileron_left \n#'fcs/right-aileron-pos-norm', 'right aileron position, normalised', -1, 1)\n aileron_right \n#'fcs/elevator-pos-norm', 'elevator position, normalised', -1, 1)\n 
elevator \n#'fcs/rudder-pos-norm', 'rudder position, normalised', -1, 1) \n rudder \n#'fcs/throttle-pos-norm', 'throttle position, normalised', 0, 1)\n throttle \n#'gear/gear-pos-norm', 'landing gear position, normalised', 0, 1) \n gear \n# engines\n#'propulsion/engine/set-running', 'engine running (0/1 bool)')\n engine_running \n#'propulsion/set-running', 'set engine running (-1 for all engines)')\n all_engine_running \n#'propulsion/engine/thrust-lbs', 'engine thrust [lb]')\n engine_thrust_lbs \n# controls command\n#'fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)\n aileron_cmd \n#'fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.) \n elevator_cmd \n#'fcs/rudder-cmd-norm', 'rudder commanded position, normalised', -1., 1.)\n rudder_cmd \n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.) \n throttle_cmd \n#'fcs/mixture-cmd-norm', 'engine mixture setting, normalised', 0., 1.)\n mixture_cmd \n#'fcs/throttle-cmd-norm[1]', 'throttle 1 commanded position, normalised', 0., 1.) \n throttle_1_cmd \n#'fcs/mixture-cmd-norm[1]', 'engine mixture 1 setting, normalised', 0., 1.)\n mixture_1_cmd \n#'gear/gear-cmd-norm', 'all landing gear commanded position, normalised', 0, 1)\n gear_all_cmd \n# controls command\n#'fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)\nactions = aileron_cmd\n#'fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.)\n elevator_cmd\n#'fcs/rudder-cmd-norm', 'rudder commanded position, normalised', -1., 1.)\n rudder_cmd \n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)\n throttle_cmd \n#'fcs/mixture-cmd-norm', 'engine mixture setting, normalised', 0., 1.)\n mixture_cmd \n#'fcs/throttle-cmd-norm[1]', 'throttle 1 commanded position, normalised', 0., 1.)\n throttle_1_cmd \n#'fcs/mixture-cmd-norm[1]', 'engine mixture 1 setting, normalised', 0., 1.)\n mixture_1_cmd \n#'gear/gear-cmd-norm', 'all landing gear commanded position, normalised', 0, 1)\n gear_all_cmd \n# default state action\n[SA_DEFAULT]\n# position and attitude\n# 'position/h-sl-ft', 'altitude above mean sea level [ft]', -1400, 85000\nstates = altitude_sl_ft\n#'attitude/psi-deg', 'heading [deg]', 0, 360)\n heading_deg\n# ('attitude/pitch-rad', 'pitch [rad]', -0.5 * math.pi, 0.5 * math.pi)\n pitch_rad\n# 'attitude/roll-rad', 'roll [rad]', -math.pi, math.pi\n roll_rad\n#'velocities/u-fps', 'body frame x-axis velocity [ft/s]', -2200, 2200)\n u_fps\n#'velocities/v-fps', 'body frame y-axis velocity [ft/s]', -2200, 2200)\n v_fps\n#'velocities/w-fps', 'body frame z-axis velocity [ft/s]', -2200, 2200)\n w_fps\n#'aero/beta-deg', 'sideslip [deg]', -180, +180)\n sideslip_deg \n initial_altitude_ft\n initial_heading_deg \n# velocities\n#'velocities/v-east-fps', 'velocity east [ft/s]', float('-inf'), float('+inf'))\n #v_north_fps \n #v_east_fps \n#'velocities/v-down-fps', 'velocity downwards [ft/s]', float('-inf'), float('+inf'))\n #v_down_fps \n#'velocities/p-rad_sec', 'roll rate [rad/s]', -2 * math.pi, 2 * math.pi) \n #p_radps \n#'velocities/q-rad_sec', 'pitch rate [rad/s]', -2 * math.pi, 2 * math.pi)\n #q_radps \n#'velocities/r-rad_sec', 'yaw rate [rad/s]', -2 * math.pi, 2 * math.pi)\n #r_radps \n# controls state\n# engines\n# controls command\n#'fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)\n #aileron_cmd \n#'fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.)\n #elevator_cmd \n#'fcs/rudder-cmd-norm', 'rudder commanded position, 
normalised', -1., 1.)\n #rudder_cmd \n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)\n #throttle_cmd \n# controls command\n#'fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)\nactions = aileron_cmd \n#'fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.)\n elevator_cmd \n#'fcs/rudder-cmd-norm', 'rudder commanded position, normalised', -1., 1.)\n rudder_cmd \n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)\n throttle_cmd\n[SA_DEFAULT_CPLX]\n# position and attitude\n# 'position/h-sl-ft', 'altitude above mean sea level [ft]', -1400, 85000\nstates = altitude_sl_ft\n#'attitude/psi-deg', 'heading [deg]', 0, 360)\n #heading_deg\n# ('attitude/pitch-rad', 'pitch [rad]', -0.5 * math.pi, 0.5 * math.pi)\n #pitch_rad\n# 'attitude/roll-rad', 'roll [rad]', -math.pi, math.pi\n #roll_rad\n#'velocities/u-fps', 'body frame x-axis velocity [ft/s]', -2200, 2200)\n #u_fps\n#'velocities/v-fps', 'body frame y-axis velocity [ft/s]', -2200, 2200)\n #v_fps\n#'velocities/w-fps', 'body frame z-axis velocity [ft/s]', -2200, 2200)\n #w_fps\n#'aero/beta-deg', 'sideslip [deg]', -180, +180)\n #sideslip_deg \n# velocities\n#'velocities/v-east-fps', 'velocity east [ft/s]', float('-inf'), float('+inf'))\n v_north_fps \n v_east_fps \n#'velocities/v-down-fps', 'velocity downwards [ft/s]', float('-inf'), float('+inf'))\n v_down_fps \n#'velocities/p-rad_sec', 'roll rate [rad/s]', -2 * math.pi, 2 * math.pi) \n p_radps \n#'velocities/q-rad_sec', 'pitch rate [rad/s]', -2 * math.pi, 2 * math.pi)\n q_radps \n#'velocities/r-rad_sec', 'yaw rate [rad/s]', -2 * math.pi, 2 * math.pi)\n r_radps \n# controls state\n# engines\n# controls command\n#'fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)\n #aileron_cmd \n#'fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.)\n #elevator_cmd \n#'fcs/rudder-cmd-norm', 'rudder commanded position, normalised', -1., 1.)\n #rudder_cmd \n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)\n #throttle_cmd \n# controls command\n#'fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)\nactions = aileron_cmd \n#'fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.)\n elevator_cmd \n#'fcs/rudder-cmd-norm', 'rudder commanded position, normalised', -1., 1.)\n rudder_cmd \n#'fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)\n throttle_cmd\n"
},
{
"alpha_fraction": 0.6346036195755005,
"alphanum_fraction": 0.6470407843589783,
"avg_line_length": 54.14189147949219,
"blob_id": "7532884c497771b857c8296f14746698e746adc3",
"content_id": "6af6ce5d2bcf4990fe23ef657814fc638255541b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16322,
"license_type": "permissive",
"max_line_length": 193,
"num_lines": 296,
"path": "/gym_jsbsim/heading_control_task.py",
"repo_name": "jyotsnab/gym-jsbsim",
"src_encoding": "UTF-8",
"text": "import math\nimport random\nimport gym_jsbsim.properties as prp\nfrom gym_jsbsim import utils\nfrom gym_jsbsim.simulation import Simulation\nfrom gym_jsbsim.base_flight_task import BaseFlightTask\nfrom gym_jsbsim.properties import BoundedProperty, Property\nfrom gym_jsbsim.aircraft import Aircraft\nfrom typing import Dict, Tuple, Sequence, NamedTuple\nimport json\nimport configparser\n\n\n### Collect Config Value\nconfig = configparser.ConfigParser()\nprint(config.read('/home/ubuntu/gym-jsbsim/gym_jsbsim/config-state-action.ini'))\n#print(config.sections())\n\n### collect state var from config file\nstate_list = config.get('SA_DEFAULT', 'states').split('\\n')\nstate_var = ()\nfor s in state_list:\n #print(s)\n state_var = state_var + (prp.prp_dict[s],)\n\naction_list = config.get('SA_DEFAULT', 'actions').split('\\n')\naction_var = ()\nfor a in action_list:\n #print(a)\n action_var = action_var + (prp.prp_dict[a],)\n\nclass HeadingControlTask(BaseFlightTask):\n \"\"\"\n A task in which the agent must perform steady, level flight maintaining its\n initial heading.\n \"\"\"\n\n ### Set config var\n THROTTLE_CMD = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"throttle_cmd\"])\n MIXTURE_CMD = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"mixture_cmd\"])\n #INITIAL_HEADING_DEG = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"initial_heading_deg\"])\n #INITIAL_ALTITUDE_FT = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"initial_altitude_ft\"])\n #TARGET_HEADING_DEG = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"target_heading_deg\"])\n DEFAULT_EPISODE_TIME_S = 1000.\n ALTITUDE_SCALING_FT = 150\n MAX_ALTITUDE_DEVIATION_FT = 800 # terminate if altitude error exceeds this\n\n def __init__(self, step_frequency_hz: float, aircraft: Aircraft,\n episode_time_s: float = DEFAULT_EPISODE_TIME_S, debug: bool = False) -> None:\n \"\"\"\n Constructor.\n\n :param step_frequency_hz: the number of agent interaction steps per second\n :param aircraft: the aircraft used in the simulation\n \"\"\"\n self.max_time_s = episode_time_s\n episode_steps = math.ceil(self.max_time_s * step_frequency_hz)\n self.steps_left = BoundedProperty('info/steps_left', 'steps remaining in episode', 0,\n episode_steps)\n self.nb_episodes = Property('info/nb_episodes', 'number of episodes since the beginning')\n self.aircraft = aircraft\n\n\n\n #self.state_variables = (prp.pitch_rad, prp.roll_rad, prp.sideslip_deg, prp.v_north_fps, prp.v_east_fps, prp.altitude_sl_ft, # minimal state variables for the task\n # prp.v_down_fps, prp.p_radps, prp.q_radps, prp.r_radps) # additional state variables used for reward shaping\n self.state_variables = state_var\n #print(\"state_variables = \" , self.state_variables)\n #self.action_variables = (prp.aileron_cmd, prp.elevator_cmd, prp.rudder_cmd)\n self.action_variables = action_var\n #print(\"action_variables = \", self.action_variables)\n super().__init__(debug)\n\n def get_initial_conditions(self) -> Dict[Property, float]:\n self.INITIAL_ALTITUDE_FT = random.uniform(1000, 32000)\n self.INITIAL_HEADING_DEG = random.uniform(prp.heading_deg.min, prp.heading_deg.max)\n #print(\"self.INITIAL_ALTITUDE_FT\", self.INITIAL_ALTITUDE_FT)\n #print(\"self.INITIAL_HEADING_DEG\", self.INITIAL_HEADING_DEG)\n initial_conditions = {prp.initial_altitude_ft: self.INITIAL_ALTITUDE_FT,\n prp.initial_u_fps: self.aircraft.get_cruise_speed_fps(),\n prp.initial_v_fps: 0,\n prp.initial_w_fps: 0,\n prp.initial_p_radps: 0,\n prp.initial_latitude_geod_deg: 49.243824,\n 
prp.initial_longitude_geoc_deg: -121.887340,\n prp.initial_q_radps: 0,\n prp.initial_r_radps: 0,\n prp.initial_roc_fpm: 0,\n prp.all_engine_running: -1,\n prp.initial_heading_deg: self.INITIAL_HEADING_DEG,\n self.nb_episodes: 0\n }\n return initial_conditions\n\n def _update_custom_properties(self, sim: Simulation) -> None:\n self._decrement_steps_left(sim)\n\n def _decrement_steps_left(self, sim: Simulation):\n sim[self.steps_left] -= 1\n\n def _is_terminal(self, sim: Simulation, state: NamedTuple) -> bool:\n # terminate when time >= max, but use math.isclose() for float equality test\n terminal_step = sim[self.steps_left] <= 0\n #terminal_step = sim[prp.dist_travel_m] >= 100000\n return terminal_step or self._altitude_out_of_bounds(sim, state)\n \n def _get_reward(self, sim: Simulation, last_state: NamedTuple, action: NamedTuple, new_state: NamedTuple) -> float:\n # inverse of the proportional absolute value of the minimal angle between the initial and current heading ... \n abs_h = math.fabs(self.INITIAL_HEADING_DEG - last_state.attitude_psi_deg)\n heading_r = 1.0/math.sqrt((0.1*min(360-abs_h, abs_h)+1))\n # inverse of the proportional absolute value between the initial and current altitude ... \n alt_r = 1.0/math.sqrt((0.1*math.fabs(self.INITIAL_ALTITUDE_FT - last_state.position_h_sl_ft)+1))\n\n return (heading_r + alt_r)/2.0\n \n def _get_reward_cplx(self, sim: Simulation, last_state: NamedTuple, action: NamedTuple, new_state: NamedTuple) -> float:\n # Get \n track_deg = prp.Vector2(last_state.velocities_v_east_fps, last_state.velocities_v_north_fps).heading_deg()\n normalised_error_track_deg = math.fabs(utils.reduce_reflex_angle_deg(track_deg - self.INITIAL_HEADING_DEG)) / 180.0\n normalised_altitude_error = min(math.fabs(last_state.position_h_sl_ft - self.INITIAL_ALTITUDE_FT) / self.INITIAL_ALTITUDE_FT, 1.0)\n target_reward = - normalised_error_track_deg - normalised_altitude_error\n\n # Get negative reward proportional to normalised speed angles and vertical speed\n normalised_angle_speed = min((math.fabs(last_state.velocities_p_rad_sec) + math.fabs(last_state.velocities_q_rad_sec) + math.fabs(last_state.velocities_r_rad_sec)) / (3*2*math.pi), 1.0)\n normalised_vertical_speed = min(math.fabs(last_state.velocities_v_down_fps) / self.INITIAL_ALTITUDE_FT, 1.0)\n stabilisation_reward = - math.exp(- sim[self.nb_episodes] / 100) * (normalised_angle_speed + normalised_vertical_speed)\n\n return target_reward + stabilisation_reward\n\n def _altitude_out_of_bounds(self, sim: Simulation, state: NamedTuple) -> bool:\n altitude_error_ft = math.fabs(state.position_h_sl_ft - self.INITIAL_ALTITUDE_FT)\n return abs(altitude_error_ft) > self.MAX_ALTITUDE_DEVIATION_FT\n\n def _new_episode_init(self, sim: Simulation) -> None:\n super()._new_episode_init(sim)\n sim.set_throttle_mixture_controls(self.THROTTLE_CMD, self.MIXTURE_CMD)\n sim[self.steps_left] = self.steps_left.max\n sim[self.nb_episodes] += 1\n\n def get_props_to_output(self, sim: Simulation) -> Tuple:\n return (*self.state_variables, prp.lat_geod_deg, prp.lng_geoc_deg, self.steps_left)\n\n\nclass HeadingControlTask_1Bis(BaseFlightTask):\n \"\"\"\n A task in which the agent must perform steady, level flight maintaining its\n initial heading and reach a target waypoint at target time.\n \"\"\"\n\n ### Set config var\n THROTTLE_CMD = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"throttle_cmd\"])\n MIXTURE_CMD = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"mixture_cmd\"])\n INITIAL_HEADING_DEG = 
float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"initial_heading_deg\"])\n INITIAL_ALTITUDE_FT = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"initial_altitude_ft\"])\n TARGET_HEADING_DEG = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"target_heading_deg\"])\n TARGET_TIME = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"target_time\"])\n TARGET_WP_LAT_DEG = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"target_latitude_geod_deg\"])\n TARGET_WP_LON_DEG = float(config[\"HEADING_CONTROL_TASK_CONDITION\"][\"target_longitude_geod_deg\"])\n DEFAULT_EPISODE_TIME_S = TARGET_TIME+300\n ALTITUDE_SCALING_FT = 150\n MAX_ALTITUDE_DEVIATION_FT = 1000 # terminate if altitude error exceeds this\n\n\n def __init__(self, step_frequency_hz: float, aircraft: Aircraft,\n episode_time_s: float = DEFAULT_EPISODE_TIME_S, debug: bool = False) -> None:\n \"\"\"\n Constructor.\n\n :param step_frequency_hz: the number of agent interaction steps per second\n :param aircraft: the aircraft used in the simulation\n \"\"\"\n self.max_time_s = episode_time_s\n episode_steps = math.ceil(self.max_time_s * step_frequency_hz)\n self.steps_left = BoundedProperty('info/steps_left', 'steps remaining in episode', 0,\n episode_steps)\n self.nb_episodes = Property('info/nb_episodes', 'number of episodes since the beginning')\n self.aircraft = aircraft\n\n # self.state_variables = (prp.pitch_rad, prp.roll_rad, prp.sideslip_deg, prp.v_north_fps, prp.v_east_fps, prp.altitude_sl_ft, # minimal state variables for the task\n # prp.v_down_fps, prp.p_radps, prp.q_radps, prp.r_radps) # additional state variables used for reward shaping\n self.state_variables = state_var\n print(\"state_variables = \", self.state_variables)\n # self.action_variables = (prp.aileron_cmd, prp.elevator_cmd, prp.rudder_cmd)\n self.action_variables = action_var\n print(\"action_variables = \", self.action_variables)\n super().__init__(debug)\n\n def get_initial_conditions(self) -> Dict[Property, float]:\n initial_conditions = {prp.initial_altitude_ft: self.INITIAL_ALTITUDE_FT,\n prp.initial_u_fps: self.aircraft.get_cruise_speed_fps(),\n prp.initial_v_fps: 0,\n prp.initial_w_fps: 0,\n prp.initial_p_radps: 0,\n prp.initial_latitude_geod_deg: 47.4498333,\n prp.initial_longitude_geoc_deg: -122.3118333,\n prp.initial_q_radps: 0,\n prp.initial_r_radps: 0,\n prp.initial_roc_fpm: 0,\n prp.all_engine_running: -1,\n prp.initial_heading_deg: self.INITIAL_HEADING_DEG,\n self.nb_episodes: 0\n }\n return initial_conditions\n\n def _update_custom_properties(self, sim: Simulation) -> None:\n self._decrement_steps_left(sim)\n\n\n def _decrement_steps_left(self, sim: Simulation):\n sim[self.steps_left] -= 1\n\n def _is_terminal(self, sim: Simulation, state: NamedTuple) -> bool:\n # terminate when time >= max, but use math.isclose() for float equality test\n # check decimal accuracy\n # do we restart simulation if the heading is extremely off track?\n\n terminal_step = sim[self.steps_left] <= 0\n reached_target = self._is_at_target_wp(sim,state)\n\n return reached_target or terminal_step or self._altitude_out_of_bounds(sim, state)\n\n def _is_at_target_wp(self,sim: Simulation, state: NamedTuple)->bool:\n # TBD to check the floating point accuracy\n float_accuracy = 0.0000001\n reached_target = (self.TARGET_WP_LAT_DEG-sim[prp.lat_geod_deg])<float_accuracy and (self.TARGET_WP_LON_DEG-sim[prp.lng_geoc_deg])<float_accuracy\n return reached_target\n\n def _get_reward(self, sim: Simulation, last_state: NamedTuple, action: NamedTuple,\n new_state: NamedTuple) -> float:\n 
heading_r = 1.0 / math.sqrt((0.1 * math.fabs(self.TARGET_HEADING_DEG - last_state.attitude_psi_deg) + 1))\n # alt_r = 2*(self.INITIAL_ALTITUDE_FT/360. - new_state.position_h_sl_ft/360.)\n # print(\"ALTITUDE REWARD !!! \", self.INITIAL_ALTITUDE_FT, last_state.position_h_sl_ft)\n alt_r = 1.0 / math.sqrt((0.1 * math.fabs(self.INITIAL_ALTITUDE_FT - last_state.position_h_sl_ft) + 1))\n # print(heading_r + alt_r, -(heading_r + alt_r), -(heading_r + alt_r)/2.)\n time_r = 0\n if self._is_at_target_wp(sim,last_state):\n time_r = 1.0 / math.sqrt((0.1 * math.fabs(self.TARGET_TIME - sim.get_sim_time()) + 1))\n\n return (heading_r + alt_r+ time_r) / 3.0\n\n def _get_reward_cmplx(self, sim: Simulation, last_state: NamedTuple, action: NamedTuple, new_state: NamedTuple) -> float:\n # Get negative reward proportional to normalised heading and altitude errors\n track_deg = prp.Vector2(last_state.velocities_v_east_fps, last_state.velocities_v_north_fps).heading_deg()\n normalised_error_track_deg = math.fabs(\n utils.reduce_reflex_angle_deg(track_deg - self.INITIAL_HEADING_DEG)) / 180.0\n normalised_altitude_error = min(\n math.fabs(last_state.position_h_sl_ft - self.INITIAL_ALTITUDE_FT) / self.INITIAL_ALTITUDE_FT, 1.0)\n target_reward = - normalised_error_track_deg - normalised_altitude_error\n\n # Get negative reward proportional to normalised speed angles and vertical speed\n normalised_angle_speed = min((math.fabs(last_state.velocities_p_rad_sec) + math.fabs(\n last_state.velocities_q_rad_sec) + math.fabs(last_state.velocities_r_rad_sec)) / (3 * 2 * math.pi), 1.0)\n normalised_vertical_speed = min(math.fabs(last_state.velocities_v_down_fps) / self.INITIAL_ALTITUDE_FT, 1.0)\n stabilisation_reward = - math.exp(- sim[self.nb_episodes] / 100) * (normalised_angle_speed + normalised_vertical_speed)\n\n return target_reward + stabilisation_reward\n\n def _altitude_out_of_bounds(self, sim: Simulation, state: NamedTuple) -> bool:\n altitude_error_ft = math.fabs(state.position_h_sl_ft - self.INITIAL_ALTITUDE_FT)\n return abs(altitude_error_ft) > self.MAX_ALTITUDE_DEVIATION_FT\n\n def _heading_out_of_bounds(self,sim:Simulation,state:NamedTuple,new_state:NamedTuple) -> bool:\n heading_error_deg = math.fabs(self.TARGET_HEADING_DEG - new_state.attitude_psi_deg)\n return heading_error_deg>90.0\n\n def _new_episode_init(self, sim: Simulation) -> None:\n super()._new_episode_init(sim)\n sim.set_throttle_mixture_controls(self.THROTTLE_CMD, self.MIXTURE_CMD)\n sim[self.steps_left] = self.steps_left.max\n sim[self.nb_episodes] += 1\n\n def get_props_to_output(self, sim: Simulation) -> Tuple:\n return (*self.state_variables, prp.lat_geod_deg, prp.lng_geoc_deg, self.steps_left)\n\n\nclass TurnHeadingChangeLevelControlTask(HeadingControlTask):\n \"\"\"\n A task in which the agent must make a turn and change its altitude\n \"\"\"\n\n TARGET_HEADING_DEG = 360\n TARGET_ALTITUDE_FT = 3000\n\n def _get_reward(self, sim: Simulation, last_state: NamedTuple, action: NamedTuple, new_state: NamedTuple) -> float:\n # Get negative reward proportional to normalised heading and altitude errors\n track_deg = prp.Vector2(last_state.velocities_v_east_fps, last_state.velocities_v_north_fps).heading_deg()\n normalised_error_track_deg = math.fabs(utils.reduce_reflex_angle_deg(track_deg - self.INITIAL_HEADING_DEG)) / 180.0\n normalised_altitude_error = min(math.fabs(last_state.position_h_sl_ft - self.TARGET_ALTITUDE_FT) / self.INITIAL_ALTITUDE_FT, 1.0)\n target_reward = - normalised_error_track_deg - normalised_altitude_error\n\n # Get negative 
reward proportional to normalised speed angles and vertical speed\n normalised_angle_speed = min((math.fabs(last_state.velocities_p_rad_sec) + math.fabs(last_state.velocities_q_rad_sec) + math.fabs(last_state.velocities_r_rad_sec)) / (3*2*math.pi), 1.0)\n normalised_vertical_speed = min(math.fabs(last_state.velocities_v_down_fps) / self.INITIAL_ALTITUDE_FT, 1.0)\n stabilisation_reward = - math.exp(- sim[self.nb_episodes] / 100) * (normalised_angle_speed + normalised_vertical_speed)\n \n return target_reward + stabilisation_reward\n"
}
] | 2 |
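
The reward shaping in `HeadingControlTask._get_reward` above maps each error term through `1/sqrt(0.1*err + 1)`, so the reward is 1.0 at zero error and decays smoothly, with heading error wrapped to the shortest angular distance. A minimal standalone sketch of that function (names are illustrative):

```python
# Reward of the form used by HeadingControlTask: average of a heading term
# and an altitude term, each 1/sqrt(0.1*error + 1).
import math

def heading_altitude_reward(target_hdg_deg, hdg_deg, target_alt_ft, alt_ft):
    abs_h = math.fabs(target_hdg_deg - hdg_deg)
    heading_err = min(360 - abs_h, abs_h)       # shortest angular distance
    heading_r = 1.0 / math.sqrt(0.1 * heading_err + 1)
    alt_r = 1.0 / math.sqrt(0.1 * math.fabs(target_alt_ft - alt_ft) + 1)
    return (heading_r + alt_r) / 2.0

print(heading_altitude_reward(100, 100, 5000, 5000))  # 1.0, exactly on target
print(heading_altitude_reward(100, 280, 5000, 5800))  # ~0.17, far off target
```
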
sws144/learning-python
|
https://github.com/sws144/learning-python
|
29974537f789fe7c9c2b5496dcd5d62627f862af
|
feba2777f3ab40f32cfc7b7077d2a16819b40a59
|
d11f518b0b073f605c766b8d7345b56a6f74aa8b
|
refs/heads/master
| 2023-01-12T00:27:38.765549 | 2022-01-17T21:34:27 | 2022-01-17T21:34:27 | 193,995,470 | 0 | 0 | null | 2019-06-27T00:28:41 | 2022-01-07T02:58:02 | 2022-12-27T16:27:51 |
Jupyter Notebook
|
[
{
"alpha_fraction": 0.7022411823272705,
"alphanum_fraction": 0.7065101265907288,
"avg_line_length": 20.79069709777832,
"blob_id": "2d0f9a3cf8ea0a573363f139211984687f871332",
"content_id": "1596831ce02fda0e82066f811da78024fe5f2aae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 937,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 43,
"path": "/dockerized-scalable-ml/api/iris_svm_train.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# %% [markdown]\n# Train model to classify images\n\n# %% load required packages\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.svm import SVC\n\n# %% load data\n\niris = datasets.load_iris()\n# understand dataset\n# print(\"Iris dataset description: \", iris['DESCR'])\n\nX = iris.data # Features\ny = iris.target # Target variable\ntgt = iris.target_names\nprint(tgt)\ndesc = iris.DESCR\nprint(desc)\n\n# %% split data\nX_train , X_test , y_train, y_test = train_test_split(X, y,test_size=0.2,random_state = 10)\n\n# %% Use SVM classifier\nmodel = SVC(kernel='linear').fit(X_train,y_train)\n\n# %% Calculate test prediction\ny_pred = model.predict(X_test)\nprint(\"model score: \" + str(model.score(X_test, y_test.ravel())))\n\n# %% save model\njoblib.dump(model,'model/iris_svm_model.pkl',compress=True) \n\n\n\n\n\n# %%\n"
},
{
"alpha_fraction": 0.6775068044662476,
"alphanum_fraction": 0.6991869807243347,
"avg_line_length": 36,
"blob_id": "8985c000537c2ee02e26184451ede573aaf8a2d3",
"content_id": "45688c83471d291735e3232ead887a88e2c8e906",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 10,
"path": "/PullDataFromMSAccess.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# https://stackoverflow.com/questions/39835770/read-data-from-pyodbc-to-pandas\n\nimport pyodbc\nimport pandas\ncnxn = pyodbc.connect(r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n r'DBQ=C:\\users\\bartogre\\desktop\\data.mdb;')\nsql = \"Select sum(CYTM), sum(PYTM), BRAND From data Group By BRAND\"\ndata = pandas.read_sql(sql,cnxn)\n\n# not working yet"
},
{
"alpha_fraction": 0.5818815231323242,
"alphanum_fraction": 0.588850200176239,
"avg_line_length": 12.666666984558105,
"blob_id": "eff52f09a5a1c67ed38b477c677b8b30aa9d8927",
"content_id": "0ac3e92eceb93890c5a994349d0a46bcc3b67447",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 21,
"path": "/glmm_lossdev/Pipfile",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "[[source]]\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\npandas = \"*\"\nchainladder = \"*\"\nmatplotlib = \"*\"\nipykernel = \"*\"\nstatsmodels = \"*\"\ngpboost = \"*\"\nshap = \"*\"\npygam = \"*\"\nblack = \"*\"\nyellowbrick = \"*\"\n\n[dev-packages]\n\n[requires]\npython_version = \"3.8\"\n"
},
{
"alpha_fraction": 0.6606975793838501,
"alphanum_fraction": 0.678337574005127,
"avg_line_length": 42.25433349609375,
"blob_id": "57aab0aa9e5510a7ccdabdfd866fbb3c3659c5d7",
"content_id": "7091c446d6a6217a2800495ebefd572afdd05fe1",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7483,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 173,
"path": "/lime-experiments-master/data_trusting.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "import sys\nimport copy\nimport os\nimport numpy as np\nimport scipy as sp\nimport json\nimport random\nimport sklearn\nfrom sklearn import ensemble\nfrom sklearn import svm\nfrom sklearn import tree\nfrom sklearn import neighbors\nimport pickle\nimport explainers\nimport parzen_windows\nimport embedding_forest\nfrom load_datasets import *\nimport argparse\nimport collections\n \ndef get_classifier(name, vectorizer):\n if name == 'logreg':\n return linear_model.LogisticRegression(fit_intercept=True)\n if name == 'random_forest':\n return ensemble.RandomForestClassifier(n_estimators=1000, random_state=1, max_depth=5, n_jobs=10)\n if name == 'svm':\n return svm.SVC(probability=True, kernel='rbf', C=10,gamma=0.001)\n if name == 'tree':\n return tree.DecisionTreeClassifier(random_state=1)\n if name == 'neighbors':\n return neighbors.KNeighborsClassifier()\n if name == 'embforest':\n return embedding_forest.EmbeddingForest(vectorizer)\n\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate some explanations')\n parser.add_argument('--dataset', '-d', type=str, required=True,help='dataset name')\n parser.add_argument('--algorithm', '-a', type=str, required=True, help='algorithm_name')\n parser.add_argument('--num_features', '-k', type=int, required=True, help='num features')\n parser.add_argument('--percent_untrustworthy', '-u', type=float, required=True, help='percentage of untrustworthy features. like 0.1')\n parser.add_argument('--num_rounds', '-r', type=int, required=True, help='num rounds')\n args = parser.parse_args()\n dataset = args.dataset\n train_data, train_labels, test_data, test_labels, class_names = LoadDataset(dataset)\n vectorizer = CountVectorizer(lowercase=False, binary=True) \n train_vectors = vectorizer.fit_transform(train_data)\n test_vectors = vectorizer.transform(test_data)\n terms = np.array(list(vectorizer.vocabulary_.keys()))\n indices = np.array(list(vectorizer.vocabulary_.values()))\n inverse_vocabulary = terms[np.argsort(indices)]\n\n np.random.seed(1)\n classifier = get_classifier(args.algorithm, vectorizer)\n classifier.fit(train_vectors, train_labels)\n\n\n np.random.seed(1)\n untrustworthy_rounds = []\n all_features = range(train_vectors.shape[1])\n num_untrustworthy = int(train_vectors.shape[1] * args.percent_untrustworthy)\n for _ in range(args.num_rounds):\n untrustworthy_rounds.append(np.random.choice(all_features, num_untrustworthy, replace=False))\n \n rho = 25\n kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho ** 2))\n LIME = explainers.GeneralizedLocalExplainer(kernel, explainers.data_labels_distances_mapping_text, num_samples=15000, return_mean=True, verbose=False, return_mapped=True)\n\n parzen = parzen_windows.ParzenWindowClassifier()\n cv_preds = sklearn.cross_validation.cross_val_predict(classifier, train_vectors, train_labels, cv=5)\n parzen.fit(train_vectors, cv_preds)\n sigmas = {'multi_polarity_electronics': {'neighbors': 0.75, 'svm': 10.0, 'tree': 0.5,\n 'logreg': 0.5, 'random_forest': 0.5, 'embforest': 0.75},\n 'multi_polarity_kitchen': {'neighbors': 1.0, 'svm': 6.0, 'tree': 0.75,\n 'logreg': 0.25, 'random_forest': 6.0, 'embforest': 1.0},\n 'multi_polarity_dvd': {'neighbors': 0.5, 'svm': 0.75, 'tree': 8.0, 'logreg':\n 0.75, 'random_forest': 0.5, 'embforest': 5.0}, 'multi_polarity_books':\n {'neighbors': 0.5, 'svm': 7.0, 'tree': 2.0, 'logreg': 1.0, 'random_forest':\n 1.0, 'embforest': 3.0}}\n parzen.sigma = sigmas[dataset][args.algorithm]\n\n random = explainers.RandomExplainer()\n exps = {}\n explainer_names = ['LIME', 
'random', 'greedy', 'parzen']\n for expl in explainer_names:\n exps[expl] = []\n\n predictions = classifier.predict(test_vectors)\n predict_probas = classifier.predict_proba(test_vectors)[:,1]\n for i in range(test_vectors.shape[0]):\n print i\n sys.stdout.flush()\n exp, mean = LIME.explain_instance(test_vectors[i], 1, classifier.predict_proba, args.num_features)\n exps['LIME'].append((exp, mean))\n exp = parzen.explain_instance(test_vectors[i], 1, classifier.predict_proba, args.num_features, None) \n mean = parzen.predict_proba(test_vectors[i])[1]\n exps['parzen'].append((exp, mean))\n\n exp = random.explain_instance(test_vectors[i], 1, None, args.num_features, None)\n exps['random'].append(exp)\n\n exp = explainers.explain_greedy_martens(test_vectors[i], predictions[i], classifier.predict_proba, args.num_features)\n exps['greedy'].append(exp)\n\n precision = {}\n recall = {}\n f1 = {}\n for name in explainer_names:\n precision[name] = []\n recall[name] = []\n f1[name] = []\n flipped_preds_size = []\n for untrustworthy in untrustworthy_rounds:\n t = test_vectors.copy()\n t[:, untrustworthy] = 0\n mistrust_idx = np.argwhere(classifier.predict(t) != classifier.predict(test_vectors)).flatten()\n print 'Number of suspect predictions', len(mistrust_idx)\n shouldnt_trust = set(mistrust_idx)\n flipped_preds_size.append(len(shouldnt_trust))\n mistrust = collections.defaultdict(lambda:set())\n trust = collections.defaultdict(lambda: set())\n trust_fn = lambda prev, curr: (prev > 0.5 and curr > 0.5) or (prev <= 0.5 and curr <= 0.5)\n trust_fn_all = lambda exp, unt: len([x[0] for x in exp if x[0] in unt]) == 0\n for i in range(test_vectors.shape[0]):\n exp, mean = exps['LIME'][i]\n prev_tot = predict_probas[i]\n prev_tot2 = sum([x[1] for x in exp]) + mean\n tot = prev_tot2 - sum([x[1] for x in exp if x[0] in untrustworthy])\n trust['LIME'].add(i) if trust_fn(tot, prev_tot) else mistrust['LIME'].add(i)\n\n exp, mean = exps['parzen'][i]\n prev_tot = mean\n tot = mean - sum([x[1] for x in exp if x[0] in untrustworthy])\n trust['parzen'].add(i) if trust_fn(tot, prev_tot) else mistrust['parzen'].add(i)\n exp = exps['random'][i]\n trust['random'].add(i) if trust_fn_all(exp, untrustworthy) else mistrust['random'].add(i)\n\n exp = exps['greedy'][i]\n trust['greedy'].add(i) if trust_fn_all(exp, untrustworthy) else mistrust['greedy'].add(i)\n\n for expl in explainer_names:\n # switching the definition\n false_positives = set(trust[expl]).intersection(shouldnt_trust)\n true_positives = set(trust[expl]).difference(shouldnt_trust)\n false_negatives = set(mistrust[expl]).difference(shouldnt_trust)\n true_negatives = set(mistrust[expl]).intersection(shouldnt_trust)\n\n try:\n prec= len(true_positives) / float(len(true_positives) + len(false_positives))\n except:\n prec= 0\n try:\n rec= float(len(true_positives)) / (len(true_positives) + len(false_negatives))\n except:\n rec= 0\n precision[expl].append(prec)\n recall[expl].append(rec)\n f1z = 2 * (prec * rec) / (prec + rec) if (prec and rec) else 0\n f1[expl].append(f1z)\n\n print 'Average number of flipped predictions:', np.mean(flipped_preds_size), '+-', np.std(flipped_preds_size)\n print 'Precision:'\n for expl in explainer_names:\n print expl, np.mean(precision[expl]), '+-', np.std(precision[expl]), 'pvalue', sp.stats.ttest_ind(precision[expl], precision['LIME'])[1].round(4)\n print\n print 'Recall:'\n for expl in explainer_names:\n print expl, np.mean(recall[expl]), '+-', np.std(recall[expl]), 'pvalue', sp.stats.ttest_ind(recall[expl], 
recall['LIME'])[1].round(4)\n print \n print 'F1:'\n for expl in explainer_names:\n print expl, np.mean(f1[expl]), '+-', np.std(f1[expl]), 'pvalue', sp.stats.ttest_ind(f1[expl], f1['LIME'])[1].round(4)\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6543415188789368,
"alphanum_fraction": 0.6622351408004761,
"avg_line_length": 42.76363754272461,
"blob_id": "53ae48eed0dcc9ddbe7b087387ea48e35efd2a45",
"content_id": "8a61455c998a322ec885384a1e9fa6fd914aa377",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2407,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 55,
"path": "/lime-experiments-master/load_datasets.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "import random\nimport os\nimport re\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import linear_model\nfrom sklearn import tree\nfrom sklearn import svm\n# PUT POLARITY DATASET PATH HERE\nPOLARITY_PATH = '/Users/marcotcr/phd/datasets/multi_domain_polarity/'\ndef LoadDataset(dataset_name):\n if dataset_name.endswith('ng'):\n if dataset_name == '2ng':\n cats = ['alt.atheism', 'soc.religion.christian']\n class_names = ['Atheism', 'Christianity']\n if dataset_name == 'talkng':\n cats = ['talk.politics.guns', 'talk.politics.misc']\n class_names = ['Guns', 'PoliticalMisc']\n if dataset_name == '3ng':\n cats = ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.windows.x']\n class_names = ['windows.misc', 'ibm.hardware', 'windows.x']\n newsgroups_train = fetch_20newsgroups(subset='train',categories=cats)\n newsgroups_test = fetch_20newsgroups(subset='test',categories=cats)\n train_data = newsgroups_train.data\n train_labels = newsgroups_train.target\n test_data = newsgroups_test.data\n test_labels = newsgroups_test.target\n return train_data, train_labels, test_data, test_labels, class_names\n if dataset_name.startswith('multi_polarity_'):\n name = dataset_name.split('_')[2]\n return LoadMultiDomainDataset(POLARITY_PATH + name)\ndef LoadMultiDomainDataset(path_data, remove_bigrams=True):\n random.seed(1)\n pos = []\n neg = []\n def get_words(line, remove_bigrams=True):\n z = [tuple(x.split(':')) for x in re.findall('\\w*?:\\d', line)]\n if remove_bigrams:\n z = ' '.join([' '.join([x[0]] * int(x[1])) for x in z if '_' not in x[0]])\n else:\n z = ' '.join([' '.join([x[0]] * int(x[1])) for x in z])\n return z\n for line in open(os.path.join(path_data, 'negative.review')):\n neg.append(get_words(line, remove_bigrams))\n for line in open(os.path.join(path_data, 'positive.review')):\n pos.append(get_words(line, remove_bigrams))\n random.shuffle(pos)\n random.shuffle(neg)\n split_pos = int(len(pos) * .8)\n split_neg = int(len(neg) * .8)\n train_data = pos[:split_pos] + neg[:split_neg]\n test_data = pos[split_pos:] + neg[split_neg:]\n train_labels = [1] * len(pos[:split_pos]) + [0] * len(neg[:split_neg])\n test_labels = [1] * len(pos[split_pos:]) + [0] * len(neg[split_neg:])\n return train_data, np.array(train_labels), test_data, np.array(test_labels), ['neg', 'pos']\n"
},
{
"alpha_fraction": 0.5242967009544373,
"alphanum_fraction": 0.6099744439125061,
"avg_line_length": 22.02941131591797,
"blob_id": "9ac948c0df1197ecc6b955ea054c3b2b19f719e1",
"content_id": "e13a74b3482f580419beb37bb794e5ea2e4600c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 782,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 34,
"path": "/dockerized-scalable-ml/README.md",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Model inception to deployment example\n\n[Source](https://medium.com/datadriveninvestor/from-model-inception-to-deployment-adce1f5ed9d6)\n\nRun ```docker-compose build``` and then ```docker-compose up``` to run\n\n## Test 1 \n\nThen go to website http://192.168.99.100:8000 per nginx.conf file (the docker-machine virtual ip)\n\n## Test 2 api\n \nUse Postman \nhttp://192.168.99.100:8000/predict\n\n\n```bash\ncurl --location --request POST 'http://192.168.99.100:8000/predict' \\\n--header 'Content-Type: application/json' \\\n--data-raw '[\n {\n \"sepal_length\": 6.3,\n \"sepal_width\": 2.3,\n \"petal_length\": 4.4,\n \"petal_width\": 1.3\n } , \n {\n \"sepal_length\": 6.3,\n \"sepal_width\": 2.3,\n \"petal_length\": 4.4,\n \"petal_width\": 5.3\n }\n]'\n```"
},
{
"alpha_fraction": 0.6096600890159607,
"alphanum_fraction": 0.6168157458305359,
"avg_line_length": 32.25,
"blob_id": "bc80d22ee2c820ebf53545161f142e6886904ae0",
"content_id": "2f9d3fd89df6a5136851a876f52a108950b827eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2795,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 84,
"path": "/simfin.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# simfin data example 1\n# https://simfin.com/data/access/download\n# output-mixeddet-quarters-gaps-publish-semicolon-wide\n\n# SW API key\n# 6BEqsSZGmXpbrRjS06PoHU8l78R3gBqS\n\n# https://github.com/SimFin/api-tutorial\n\n\n# import pandas\n# location = 'C:/Users/SW/Downloads/output-mixeddet-quarters-gaps-publish-semicolon-wide/'\n# firsttest = pandas.read_csv(location + 'output-semicolon-wide.csv', \n# sep=';', nrows = 5)\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\ninstall(\"requests\")\n\nimport requests\n\napi_key = \"6BEqsSZGmXpbrRjS06PoHU8l78R3gBqS\"\ntickers = [\"AAPL\",\"NVDA\",\"WMT\"]\n\nsim_ids = []\nfor ticker in tickers:\n request_url = f'https://simfin.com/api/v1/info/find-id/ticker/{ticker}?api-key={api_key}'\n content = requests.get(request_url)\n data = content.json()\n if \"error\" in data or len(data) < 1:\n sim_ids.append(None)\n else:\n sim_ids.append(data[0]['simId'])\nprint(sim_ids)\n\n# define time periods for financial statement data\nstatement_type = \"pl\"\ntime_periods = [\"Q1\",\"Q2\",\"Q3\",\"Q4\"]\nyear_start = 2013\nyear_end = 2018\n\n# prep writer\ninstall(\"pandas\")\nimport pandas as pd\ninstall(\"xlsxwriter\")\nimport xlsxwriter\n\nwriter = pd.ExcelWriter(\"simfin_data.xlsx\", engine='xlsxwriter')\ndata = {}\n\n# get standardized financial statement\ndata = {}\nfor idx, sim_id in enumerate(sim_ids):\n d = data[tickers[idx]] = {\"Line Item\": []}\n if sim_id is not None:\n for year in range(year_start, year_end + 1):\n for time_period in time_periods:\n period_identifier = time_period + \"-\" + str(year)\n if period_identifier not in d:\n d[period_identifier] = []\n request_url = f'https://simfin.com/api/v1/companies/id/{sim_id}/statements/standardised?stype={statement_type}&fyear={year}&ptype={time_period}&api-key={api_key}'\n content = requests.get(request_url)\n statement_data = content.json()\n # collect line item names once, they are the same for all companies with the standardised data\n if len(d['Line Item']) == 0:\n d['Line Item'] = [x['standardisedName'] for x in statement_data['values']]\n if 'values' in statement_data:\n for item in statement_data['values']:\n d[period_identifier].append(item['valueChosen'])\n else:\n # no data found for time period\n d[period_identifier] = [None for _ in d['Line Item']]\n\n # saving to xlsx\n # convert to pandas dataframe\n df = pd.DataFrame(data=d)\n # save in the XLSX file configured earlier\n df.to_excel(writer, sheet_name=tickers[idx])\n writer.save()\nwriter.close()\n\n\n"
},
{
"alpha_fraction": 0.8089887499809265,
"alphanum_fraction": 0.8089887499809265,
"avg_line_length": 29,
"blob_id": "f8c5870ae326780761ab72987876c8abcf74d168",
"content_id": "cb2dcbcf787a7ad817adba6b10ad86a88537ff37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 3,
"path": "/flash-app/README.md",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Docker example\n\nhttps://github.com/docker/labs/blob/master/beginner/chapters/webapps.md"
},
{
"alpha_fraction": 0.7134886980056763,
"alphanum_fraction": 0.7216934561729431,
"avg_line_length": 32.130435943603516,
"blob_id": "b499eb6652249181c90bafe465a7b64c46ef9823",
"content_id": "65c54929b19b0baba6a0cf0ade4518cd55472185",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3047,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 92,
"path": "/alpha_vantage_1m.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Wrapper for alpha_vantage\n# https://backtest-rookies.com/2018/04/20/replacing-quandl-wiki-data-with-alpha-vantage/\n# https://github.com/RomelTorres/alpha_vantage\n\n'''\nAuthor: www.backtest-rookies.com\n\nMIT License\n\nCopyright (c) 2018 backtest-rookies.com\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\ninstall(\"alpha_vantage\")\ninstall(\"argparse\")\ninstall(\"pandas\")\ninstall(\"pprint\")\ninstall(\"matplotlib\")\n\nfrom alpha_vantage.timeseries import TimeSeries\nimport argparse\nimport pandas as pd\nfrom pprint import pprint\n\nimport matplotlib.pyplot as plt #for\n\n\"\"\" def parse_args():\n parser = argparse.ArgumentParser(description='CCXT Market Data Downloader')\n\n parser.add_argument('-s','--symbol',\n type=str,\n required=True,\n help='The Symbol of the Instrument/Currency Pair To Download')\n\n parser.add_argument('-o', '--outfile',\n type=str,\n required=True,\n help='The output directory and file name to save the data')\n\n return parser.parse_args()\n \"\"\"\n\n# Get our arguments\nargs_symbol = input(\"Enter symbol: \")\nprint(args_symbol)\n\nargs_outputCSV = input(\"Enter output csv filename prefix: \") + \"_\" + args_symbol + \".csv\"\n\n# Submit our API and create a session\nalpha_ts = TimeSeries(key='BCVTGY0TFDT3W7IV', output_format='pandas')\n\n# Get the data\ndata, meta_data = alpha_ts.get_intraday(symbol=args_symbol,interval='1min', outputsize='full')\npprint(data.head(2))\n\n# Save the data\ndata.to_csv(args_outputCSV)\n\n# Plotting price\ndata['4. close'].plot()\nplt.title('Intraday Times Series for the MSFT stock (1 min)')\nplt.show()\n\n# Plotting indicators\n\nti = TechIndicators(key='YOUR_API_KEY', output_format='pandas')\ndata, meta_data = ti.get_bbands(symbol='MSFT', interval='60min', time_period=60)\ndata.plot()\nplt.title('BBbands indicator for MSFT stock (60 min)')\nplt.show()"
},
{
"alpha_fraction": 0.44936707615852356,
"alphanum_fraction": 0.6772152185440063,
"avg_line_length": 14.899999618530273,
"blob_id": "6c3269fa5b8ebb495a314e24e182c72416c6c770",
"content_id": "523fe691eb860d1d2e720bbb017c1be38cd428e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 10,
"path": "/dockerized-scalable-ml/api/requirements.txt",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "Cython==0.25.2\nFlask==1.0.0\nFlask-Cors==3.0.2\nnumpy==1.15.2\nnumpydoc==0.6.0\npandas==0.20.1\nrequests==2.20.0\nscipy==1.1.0\nscikit-learn==0.22.1\ngunicorn==19.9.0"
},
{
"alpha_fraction": 0.6872082352638245,
"alphanum_fraction": 0.7170868515968323,
"avg_line_length": 29.600000381469727,
"blob_id": "704748743194a5c22fb0154c4d31f75f4b42ff69",
"content_id": "6cdcb109cff3de0ca69a519257de3400f9635e42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1071,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 35,
"path": "/plottingexample.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# plotting matplot lib\n# http://actuarialdatascience.com/matplotlib-nice-plot.html\n\n# %% packages\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#%% check settings and style \nmatplotlib.get_configdir()\nmatplotlib.style.use('classic') #to make sure is white\n# matplotlib.style.use('dark_background') #backup/default is dark\n\n# %% read data \n# assumes file is in \\\\learning-python\nxlswb = 'data_zero_bond_yield_curves.xlsx' \ndata = pd.read_excel(xlswb, index_col='year')\n\n#%% create plot\nfig, ax = plt.subplots()\nfig.set_dpi(720)\n\nax.plot(data.index, data['Zero jan 2018'])\nax.plot(data.index, data['Zero dec 2018'])\nax.set_title('In 2018 pension funds suffered from decreasing rates')\nax.set_xlabel('maturity (years)')\nax.set_ylabel('rate (%)')\nax.set_xlim(xmin=0)\nax.set_ylim(ymin=-0.5, ymax=2.75)\nax.annotate(s='January', xy=(82, 2.35), color='tab:blue', size=8)\nax.annotate(s='December', xy=(82, 1.8), color='tab:orange', size=8)\n\n#%% save plot\n\nfig.savefig('learning-python/plottingexample.png') #backslash only needs one, can use \\\\ as well "
},
{
"alpha_fraction": 0.7270233035087585,
"alphanum_fraction": 0.7379972338676453,
"avg_line_length": 25.071428298950195,
"blob_id": "54311273dc5613fbc29d6ba8cffcbdf19ac9eba3",
"content_id": "8a29f5fc51d4ec8a3c6f52316a736666c6a8cd0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 28,
"path": "/.ipynb_checkpoints/modelingseverity-checkpoint.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# modeling severity\n# https://statcompute.wordpress.com/2015/12/06/modeling-severity-in-operational-losses-with-python/\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n# install packages\ninstall(\"pandas\")\ninstall(\"numpy\")\ninstall(\"statsmodels\")\n\n# import\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\ndf = pd.read_csv(\"Autocollision.csv\") # need to get larger dataset\ndf.head()\n\n# fit a gamma regression\ngamma = smf.glm(formula = \"Severity ~ Age + Vehicle_Use\", data = df, \n family = sm.families.Gamma(sm.families.links.log) )\ntype(gamma)\ngamma.fit().summary()"
},
{
"alpha_fraction": 0.6024198532104492,
"alphanum_fraction": 0.6191891431808472,
"avg_line_length": 41.827274322509766,
"blob_id": "6f9e53da229d3bc7ee46f5139cd35d6f61c5248f",
"content_id": "5a465b0286c383a3d03bec03bd7539ac3cded2eb",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4711,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 110,
"path": "/lime-experiments-master/parzen_windows.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy as sp\nimport argparse\nimport evaluate_explanations\nimport sys\nimport xgboost\nsys.path.append('..')\nfrom sklearn import ensemble\nfrom sklearn import neighbors\nimport embedding_forest\ndef get_classifier(name, vectorizer):\n if name == 'logreg':\n return linear_model.LogisticRegression(fit_intercept=True)\n if name == 'random_forest':\n return ensemble.RandomForestClassifier(n_estimators=1000, random_state=1, max_depth=5, n_jobs=10)\n if name == 'svm':\n return svm.SVC(probability=True, kernel='rbf', C=10,gamma=0.001)\n if name == 'tree':\n return tree.DecisionTreeClassifier(random_state=1)\n if name == 'neighbors':\n return neighbors.KNeighborsClassifier()\n if name == 'embforest':\n return embedding_forest.EmbeddingForest(vectorizer)\nclass ParzenWindowClassifier:\n def __init__(self):\n #self.kernel = lambda x, sigma : np.exp(-.5 * x.dot(x.T)[0,0] / sigma ** 2) / (np.sqrt(2 * np.pi * sigma **2))\n self.kernel = lambda x, sigma: np.array(np.exp(-.5 * x.power(2).sum(axis=1) / sigma ** 2) / (np.sqrt(2 * np.pi * sigma **2))).flatten()\n def fit(self, X, y):\n self.X = X.toarray()\n self.y = y\n self.ones = y==1\n self.zeros = y==0\n def predict(self, x):\n b = sp.sparse.csr_matrix(x - self.X)\n #pr = np.array([self.kernel(z, self.sigma) for z in b])\n pr = self.kernel(b, self.sigma)\n prob = sum(pr[self.ones]) / sum(pr)\n #print prob\n return int(prob > .5)\n def predict_proba(self, x):\n b = sp.sparse.csr_matrix(x - self.X)\n #pr = np.array([self.kernel(z, self.sigma) for z in b])\n pr = self.kernel(b, self.sigma)\n prob = sum(pr[self.ones]) / sum(pr)\n return np.array([1 - prob, prob])\n def find_sigma(self, sigmas_to_try, cv_X, cv_y):\n self.sigma = sigmas_to_try[0]\n best_mistakes = 2**32 - 1\n best_sigma = self.sigma\n for sigma in sorted(sigmas_to_try):\n self.sigma = sigma\n preds = []\n for i in range(cv_X.shape[0]):\n preds.append(self.predict(cv_X[i]))\n mistakes = sum(cv_y != np.array(preds))\n print (sigma + mistakes)\n sys.stdout.flush()\n if mistakes < best_mistakes:\n best_mistakes = mistakes\n best_sigma = sigma\n print ('Best sigma achieves ' + best_mistakes + 'mistakes. 
Disagreement= ' + float(best_mistakes) / cv_X.shape[0])\n self.sigma = best_sigma\n def explain_instance(self, x, _, __,num_features,___=None):\n minus = self.X - x\n b = sp.sparse.csr_matrix(minus)\n ker = self.kernel(b, self.sigma)\n #ker = np.array([self.kernel(z, self.sigma) for z in b])\n times = np.multiply(minus, ker[:,np.newaxis])\n sumk_0= sum(ker[self.zeros])\n sumk_1= sum(ker[self.ones])\n sumt_0 = sum(times[self.zeros])\n sumt_1 = sum(times[self.ones])\n sumk_total = sumk_0 + sumk_1\n exp = (sumk_0 * sumt_1 - sumk_1 * sumt_0) / (self.sigma **2 * sumk_total ** 2)\n features = x.nonzero()[1]\n values = np.array(exp[0, x.nonzero()[1]])[0]\n return sorted(zip(features, values), key=lambda x:np.abs(x[1]), reverse=True)[:num_features]\ndef main():\n parser = argparse.ArgumentParser(description='Visualize some stuff')\n parser.add_argument('--dataset', '-d', type=str, required=True,help='dataset name')\n parser.add_argument('--algorithm', '-a', type=str, required=True, help='algorithm_name')\n args = parser.parse_args()\n\n train_data, train_labels, test_data, test_labels, _ = LoadDataset(args.dataset)\n vectorizer = CountVectorizer(lowercase=False, binary=True)\n train_vectors = vectorizer.fit_transform(train_data)\n num_train = int(train_vectors.shape[0] * .8)\n indices = np.random.choice(range(train_vectors.shape[0]), train_vectors.shape[0], replace=False)\n train_v = train_vectors[indices[:num_train]]\n y_v = train_labels[indices[:num_train]]\n train_cv = train_vectors[indices[num_train:]]\n y_cv = train_labels[indices[num_train:]]\n print('train_size' + train_v.shape[0])\n print ('cv_size', train_cv.shape[0])\n classifier = get_classifier(args.algorithm, vectorizer)\n classifier.fit(train_v, y_v)\n print ('train accuracy: ' )\n print (accuracy_score(y_v, classifier.predict(train_v)))\n print ('cv accuracy:')\n print (accuracy_score(y_cv, classifier.predict(train_cv)))\n yhat_v = classifier.predict(train_v)\n yhat_cv = classifier.predict(train_cv)\n p = ParzenWindowClassifier()\n p.fit(train_v, yhat_v)\n p.find_sigma([0.1, .25, .5, .75, 1,2,3,4,5,6,7,8,9,10], train_cv, yhat_cv)\n print ('Best sigma:')\n print (p.sigma)\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5818337798118591,
"alphanum_fraction": 0.5869751572608948,
"avg_line_length": 30.351350784301758,
"blob_id": "e8e4590aae094d451af8bc341d44d5b6572ef86a",
"content_id": "ca9ca126e6496c852696bc8bda386cc4ff927393",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1167,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 37,
"path": "/dockerized-scalable-ml/api/app.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "from sklearn.externals import joblib\nimport numpy as np\nimport pandas as pd\nfrom flask import Flask, jsonify, request\nimport gunicorn\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n return \"Hola!\"\n\[email protected]('/predict',methods=['POST'])\ndef api_call():\n try:\n test_json = request.get_json()\n val = []\n # print(test_json)\n for dic in test_json:\n row = []\n row.append(dic['sepal_length'])\n row.append(dic['sepal_width'])\n row.append(dic['petal_length'])\n row.append(dic['petal_width'])\n val.append(row)\n #load model\n loaded_model = joblib.load('model/iris_svm_model.pkl')\n y_pred = loaded_model.predict(np.array(val))\n pred_dict = {}\n for i, pred in enumerate(y_pred):\n pred_dict['prediction_' + str(i)] = int(pred)\n responses = jsonify(predictions=pred_dict)\n responses.status_code = 200\n except Exception as e:\n responses = jsonify(predictions={'error':'some error occured, please try again later', 'json': str(test_json)})\n responses.status_code = 404\n return (responses) "
},
{
"alpha_fraction": 0.6466652154922485,
"alphanum_fraction": 0.6715664863586426,
"avg_line_length": 49.64444351196289,
"blob_id": "14436b5b527ee8da8683b760587be4ea70cc56a1",
"content_id": "a551822096cc398123fd69c88f1c2a101c70296d",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9116,
"license_type": "permissive",
"max_line_length": 257,
"num_lines": 180,
"path": "/lime-experiments-master/generate_data_for_compare_classifiers.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "import sys\nimport copy\nsys.path.append('..')\nimport time\nimport numpy as np\nimport scipy as sp\nimport sklearn\nimport xgboost\nimport xgboost.sklearn\nimport explainers\nfrom load_datasets import *\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import ensemble, cross_validation\nimport pickle\nimport parzen_windows\nimport argparse\ndef get_random_indices(labels, class_, probability):\n nonzero = (labels == class_).nonzero()[0]\n if nonzero.shape[0] == 0 or probability == 0:\n return []\n return np.random.choice(nonzero, int(probability * len(nonzero)) , replace=False)\ndef add_corrupt_feature(feature_name, clean_train, clean_test, dirty_train,\n train_labels, test_labels, class_probs_dirty, class_probs_clean, fake_prefix='FAKE'):\n \"\"\"clean_train, clean_test, dirty_train will be corrupted\"\"\"\n for class_ in set(train_labels):\n indices = get_random_indices(train_labels, class_, class_probs_clean[class_])\n for i in indices:\n clean_train[i] += ' %s%s%s' % (fake_prefix, feature_name, fake_prefix)\n indices = get_random_indices(train_labels, class_, class_probs_dirty[class_])\n for i in indices:\n dirty_train[i] += ' %s%s%s' % (fake_prefix, feature_name, fake_prefix)\n indices = get_random_indices(test_labels, class_, class_probs_clean[class_])\n for i in indices:\n clean_test[i] += ' %s%s%s' % (fake_prefix, feature_name, fake_prefix)\ndef corrupt_dataset(independent_features, train_data, train_labels, test_data, test_labels):\n # independent_features: list [([.3, .8],[.5,.5], 3), ([.1, .1],[0, 0], 1)\n # ...]. Each element in list is a tuple (l,l2, n) where l a list\n # representing the probability of seeing the feature in each class in the\n # dirty train data, l2 is a list representing the probability of seeing the\n # feature in each class the clean test data and n is the number of features\n # with this distribution to add.\n # returns (clean_train, dirty_train, clean_test)\n dirty_train = copy.deepcopy(train_data)\n clean_train = copy.deepcopy(train_data)\n clean_test = copy.deepcopy(test_data)\n idx = 0\n for probs, probs2, n in independent_features:\n for i in range(n):\n add_corrupt_feature('%d' % idx, clean_train, clean_test, dirty_train, train_labels, test_labels, probs, probs2)\n idx += 1\n return clean_train, dirty_train, clean_test\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate some explanations')\n parser.add_argument('--dataset', '-d', type=str, required=True,help='dataset name')\n parser.add_argument('--output_folder', '-o', type=str, required=True, help='output folder')\n parser.add_argument('--num_features', '-k', type=int, required=True, help='num features')\n parser.add_argument('--num_rounds', '-r', type=int, required=True, help='num rounds')\n parser.add_argument('--start_id', '-i', type=int, default=0,required=False, help='output start id')\n args = parser.parse_args()\n dataset = args.dataset\n train_data, train_labels, test_data, test_labels, class_names = LoadDataset(dataset)\n rho = 25\n kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho ** 2))\n local = explainers.GeneralizedLocalExplainer(kernel, explainers.data_labels_distances_mapping_text, num_samples=15000, return_mean=True, verbose=False, return_mapped=True)\n # Found through cross validation\n sigmas = {'multi_polarity_electronics': {'neighbors': 0.75, 'svm': 10.0, 'tree': 0.5,\n 'logreg': 0.5, 'random_forest': 0.5, 'embforest': 0.75},\n 'multi_polarity_kitchen': {'neighbors': 1.0, 'svm': 6.0, 'tree': 0.75,\n 'logreg': 0.25, 'random_forest': 6.0, 
'embforest': 1.0},\n 'multi_polarity_dvd': {'neighbors': 0.5, 'svm': 0.75, 'tree': 8.0, 'logreg':\n 0.75, 'random_forest': 0.5, 'embforest': 5.0}, 'multi_polarity_books':\n {'neighbors': 0.5, 'svm': 7.0, 'tree': 2.0, 'logreg': 1.0, 'random_forest':\n 1.0, 'embforest': 3.0}}\n parzen1 = parzen_windows.ParzenWindowClassifier()\n parzen1.sigma = sigmas[dataset]['random_forest']\n parzen2 = parzen_windows.ParzenWindowClassifier()\n parzen2.sigma = sigmas[dataset]['random_forest']\n random = explainers.RandomExplainer()\n\n for Z in range(args.num_rounds):\n exps1 = {}\n exps2 = {}\n explainer_names = ['lime', 'parzen', 'random', 'greedy', 'mutual']\n for expl in explainer_names:\n exps1[expl] = []\n exps2[expl] = []\n print 'Round', Z\n sys.stdout.flush()\n fake_features_z = [([.1, .2], [.1,.1], 10)]#, ([.2, .1], [.1,.1], 10)]\n clean_train, dirty_train, clean_test = corrupt_dataset(fake_features_z, train_data, train_labels, test_data, test_labels)\n vectorizer = CountVectorizer(lowercase=False, binary=True) \n dirty_train_vectors = vectorizer.fit_transform(dirty_train)\n clean_train_vectors = vectorizer.transform(clean_train)\n test_vectors = vectorizer.transform(clean_test)\n terms = np.array(list(vectorizer.vocabulary_.keys()))\n indices = np.array(list(vectorizer.vocabulary_.values()))\n inverse_vocabulary = terms[np.argsort(indices)]\n tokenizer = vectorizer.build_tokenizer() \n c1 = ensemble.RandomForestClassifier(n_estimators=30, max_depth=5)\n c2 = ensemble.RandomForestClassifier(n_estimators=30, max_depth=5)\n untrustworthy = [i for i, x in enumerate(inverse_vocabulary) if x.startswith('FAKE')]\n train_idx, test_idx = tuple(cross_validation.ShuffleSplit(dirty_train_vectors.shape[0], 1, 0.2))[0]\n train_acc1 = train_acc2 = test_acc1 = test_acc2 = 0\n print 'Trying to find trees:'\n sys.stdout.flush()\n iteration = 0\n found_tree = True\n while np.abs(train_acc1 - train_acc2) > 0.001 or np.abs(test_acc1 - test_acc2) < 0.05: \n iteration += 1\n c1.fit(dirty_train_vectors[train_idx], train_labels[train_idx])\n c2.fit(dirty_train_vectors[train_idx], train_labels[train_idx])\n train_acc1 = accuracy_score(train_labels[test_idx], c1.predict(dirty_train_vectors[test_idx]))\n train_acc2 = accuracy_score(train_labels[test_idx], c2.predict(dirty_train_vectors[test_idx]))\n test_acc1 = accuracy_score(test_labels, c1.predict(test_vectors))\n test_acc2 = accuracy_score(test_labels, c2.predict(test_vectors))\n if iteration == 3000:\n found_tree = False\n break\n if not found_tree:\n print 'skipping iteration', Z\n continue\n print 'done'\n print 'Train acc1:', train_acc1, 'Train acc2:', train_acc2\n print 'Test acc1:', test_acc1, 'Test acc2:', test_acc2\n sys.stdout.flush()\n predictions = c1.predict(dirty_train_vectors)\n predictions2 = c2.predict(dirty_train_vectors)\n predict_probas = c1.predict_proba(dirty_train_vectors)[:,1]\n predict_probas2 = c2.predict_proba(dirty_train_vectors)[:,1]\n cv_preds1 = cross_validation.cross_val_predict(c1, dirty_train_vectors[train_idx], train_labels[train_idx], cv=5)\n cv_preds2 = cross_validation.cross_val_predict(c2, dirty_train_vectors[train_idx], train_labels[train_idx], cv=5)\n parzen1.fit(dirty_train_vectors[train_idx], cv_preds1)\n parzen2.fit(dirty_train_vectors[train_idx], cv_preds2)\n pp = []\n pp2 = []\n true_labels = []\n iteration = 0\n for i in test_idx:\n if iteration % 50 == 0:\n print iteration\n sys.stdout.flush()\n iteration += 1\n pp.append(predict_probas[i])\n pp2.append(predict_probas2[i])\n true_labels.append(train_labels[i])\n exp, mean = 
local.explain_instance(dirty_train_vectors[i], 1, c1.predict_proba, args.num_features)\n exps1['lime'].append((exp, mean))\n\n exp = parzen1.explain_instance(dirty_train_vectors[i], 1, c1.predict_proba, args.num_features, None) \n mean = parzen1.predict_proba(dirty_train_vectors[i])[1]\n exps1['parzen'].append((exp, mean))\n\n exp = random.explain_instance(dirty_train_vectors[i], 1, None, args.num_features, None)\n exps1['random'].append(exp)\n\n exp = explainers.explain_greedy_martens(dirty_train_vectors[i], predictions[i], c1.predict_proba, args.num_features)\n exps1['greedy'].append(exp)\n\n\n # Classifier 2\n exp, mean = local.explain_instance(dirty_train_vectors[i], 1, c2.predict_proba, args.num_features)\n exps2['lime'].append((exp, mean))\n\n exp = parzen2.explain_instance(dirty_train_vectors[i], 1, c2.predict_proba, args.num_features, None) \n mean = parzen2.predict_proba(dirty_train_vectors[i])[1]\n exps2['parzen'].append((exp, mean))\n\n exp = random.explain_instance(dirty_train_vectors[i], 1, None, args.num_features, None)\n exps2['random'].append(exp)\n\n exp = explainers.explain_greedy_martens(dirty_train_vectors[i], predictions2[i], c2.predict_proba, args.num_features)\n exps2['greedy'].append(exp)\n\n out = {'true_labels' : true_labels, 'untrustworthy' : untrustworthy, 'train_acc1' : train_acc1, 'train_acc2' : train_acc2, 'test_acc1' : test_acc1, 'test_acc2' : test_acc2, 'exps1' : exps1, 'exps2': exps2, 'predict_probas1': pp, 'predict_probas2': pp2}\n pickle.dump(out, open(os.path.join(args.output_folder, 'comparing_%s_%s_%d.pickle' % (dataset, args.num_features, Z + args.start_id)), 'w'))\n\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.8013985753059387,
"alphanum_fraction": 0.8013985753059387,
"avg_line_length": 22.09677505493164,
"blob_id": "f792f53c68b8a8ff5e337df2f37e93f572d887a8",
"content_id": "1e83d75dc39259690f028ddb204c7f1767215a28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 715,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 31,
"path": "/ForecastingStockswithProphet/ForecastStocks.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Prophet for stock forecasting\n\n#%% packages\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime as dt\n\n# REQUIRES Microsoft Visual C++ Build Tools for Prophet\nfrom fbprophet import Prophet\n\nimport statsmodels.api as sm\nfrom scipy import stats\nfrom pandas.core import datetools\n\nfrom plotly import tools\nimport plotly.plotly as py\nimport plotly.figure_factory as ff\nimport plotly.tools as tls\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\ninit_notebook_mode(connected=True)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# plt.style.available\nplt.style.use(\"seaborn-whitegrid\")\n\n# todo...."
},
{
"alpha_fraction": 0.7000244855880737,
"alphanum_fraction": 0.7252263426780701,
"avg_line_length": 22.227272033691406,
"blob_id": "561001c2673aca28e1221d52d58ac093ece3a693",
"content_id": "5fe7cbe1936603b1044f87934626099256e423f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4087,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 176,
"path": "/learning-ml-packages.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# https://www.geeksforgeeks.org/best-python-libraries-for-machine-learning/\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\ninstall(\"numpy\")\n\n# NumPy is a very popular python library for large multi-dimensional array and matrix processing, \n# with the help of a large collection of high-level mathematical functions. \n# It is very useful for fundamental scientific computations in Machine Learning. \n# It is particularly useful for linear algebra, Fourier transform, and random number capabilities. \n# High-end libraries like TensorFlow uses NumPy internally for manipulation of Tensors.\n\nimport numpy as np\n\n# creating two arrays of rank 2\nx = np.array([[1,2],[3,4]])\ny = np.array([[5,6], [7,8]])\n\n# two arrays of rank 1\nv = np.array([9,10])\nw = np.array([11,12])\n\n#inner producct\nprint(np.dot(v,w), \"\\n\")\n\n# matrix and vector product\nprint(np.dot(x,v), \"\\n\")\n\n# matrix and matrix product\nprint(np.dot(x,y))\n\n\n# Scipy for mathematics/optimization/statistics, incl. image manipulation\n# consider using skimage later\ninstall(\"scipy\")\ninstall(\"imageio\")\ninstall(\"visvis\")\n\nfrom scipy.misc import imread, imsave, imresize \nimport imageio\nimport visvis as vv\n\n\n# Read a JPEG image into a numpy array\nimg = imageio.imread('C:\\Stuff\\Important\\CareerNCollege\\Ad Hoc\\Git\\learning-python\\\\fruit.jpg')\n\n# print image\nvv.imshow(img)\n\n# Tinting the image (using R G B notation)\nimg_tint = img * [1, 0.45, 0.3] \n\n# Saving the tinted image \nimageio.imwrite('C:\\Stuff\\Important\\CareerNCollege\\Ad Hoc\\Git\\learning-python\\\\fruit_tinted.jpg', img_tint) \n\n# print tinted image\nvv.imshow(img_tint)\n\n# Resizing the tinted image to be 300 x 300 pixels \nimg_tint_resize = imresize(img_tint, (300, 300)) \n\n# Saving the resized tinted image \nimageio.imwrite('C:\\Stuff\\Important\\CareerNCollege\\Ad Hoc\\Git\\learning-python\\\\fruit_tinted_rezised.jpg', img_tint)\nvv.imshow(img_tint_resize, 2) \n\n\n# scikit-learn\n# classical ml algorithms \n\ninstall(\"sklearn\")\n\n# sample Decision Tree Classifier\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier\n\n# load iris datasets\ndataset = datasets.load_iris()\n\n# fit a CART model to data\nmodel = DecisionTreeClassifier()\nmodel.fit(dataset.data, dataset.target)\nprint(model)\n\n# make predictions\nexpected = dataset.target\npredicted = model.predict(dataset.data)\n\n# summarize the fit of the model\nprint(metrics.classification_report(expected, predicted))\nprint(metrics.confusion_matrix(expected, predicted))\n\n# Theano #\n# mathematical expressions for large datasets\n\ninstall(\"theano\")\n\nimport theano \nimport theano.tensor as T\nx = T.dmatrix('x')\ns = 1 / (1+T.exp(-x))\nlogistic = theano.function([x],s)\nlogistic([[0,1],[-1,-2]])\n\n\n# Tensorflow\n# Google-based high performance computing\n\ninstall(\"tensorflow\")\nimport tensorflow as tf\n\nx1 = tf.constant([1,2,3,4])\nx2 = tf.constant([5,6,7,8])\n\n# Multiply \nresult = tf.multiply(x1,x2)\n\n# Initialize session\nsess = tf.Session()\n\nprint(sess.run(result))\n\n# Close session\nsess.close()\n\n# Keras\n# ML library on top of others\n\n# Pytorch\n# Computer vision or NLP\n# Example for 2 layer network \n\n# see https://pytorch.org/get-started/locally/#anaconda-1\nimport torch\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") 
Uncomment to run on GPU\n\n## TODO later\n\n\n# Pandas \n# for data analysis\n\ninstall(\"pandas\")\nimport pandas as pd \n\n# to make it dictionary\ndata = {\"country\": [\"Brazil\", \"Russia\", \"India\", \"China\", \"South Africa\"], \n \"capital\": [\"Brasilia\", \"Moscow\", \"New Dehli\", \"Beijing\", \"Pretoria\"], \n \"area\": [8.516, 17.10, 3.286, 9.597, 1.221], \n \"population\": [200.4, 143.5, 1252, 1357, 52.98] } \n\ndata_table = pd.DataFrame(data) # to make it into pandas dataframe\n\nprint(data_table)\n\n# Matplotlib\n# for linear plot \n\ninstall(\"matplotlib\")\ninstall(\"numpy\")\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(0,10,100)\n\nplt.plot(x,x,label='linear')\nplt.show(block = False) #show without stopping code\nplt.legend()"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 27,
"blob_id": "c29c24c5c30111352bb246e968ce55196f5423d9",
"content_id": "d1c8ebbd6414a6581421aa62040bb50760662235",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 3,
"path": "/PracticalBusinessPython-MonteCarlo.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Practical business python - monte carlo\n# https://pbpython.com/monte-carlo.html\n# "
},
{
"alpha_fraction": 0.6414901614189148,
"alphanum_fraction": 0.6769583821296692,
"avg_line_length": 29.625,
"blob_id": "756bd60c06cb4c3e9524931e594cb8c83c73031b",
"content_id": "ece085c3efe4279ab76007226bc8d1b863ef454f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3919,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 128,
"path": "/CohortAnalysisWithPython.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCohort analysis with python \nhttp://www.gregreda.com/2015/08/23/cohort-analysis-with-python/\n\"\"\"\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\ninstall(\"pandas\")\ninstall(\"numpy\")\ninstall(\"matplotlib\")\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\npd.set_option('max_columns', 50)\nmpl.rcParams['lines.linewidth'] = 2\n\n#%matplotlib inline\n\n# import data\ndf = pd.read_excel('chapter-12-relay-foods.xlsx', sheet_name=\"Pilot Study Data\")\ndf.head()\n\n# 1 create period column based on order date\ndf['OrderPeriod'] = df.OrderDate.apply(lambda x : x.strftime('%Y-%m'))\ndf.head()\n\n# 2 determine user's cohort group\ndf.set_index('UserId', inplace = True)\ntype(df)\ndf['CohortGroup'] = df.groupby(level=0)['OrderDate'].min().apply(lambda x : x.strftime('%Y-%m'))\ndf.reset_index(inplace=True)\ndf.head()\n\n# 3 Rollup data by CohortGroup & OrderPeriod\ngrouped = df.groupby(['CohortGroup', 'OrderPeriod'])\n\n# count the unique users, orders, and total revenue per (group+period)\ncohorts = grouped.agg({'UserId': pd.Series.nunique,\n 'Order Id': pd.Series.nunique,\n 'Total Charges': np.sum})\n\n# make column more meaningful\ncohorts.rename(columns={'UserId' : 'TotalUsers',\n 'Order Id' : 'TotalOrders'}, inplace = True)\ncohorts.head()\n\n# function for time\ndef cohort_period(df):\n \"\"\"\n Creates a `CohortPeriod` column, which is the Nth period based on the user's first purchase.\n \n Example\n -------\n Say you want to get the 3rd month for every user:\n df.sort(['UserId', 'OrderTime', inplace=True)\n df = df.groupby('UserId').apply(cohort_period)\n df[df.CohortPeriod == 3]\n \"\"\"\n df['CohortPeriod'] = np.arange(len(df)) + 1\n return df\n\ncohorts = cohorts.groupby(level=0).apply(cohort_period)\ncohorts.head()\n\n# 5 Make sure we did that right\nx = df[(df.CohortGroup == '2009-01') & (df.OrderPeriod == '2009-01')]\ny = cohorts.ix[('2009-01', '2009-01')] #uses labels for new data\n\n# shows nothing if true, these are checks\nassert(x['UserId'].nunique() == y['TotalUsers']) \nassert(x['Total Charges'].sum().round(2) == y['Total Charges'].round(2))\nassert(x['Order Id'].nunique() == y['TotalOrders'])\n\nx = df[(df.CohortGroup == '2009-01') & (df.OrderPeriod == '2009-09')]\ny = cohorts.ix[('2009-01', '2009-09')]\n\nassert(x['UserId'].nunique() == y['TotalUsers'])\nassert(x['Total Charges'].sum().round(2) == y['Total Charges'].round(2))\nassert(x['Order Id'].nunique() == y['TotalOrders'])\n\nx = df[(df.CohortGroup == '2009-05') & (df.OrderPeriod == '2009-09')]\ny = cohorts.ix[('2009-05', '2009-09')]\n\nassert(x['UserId'].nunique() == y['TotalUsers'])\nassert(x['Total Charges'].sum().round(2) == y['Total Charges'].round(2))\nassert(x['Order Id'].nunique() == y['TotalOrders'])\n\n# user retention by Cohort Group\n\n# reindex the DataFrame\ncohorts.reset_index(inplace=True)\ncohorts.set_index(['CohortGroup','CohortPeriod'], inplace=True)\n\ncohort_group_size = cohorts['TotalUsers'].groupby(level=0).first()\ncohort_group_size.head()\n\ncohorts['TotalUsers'].head()\n\n# unstack, like pivoting\ncohorts['TotalUsers'].unstack(0).head()\n\n# utilize broadcasting to divide each column by corresponding cohort_group_size\nuser_retention = cohorts['TotalUsers'].unstack(0).divide(cohort_group_size, axis=1)\nuser_retention.head(10)\n\n# see plots\nuser_retention[['2009-02', '2009-03', 
'2009-04']].plot(figsize=(10,5))\nplt.title('Cohorts: User Retention')\nplt.xticks( np.arange(1,21.1,1))\nplt.xlim(1,20)\nplt.ylabel('% of cohort purchasing')\nplt.show(block=False)\n\n#for heatmaps\ninstall(\"seaborn\")\nimport seaborn as sns\n\nplt.figure(figsize=(12,8))\nplt.title('Cohort: User Retention')\nsns.heatmap(user_retention.T, mask=user_retention.T.isnull(),annot=True, fmt='.0%') #.T is tranposed\nplt.show(block=False)"
},
{
"alpha_fraction": 0.6980592012405396,
"alphanum_fraction": 0.7117403745651245,
"avg_line_length": 26.09482765197754,
"blob_id": "e2700ee9977bde0183162db5183a7c66bab0acc9",
"content_id": "d4d904968b3a529c69510055fe81c1927e6ec459",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3143,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 116,
"path": "/python-machine-learning-tutorial-scikit-learn.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# https://elitedatascience.com/python-machine-learning-tutorial-scikit-learn\n\n# run from command line if missing\n# pip install \"sklearn\" # for example\n\n# %% import packages\nimport sklearn;\nprint(sklearn.__version__)\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\n# random forest models\nfrom sklearn.ensemble import RandomForestRegressor\n\n# cross validation\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import GridSearchCV\n\n# evaluation metrics\nfrom sklearn.metrics import mean_squared_error,r2_score\n\n# persist model for future use\nimport joblib #used to be from sklearn\n\n# %% load data\ndataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'\ndata = pd.read_csv(dataset_url)\n\nprint(data.head()) # shows issue bc csv uses ;\n\ndata = pd.read_csv(dataset_url, \";\")\n\nprint(data.head()) # fixed\n\n# understand data\nprint(data.shape)\nprint(data.describe())\n\ny = data.quality\nx = data.drop('quality',axis=1)\n\nX_train, X_test, Y_train, Y_test = train_test_split(x,y,test_size = 0.2,\n random_state=123,\n stratify=y)\n\n# %% Need to transform data for fitting so have mean 0 & std 1\n\n# lazy way of scaling data (not used)\nX_train_scaled_lazy = preprocessing.scale(X_train)\nprint(X_train_scaled_lazy)\n# check dataset scaling\nprint(X_train_scaled_lazy.mean(axis=0))\nprint(X_train_scaled_lazy.std(axis=0))\n\n# instead, use transformer API\nscaler = preprocessing.StandardScaler().fit(X_train)\nX_train_scaled = scaler.transform(X_train)\nprint(X_train_scaled.mean(axis=0))\nprint(X_train_scaled.std(axis=0))\n\n# applying transformer\nX_test_scaled = scaler.transform(X_test)\nprint(X_test_scaled.mean(axis=0))\nprint(X_test_scaled.std(axis=0))\n# should be not mean 0, std 1, as using scaling from train\n\n# but in process, don't even need above, just need to use the scaling object\npipeline = make_pipeline(preprocessing.StandardScaler(),\n RandomForestRegressor(n_estimators=100))\n\n# %% step 6 tune hyper parameters\n# hyper parameters cannot be trained by model itself\n# for random forest, can use mean squared error or mean abs error\nprint(pipeline.get_params())\n\n# python dictionary\nhyperparameters = {'randomforestregressor__max_features': ['auto', 'sqrt', 'log2'],\n 'randomforestregressor__max_depth': [None, 5,3,1]}\n\n# %% 7 tune model using cross validation\nclf = GridSearchCV(pipeline, hyperparameters, cv=10)\n\n# fit & tune model\nclf.fit(X_train,Y_train)\n\n# see best params\nprint(clf.best_params_)\n\n# %% 8 refit on training set\n# is done by default by GridSearch\nprint(clf.refit)\n\n# %% 9 eval model pipeline on test data\nY_pred = clf.predict(X_test)\n\nprint(r2_score(Y_test,Y_pred))\n# 0.4686...\n\nprint(mean_squared_error(Y_test,Y_pred))\n# 0.35\n\n# but is model good enough?\n\n# %% 10 export model\njoblib.dump(clf, 'rf_regressor.pkl')\n\n#repull, can type %reset in ipython interpreter\nimport joblib\nclf2 = joblib.load('rf_regressor.pkl')\n\n#predict\nclf2.predict(X_test)\n"
},
{
"alpha_fraction": 0.6211395263671875,
"alphanum_fraction": 0.6384451389312744,
"avg_line_length": 37.523075103759766,
"blob_id": "6b8d55da8f6413704c5c79776f9b3f028d554f1b",
"content_id": "54a09a6c4cbfe62892178e5751537e29a8c99310",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7512,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 195,
"path": "/lime-experiments-master/compare_classifiers.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "import sys\nimport copy\nsys.path.append('..')\nimport time\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\nimport sklearn\nimport xgboost\nimport xgboost.sklearn\nimport explainers\nfrom load_datasets import *\nimport glob\nimport argparse\nimport collections\nfrom sklearn import ensemble, cross_validation\nimport pickle\nimport parzen_windows\n\ndef submodular_fn(explanations, feature_value):\n \"\"\"TODO: Detail this\"\"\"\n z_words = set()\n for exp in explanations:\n z_words = z_words.union([x[0] for x in exp])\n normalizer = sum([feature_value[w] for w in z_words])\n def fnz(x):\n all_words = set()\n for doc in x:\n all_words = all_words.union([x[0] for x in explanations[doc]])\n return sum([feature_value[w] for w in all_words]) / normalizer\n fnz.num_items = len(explanations)\n return fnz\ndef greedy(submodular_fn, k, chosen=[]):\n chosen = copy.deepcopy(chosen)\n all_items = range(submodular_fn.num_items)\n current_value = 0\n while len(chosen) != k:\n best_gain = 0\n best_item = all_items[0]\n for i in all_items:\n gain= submodular_fn(chosen + [i]) - current_value\n if gain > best_gain:\n best_gain = gain\n best_item = i\n chosen.append(best_item)\n all_items.remove(best_item)\n current_value += best_gain \n return chosen\n# A pick function takes in the whole map, returns two lists of tuples with instance\n# ids and weights, one for each classifier. This won't work later if I want it to be interactive.\ndef submodular_pick(pickled_map, explainer, B, use_explanation_weights=False,\nalternate=False):\n def get_function(exps):\n feature_value = collections.defaultdict(float)\n for exp in exps:\n for f, v in exp:\n if not use_explanation_weights:\n v = 1\n feature_value[f] += np.abs(v)\n for f in feature_value:\n feature_value[f] = np.sqrt(feature_value[f])\n submodular = submodular_fn(exps, feature_value)\n return submodular\n out = greedy(submodular, B)\n return out\n if explainer in ['parzen', 'lime']:\n exps1 = [x[0] for x in pickled_map['exps1'][explainer]]\n exps2 = [x[0] for x in pickled_map['exps2'][explainer]]\n else:\n exps1 = pickled_map['exps1'][explainer]\n exps2 = pickled_map['exps2'][explainer]\n fn1 = get_function(exps1)\n fn2 = get_function(exps2)\n if not alternate:\n return greedy(fn1, B), greedy(fn2, B)\n else:\n ret = []\n for i in range(B):\n fn = fn1 if i % 2 == 0 else fn2\n ret = greedy(fn, i + 1, ret)\n return ret \n \n #return get_list(exps1), get_list(exps2)\n\ndef all_pick(pickled_map, explainer, B):\n list_ = range(len(pickled_map['exps1'][explainer]))\n return list_, list_\n\ndef random_pick(pickled_map, explainer, B):\n list_ = np.random.choice(range(len(pickled_map['exps1'][explainer])), B, replace=False)\n return list_, list_\n\ndef find_untrustworthy(explainer, exps, instances, untrustworthy):\n found = set()\n for i in instances:\n if explainer in ['lime', 'parzen']:\n exp, mean = exps[i]\n else:\n exp = exps[i]\n found = found.union([x[0] for x in exp if x[0] in untrustworthy])\n return found\n\ndef tally_mistrust(explainer, exps, predict_probas, untrustworthy):\n trust_fn = lambda prev, curr: (prev > 0.5 and curr > 0.5) or (prev <= 0.5 and curr <= 0.5)\n trust_fn_all = lambda exp, unt: len([x[0] for x in exp if x[0] in unt]) == 0\n mistrust = 0\n for i in range(len(exps)):\n if explainer in ['lime', 'parzen']:\n exp, mean = exps[i]\n if explainer == 'lime':\n prev_tot = sum([x[1] for x in exp]) + mean\n elif explainer == 'parzen':\n prev_tot = mean\n tot = prev_tot - sum([x[1] for x in exp if x[0] in untrustworthy])\n if not 
trust_fn(tot, prev_tot):\n mistrust += 1\n else:\n exp = exps[i]\n if not trust_fn_all(exp, untrustworthy):\n mistrust += 1\n return mistrust \n\n\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate some explanations')\n parser.add_argument('--dataset', '-d', type=str, required=True,help='dataset name')\n parser.add_argument('--output_folder', '-o', type=str, required=True, help='output folder')\n parser.add_argument('--num_features', '-k', type=int, required=True, help='num features')\n parser.add_argument('--pick', '-p', type=str, required=False, default='all', help='all, submodular, submodular2 or random')\n parser.add_argument('--num_instances', '-n', type=int, required=False, default=1, help='number of instances to look at')\n parser.add_argument('--num_rounds', '-r', type=int, required=False, default=10, help='num rounds')\n #parser.add_argument('--start_id', '-i', type=int, required=True, help='output start id')\n args = parser.parse_args()\n dataset = args.dataset\n got_right = lambda test1, test2, mistrust1, mistrust2: mistrust1 < mistrust2 if test1 > test2 else mistrust1 > mistrust2\n names = ['lime', 'parzen', 'random', 'greedy']\n num_exps = 0\n B = args.num_instances\n rounds = 1\n if args.pick == 'all':\n pick_function = all_pick\n elif args.pick == 'submodular':\n pick_function = lambda a,b,c : submodular_pick(a,b,c, use_explanation_weights=True)\n elif args.pick == 'random':\n pick_function = random_pick\n rounds =args.num_rounds\n accuracy = collections.defaultdict(lambda: [])\n right = collections.defaultdict(lambda: [])\n for r in range(rounds):\n right = collections.defaultdict(lambda: [])\n for filez in glob.glob(os.path.join(args.output_folder, 'comparing_%s*' % args.dataset))[:800]:\n num_exps += 1\n pickled_map = pickle.load(open(filez))\n predict_probas = pickled_map['predict_probas1']\n predict_probas2 = pickled_map['predict_probas2']\n test1 = pickled_map['test_acc1']\n test2 = pickled_map['test_acc2']\n untrustworthy = pickled_map['untrustworthy']\n for explainer in names:\n if explainer.startswith('lime'):\n pick1, pick2 = pick_function(pickled_map, 'lime', B)\n exps1 = pickled_map['exps1']['lime']\n exps2 = pickled_map['exps2']['lime']\n elif explainer.startswith('parzen'):\n pick1, pick2 = pick_function(pickled_map, 'parzen', B)\n exps1 = pickled_map['exps1']['parzen']\n exps2 = pickled_map['exps2']['parzen']\n else:\n pick1, pick2 = pick_function(pickled_map, explainer, B)\n exps1 = pickled_map['exps1'][explainer]\n exps2 = pickled_map['exps2'][explainer]\n if args.pick != 'all':\n unt1 = find_untrustworthy(explainer, exps1, pick1, untrustworthy)\n unt2 = find_untrustworthy(explainer, exps2, pick2, untrustworthy)\n else:\n unt1 = unt2 = untrustworthy\n mistrust1 = tally_mistrust(explainer, exps1, predict_probas, unt1)\n mistrust2 = tally_mistrust(explainer, exps2, predict_probas2, unt2)\n while mistrust1 == mistrust2:\n mistrust1 = np.random.randint(0,10) \n mistrust2 = np.random.randint(0,10)\n #print explainer, mistrust1, mistrust2\n right[explainer].append(int(got_right(test1, test2, mistrust1, mistrust2)))\n right['random_choice'].append(int(got_right(test1, test2, np.random.random(), np.random.random())))\n #print [(x[0], sum(x[1])) for x in right.iteritems()]\n #print filez\n for name in right:\n accuracy[name].append(np.mean(right[name]))\n print 'Mean accuracy:'\n for name in right:\n print name, np.mean(accuracy[name])\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.690986156463623,
"alphanum_fraction": 0.718932569026947,
"avg_line_length": 38.86758041381836,
"blob_id": "67a09a968bd01cefdc209faad9844b879de3ca22",
"content_id": "a6a5dedbe17697d4027e471abfa9e24239f5513d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8741,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 219,
"path": "/GLMwithH2O.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Learning H2O\n# https://h2o-release.s3.amazonaws.com/h2o/rel-ueno/2/docs-website/h2o-docs/booklets/GLMBooklet.pdf\n# http://h2o-release.s3.amazonaws.com/h2o/rel-xia/4/docs-website/h2o-docs/booklets/GLMBooklet.pdf\n\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n# install packages\ninstall(\"H2O\")\n\nimport h2o\n\n# Start H2O on local machine\nh2o.init()\n\n# Get help\n# help(h2o.estimators.glm.H2OGeneralizedLinearEstimator)\n# help(h2o.estimators.gbm.H2OGradientBoostingEstimator)\n# help(h2o.estimators.deeplearning.H2ODeepLearningEstimator)\n\n# Show a demo\n# h2o.demo(\"glm\")\n# h2o.demo(\"gbm\")\n# h2o.demo(\"deeplearning\")\n\nh2o.init(ip = \"123.45.67.89\", port = 54321)\n\n# linear regression \nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\nh2o.init()\n\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\ngaussian_fit = H2OGeneralizedLinearEstimator(family = \"gaussian\")\ngaussian_fit.train(y = \"VOL\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"],training_frame = h2o_df)\n\n# logistic regression\nbinomial_fit = H2OGeneralizedLinearEstimator(family = \"binomial\")\nbinomial_fit.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = h2o_df)\n\n# multinomial (by column number)\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\nmultinomial_fit = H2OGeneralizedLinearEstimator(family = \"multinomial\")\nmultinomial_fit.train(y = 4, x = [0,1,2,3], training_frame = h2o_df)\n\n# Poisson models\n# use swedish insurance data\nh2o_df = h2o.import_file(\n \"http://h2o-public-test-data.s3.amazonaws.com/smalldata/glm_test/Motor_insurance_sweden.txt\", sep = '\\t')\npoisson_fit = H2OGeneralizedLinearEstimator(family = \"poisson\")\npoisson_fit.train(y=\"Claims\", x= [\"Payment\", \"Insured\", \"Kilometres\", \"Zone\", \"Bonus\", \"Make\"], training_frame = h2o_df)\npoisson_fit.coef()\n\n# Gamma models\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\ngamma_inverse = H2OGeneralizedLinearEstimator(family =\"gamma\", link = \"inverse\")\ngammma_inverse.train(y = \"DPROS\", x = [\"AGE\", \"RACE\", \"CAPSULE\", \"DCAPS\", \"PSA\", \"VOL\"], training_frame = h2o_df)\n\ngamma_log = H2OGeneralizedLinearEstimator(family = \"gamma\", link = \"log\")\ngamma_log.train(y=\"DPROS\", x= [\"AGE\", \"RACE\", \"CAPSULE\", \"DCAPS\", \"PSA\", \"VOL\"], training_frame = h2o_df)\n\n# Tweedie \n# p= 0: Normal\n# p= 1: Poisson \n# p∈(1,2): Compound Poisson, non-negative with mass at zero\n# p= 2: Gamma\n# p= 3: Inverse-Gaussian\n# p >2: Stable, with support on the positive reals\n\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/glm_test/auto.csv\")\ntweedie_fit = H2OGeneralizedLinearEstimator(family = \"tweedie\")\ntweedie_fit.train(y = \"y\", x = h2o_df.col_names[1:], training_frame=h2o_df)\n\n# Building GLM models\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\nh2o_df[\"CAPSULE\"] = h2o_df[\"CAPSULE\"].asfactor()\nh2o_df.summary()\n\n# choosing model\n# L-BGFS for larger # of predictors\n# IRLSM fewer predictors\n\n\n# stopping criteria\nh2o_df = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip\")\n\n# stops the model when we reach 10 
model = H2OGeneralizedLinearEstimator(family = \"binomial\", lambda_search = True, max_active_predictors = 10)\nmodel.train(y = \"IsDepDelayed\", x = [\"Year\", \"Origin\"], training_frame = h2o_df)\nprint(model)\n\n# k-fold validation\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\n# h2o.export_file(h2o_df, \"test.csv\")\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\nbinomial_fit = H2OGeneralizedLinearEstimator(family = \"binomial\", nfolds = 5, fold_assignment = \"Random\")\nbinomial_fit.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = h2o_df)\nprint(\"training auc: \", binomial_fit.auc(train=True))\nprint(\"cross-validation auc: \", binomial_fit.auc(xval=True))\n\n# grid search over alpha (alpha = 1 is lasso, alpha = 0 is ridge)\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\nalpha_opts = [0.0, 0.25, 0.5, 1.0]\nhyper_parameters = {\"alpha\": alpha_opts}\n\n# import grid search\nfrom h2o.grid.grid_search import H2OGridSearch\n\ngrid = H2OGridSearch(H2OGeneralizedLinearEstimator(family=\"binomial\"), hyper_params = hyper_parameters)\ngrid.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = h2o_df)\nfor m in grid:\n    print(\"Model ID: \" + m.model_id + \" auc: \", m.auc())\n    print(m.summary())\n    print(\"\\n\\n\")\n\n# grid search over lambda (for regularization)\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\nlambda_opts = [1, 0.5, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0]\nhyper_parameters = {\"lambda\": lambda_opts}\n\ngrid = H2OGridSearch(H2OGeneralizedLinearEstimator(family=\"binomial\"), hyper_params = hyper_parameters)\ngrid.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = h2o_df)\nfor m in grid:\n    print(\"Model ID: \" + m.model_id + \" auc: \", m.auc())\n    print(m.summary())\n    print(\"\\n\\n\")\n\n# GLM output logistic\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\n\n# make train & validation sets\nr = h2o_df[0].runif(seed=1234)\ntrain = h2o_df[r <= 0.8]\nvalid = h2o_df[r > 0.8]\nbinomial_fit = H2OGeneralizedLinearEstimator(family = 'binomial')\nbinomial_fit.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = train, validation_frame = valid)\nprint(binomial_fit)\n\n\n# coefficients\nbinomial_fit.pprint_coef()\nsorted(binomial_fit.coef_norm().items(), key=lambda x: x[1], reverse=True)\n\n# model statistics\nbinomial_fit.summary()\nbinomial_fit._model_json[\"output\"][\"model_summary\"].__getitem__('number_of_iterations')\nbinomial_fit.null_degrees_of_freedom(train=True, valid=True)\nbinomial_fit.residual_degrees_of_freedom(train=True, valid=True)\nbinomial_fit.mse(train=True, valid=True)\nbinomial_fit.r2(train=True, valid=True)\nbinomial_fit.logloss(train=True, valid=True)\nbinomial_fit.auc(train=True, valid=True)\n# binomial_fit.giniCoef(train=True, valid=True) # doesn't work\nbinomial_fit.null_deviance(train=True, valid=True)\nbinomial_fit.aic(train=True, valid=True)\n\n# confusion matrix\nbinomial_fit.confusion_matrix(valid = False)\nbinomial_fit.confusion_matrix(valid = True)\n\n# scoring history\nbinomial_fit.scoring_history()\n\n# making predictions\n
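# note (added): the sections below refit on train/valid/test splits, export predictions, and score held-out data\n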
h2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\n\nrand_vec = h2o_df.runif(1234)\n\ntrain = h2o_df[rand_vec <= 0.8]\nvalid = h2o_df[(rand_vec > 0.8) & (rand_vec <= 0.9)]\ntest = h2o_df[rand_vec > 0.9]\n\nbinomial_fit = H2OGeneralizedLinearEstimator(family = \"binomial\")\nbinomial_fit.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = train,\n                   validation_frame = valid)\n\n# make & export predictions\npred = binomial_fit.predict(test)\nh2o.export_file(pred, \"pred.csv\", force=True)\n# or you can export prediction to hdfs:\n# h2o.exportFile(pred, \"hdfs://namenode/path/to/file.csv\")\n\n# calculate metrics\nbinomial_fit.model_performance(test)\n\n# remove response column in order to test\n# (predictions use the max-F1 threshold by default)\nnewdata = test\nnewdata['CAPSULE'] = None\nnewpred = binomial_fit.predict(newdata)\nnewpred\n\n# manually set the prediction threshold to 0.3\nimport pandas as pd\npred = binomial_fit.predict(h2o_df)\npred['predict'] = pred['p1'] > 0.3\n\n# POJO (Plain Old Java Object) export\nh2o_df = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv\")\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\nbinomial_fit = H2OGeneralizedLinearEstimator(family = \"binomial\")\nbinomial_fit.train(y = \"CAPSULE\", x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"], training_frame = h2o_df)\nh2o.download_pojo(binomial_fit)\n\n# Verifying model results\nh2o_df = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip\")\nmodel = H2OGeneralizedLinearEstimator(family = \"binomial\", nfolds = 5)\nmodel.train(y = \"IsDepDelayed\", x = [\"Year\", \"Origin\"], training_frame = h2o_df)\nprint(\"full model training auc:\", model.auc())\nprint(\"full model validation auc:\", model.auc(xval=True))\nfor model_ in model.get_xval_models():\n    print(model_.model_id, \"training auc:\", model_.auc(), \"validation auc:\", model_.auc(valid=True))\n"
},
{
"alpha_fraction": 0.6432432532310486,
"alphanum_fraction": 0.6684684753417969,
"avg_line_length": 16.90322494506836,
"blob_id": "4ae14b6ca2568f900593428e0d3a46d7ad417091",
"content_id": "0349489655028d5bc968986a02ef550255e6beb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 555,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 31,
"path": "/fedexwebscrape/fedex.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "\n# %%\n## imports\n \nimport requests\n\n# %%\n\norig_zip = '30097'\ndest_zip = '11217'\ncountry = 'us'\n\nrequest_str = f'http://www.fedex.com/ratetools/RateToolsMain.do?method=FindZones&origPostalCd={orig_zip}&destCountryCd=us&destPostalCd={dest_zip}&destCountryCd={country}'\n\nr = requests.get(request_str)\n\n\n# %%\n\nimport re\nfinder = re.findall(r'expressZone = \"\\d\"', r.text)\nexpressZone = int(finder[0].split('\"')[1])\nprint(expressZone)\n\n# %%\n# \nfinder = re.findall(r'groundZone = \"\\d\"', r.text)\ngroundZone = int(finder[0].split('\"')[1])\nprint(groundZone)\n\n# %%\n#"
},
{
"alpha_fraction": 0.6076642274856567,
"alphanum_fraction": 0.6238269209861755,
"avg_line_length": 26.80434799194336,
"blob_id": "398802f13801d1258fc01cd1bc870a2cf0ff2a29",
"content_id": "7a302aaaa5cc0b0f941f9dfb9a5ee21a52aad2d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3836,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 138,
"path": "/TensorFlow2TextClass.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Text Classification\n# https://www.tensorflow.org/tutorials/keras/basic_text_classification\n\nfrom __future__ import absolute_import, division, print_function\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n# check for install\ninstall(\"tensorflow\")\ninstall(\"numpy\")\ninstall(\"matplotlib\")\n\n# import relevant packages\nimport tensorflow as tf \nfrom tensorflow import keras \nimport numpy as np\n\nprint(\"tensorflow version: \" + tf.__version__)\n\n# Download dataset\nimdb = keras.datasets.imdb \n(train_data, train_labels) , (test_data, test_labels) = imdb.load_data(num_words=10000)\n\n# Explore the data\nprint(\"Training entries: {}, labels: {}\".format(len(train_data),len(train_labels)))\nprint(train_data[0])\n\nlen(train_data[0]),len(train_data[1])\n\n# Convert integers back to words \nword_index = imdb.get_word_index()\n\n# The first indices\nword_index = {k:(v+3) for k, v in word_index.items()}\nword_index[\"<PAD>\"] = 0\nword_index[\"<START>\"] = 1\nword_index[\"<UNK>\"] = 2 # unknown\nword_index[\"<UNUSED>\"] = 3\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i,'?') for i in text])\n\n# show first review in words\ndecode_review(train_data[0])\n\n# Prepare data\ntrain_data = keras.preprocessing.sequence.pad_sequences(train_data,\n value=word_index[\"<PAD>\"],\n padding = 'post',\n maxlen=256)\ntest_data = keras.preprocessing.sequence.pad_sequences(test_data,\n value=word_index[\"<PAD>\"],\n padding = 'post',\n maxlen=256)\n\nlen(train_data[0]), len(train_data[1])\nprint(train_data[0])\n\n# Build the model\nvocab_size = 10000\n\nmodel = keras.Sequential()\nmodel.add(keras.layers.Embedding(vocab_size,16))\nmodel.add(keras.layers.GlobalAveragePooling1D())\nmodel.add(keras.layers.Dense(16,activation=tf.nn.relu))\nmodel.add(keras.layers.Dense(1,activation=tf.nn.sigmoid))\n\nmodel.summary()\n\n# Add loss function to model\nmodel.compile(\n optimizer = 'adam',\n loss = 'binary_crossentropy', \n metrics=['acc']\n )\n\n# Create a validation set to finetune model\nx_val = train_data[:10000]\npartial_x_train = train_data[10000:]\n\ny_val = train_labels[:10000]\npartial_y_train = train_labels[10000:]\n\n# Train model\nhistory = model.fit(partial_x_train,\n partial_y_train,\n epochs = 40,\n batch_size = 512,\n validation_data = (x_val,y_val),\n verbose=1\n )\n\n# Evaluate model\nresults = model.evaluate(test_data, test_labels)\n\nprint(results)\n\n# Graph of accuracy and history over time for training\n# import plotting package\nimport matplotlib.pyplot as plt \n\nhistory_dict = history.history\nhistory_dict.keys()\n\nacc = history_dict['acc']\nval_acc = history_dict['val_acc']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1,len(acc)+1)\n\n# \"bo\" is for blue dot\nplt.plot(epochs, loss, 'bo', label = 'Training Loss')\n# b is for solid blue line\nplt.plot(epochs, val_loss, 'b', label = 'Validation Loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show(block=False)\n\nplt.clf() #clear figure\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.show(block=False)"
},
{
"alpha_fraction": 0.676417887210846,
"alphanum_fraction": 0.6895522475242615,
"avg_line_length": 25.866310119628906,
"blob_id": "c493ae6970f8db1466270999777eef22b3722a21",
"content_id": "3d36d59799c0a71040797a6482fc09599f3aadb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5025,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 187,
"path": "/TensorFlow3Regression.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Regression TensorFlow\n# https://www.tensorflow.org/tutorials/keras/basic_regression\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n# use seaborn for pairplot\n# install packages\ninstall(\"tensorflow\")\ninstall(\"pathlib\")\ninstall(\"matplotlib\")\ninstall(\"pandas\")\ninstall(\"seaborn\")\n\n#import packages\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport pathlib\nimport matplotlib.pyplot as plt \nimport pandas as pd \nimport seaborn as sns \n\nimport tensorflow as tf \nfrom tensorflow import keras \nfrom tensorflow.keras import layers \n\nprint(tf.__version__)\n\n# get data\n# other sets https://archive.ics.uci.edu/ml/datasets.php\ndataset_path = keras.utils.get_file(\"auto-mpg.data\", \"http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data\")\ndataset_path\n\ncolumn_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year', 'Origin']\nraw_dataset = pd.read_csv(dataset_path, names=column_names, \n na_values=\"?\", comment = '\\t' , sep = \" \", skipinitialspace=True)\ndataset = raw_dataset.copy()\ndataset.tail()\ndataset.head()\n\ndataset.isna().sum()\n\ndataset = dataset.dropna()\n\n#convert origin to multiple binary variables\norigin = dataset.pop('Origin')\n\ndataset['USA'] = (origin == 1)*1.0\ndataset['Europe'] = (origin == 2)*1.0\ndataset['Japan'] = (origin == 3)*1.0\ndataset.tail()\n\n# split into train and test\ntrain_dataset = dataset.sample(frac=0.8, random_state = 0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n# inspect data\nsns.pairplot(train_dataset[[\"MPG\", \"Cylinders\", \"Displacement\", \"Weight\"]], diag_kind=\"kde\")\nplt.show(block = False)\n\n# overall statistics\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"MPG\")\ntrain_stats = train_stats.transpose()\ntrain_stats\ntype(train_stats)\n\n# split features from labels\ntrain_labels = train_dataset.pop(\"MPG\")\ntest_labels = test_dataset.pop(\"MPG\")\n\n# normalize data\ndef norm(x):\n return (x - train_stats['mean']) / train_stats['std']\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)\n\n# the model\ndef build_model():\n model = keras.Sequential([\n layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n layers.Dense(64, activation=tf.nn.relu),\n layers.Dense(1)\n ])\n\n optimizer = tf.keras.optimizers.RMSprop(0.001)\n\n model.compile(\n loss='mean_squared_error',\n optimizer=optimizer, \n metrics=['mean_absolute_error', 'mean_squared_error'] \n )\n\n return model\n\nmodel = build_model()\n\n# inspect model\nmodel.summary()\n\nexample_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result\n\n# train model\n\n# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch , logs):\n if epoch % 100 == 0: print('')\n print('.', end='.')\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, train_labels,\n epochs=EPOCHS, validation_split = 0.2, verbose = 0,\n callbacks=[PrintDot()]\n)\n\n# visualize progress\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [MPG]')\n 
    plt.plot(hist['epoch'], hist['mean_absolute_error'], label='Train Error')\n    plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label='Val Error')\n    plt.ylim([0,5])\n    plt.legend()\n\n    plt.figure()\n    plt.xlabel('Epoch')\n    plt.ylabel('Mean Square Error [$MPG^2$]')\n    plt.plot(hist['epoch'], hist['mean_squared_error'],\n             label='Train Error')\n    plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n             label='Val Error')\n    plt.ylim([0,20])\n    plt.legend()\n    plt.show(block=False)\n\nplot_history(history)\n\n# early stopping\nmodel = build_model()\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\nhistory = model.fit(normed_train_data, train_labels, epochs=EPOCHS,\n                    validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history(history)\n\n# test set\nloss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)\nprint(\"Test set Mean Abs Error: {:5.2f} MPG\".format(mae))\n\n# make predictions\n\ntest_predictions = model.predict(normed_test_data).flatten()\n\n# test results\nplt.figure() # new figure\nplt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [MPG]')\nplt.ylabel('Predictions [MPG]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nplt.plot([-100, 100], [-100, 100])\nplt.show(block=False)\n\n# error of test\nplt.figure()\nerror = test_predictions - test_labels\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [MPG]\")\nplt.ylabel(\"Count\")\nplt.show(block=False)\n\n"
},
{
"alpha_fraction": 0.8316831588745117,
"alphanum_fraction": 0.8316831588745117,
"avg_line_length": 32.66666793823242,
"blob_id": "b596bf949d04ee2dd0130c8494c08b6c8d3723ce",
"content_id": "1f7281c55d6b41bb89001c07372a9e9c4a308d66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 3,
"path": "/README.md",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# learning-python\n\nVarious examples from introductions, machine/deep learning learning, data pulling\n"
},
{
"alpha_fraction": 0.616203248500824,
"alphanum_fraction": 0.6361140012741089,
"avg_line_length": 18.682432174682617,
"blob_id": "1142b1906c5abb0ce9b6981585dd1639de5adf79",
"content_id": "c56d1a2c87f5c73c7bbe08bfd0567deca8dba16a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2913,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 148,
"path": "/glmm_lossdev/pygam_prototype.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# %% [markdown]\n## # pyGAM practice\n\n# %%\n## import packages\n\nimport chainladder as cl\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom pygam import LinearGAM, s, f\n\n\n# %%\n## pull sample data\n\nraa = cl.load_sample(\"raa\")\n\n# %%\n## view data\n\nraa\n\n# %%\n## data to regular df\n\ndf = raa.to_frame(keepdims=True)\ndf[\"origin_year\"] = df[\"origin\"].dt.year\ndf.head()\n\nX_train = df[[\"development\", \"origin_year\"]]\ny_train = df[\"values\"]\n\n# %%\n## see ultimates from traditional method\n\nchainladderult = cl.Chainladder().fit(raa).ultimate_\nchainladderult\n\n# %%\n## create ultimate dataset to validate on\n\ndf_first = raa[(raa.development <= 12)].to_frame().rename(columns={12: \"values\"})\ndf_first[\"development\"] = 120\ndf_first = df_first.reset_index().rename(columns={\"index\": \"origin_year\"})\ndf_first[\"origin_year\"] = df_first[\"origin_year\"].dt.year\ndf_first\n\ndf_first.head()\n\nX_test = df_first[[\"development\", \"origin_year\"]]\ny_test = df_first[\"values\"]\n\n# %%\n# hyperparam grid\nlam = np.logspace(-1, 2, 4)\nlams = [lam] * 2\n\n# override for development to ensure best fit\n# lams[0] = np.linspace(0, 0, 4) # leads to overfitting\n\n# %%\n## fit model\n\n# gam = LinearGAM(s('development',constraints='monotonic_inc')).fit(df[['development','origin_year']], df['values'])\n\n# gam = LinearGAM(s(0,constraints='monotonic_inc') + s(1))\\\n# .fit(df[['development','origin_year']], df['values'])\n\n# ,constraints='monotonic_inc'\n\ngam = LinearGAM(s(0) + s(1)).gridsearch(\n df[[\"development\", \"origin_year\"]].to_numpy(), df[\"values\"], lam=lams\n)\n\ngam.summary()\n\n# %%\n## predict\n\nult_pred = pd.Series(\n gam.predict(df_first[[\"development\", \"origin_year\"]]), name=\"values\"\n)\n\nult_pred\n\n# %%\n## see original data\n\nraa\n# %%\n## partial dependence\n\nplt.figure()\nfig, axs = plt.subplots(1, 2)\n\ntitles = [\"development\", \"origin_year\"]\nfor i, ax in enumerate(axs):\n XX = gam.generate_X_grid(term=i)\n ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX))\n ax.plot(\n XX[:, i], gam.partial_dependence(term=i, X=XX, width=0.95)[1], c=\"r\", ls=\"--\"\n )\n # if i == 0:\n # ax.set_ylim(-30,30)\n ax.set_title(titles[i])\n\n# %%\n## test predict out of sample\ngam.predict([[150, 2000]])\n\n# %%\n## validate results\n\n# predict for original\ny_pred = gam.predict(df[[\"development\", \"origin_year\"]])\ny_true = df[\"values\"]\n\n# %%\n## validation plots\n\nfig, ax = plt.subplots(1, 1)\n# plt.figure(figsize=(10,10))\nax.scatter(y_pred, y_true, c=\"crimson\")\nax.axline([0, 0], [np.max(y_pred), np.max(y_pred)])\nax.set_title(\"Y: actual vs X: predicted\")\n\nfig, ax = plt.subplots(1, 1)\n# plt.figure(figsize=(10,10))\nax.scatter(y_pred, y_true - y_pred, c=\"crimson\")\nax.axline([0, 0], [np.max(y_pred), 0])\nax.set_title(\"X: predicted vs Y: residuals\")\n\n# %%\n# metrics\n\nfrom sklearn import metrics\n\nresult_r2 = metrics.r2_score(y_true, y_pred)\n\nprint(f\"R^2 in training set: {result_r2}\")\n\n\n# %%\n# TODO grid search\n\n# %%\n"
},
{
"alpha_fraction": 0.6848739385604858,
"alphanum_fraction": 0.7190876603126526,
"avg_line_length": 31.038461685180664,
"blob_id": "caf30e548fc053b96ff1913f102d51eb1ce73801",
"content_id": "7cdc1ad12e7a9f86b28097754e20324e29b2bccc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1666,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 52,
"path": "/Python-For-Finance/Python-For-Finance.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# https://towardsdatascience.com/python-for-finance-stock-portfolio-analyses-6da4c3e61054\n\n# %% 1 Import initial libraries\nimport pandas as pd\nimport xlrd #for excel import\n\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport plotly.graph_objs as go\n\n# Imports in order to be able to use Plotly offline.\nfrom plotly import __version__\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\n\nprint(__version__) # requires version >= 1.9.0\n\n#init_notebook_mode(connected=True) # for jupyter notebook\n\n# Import the Sample worksheet with acquisition dates and initial cost basis:\n\n# %% 2 read in data\nportfolio_df = pd.read_excel('Sample_stocks_acquisition_dates_costs.xlsx', \n sheet_name='Sample')\nportfolio_df.head(10)\n\n\n# Date Ranges for SP 500 and for all tickers\n# Modify these date ranges each week.\n\n# The below will pull back stock prices from the start date until end date specified.\nstart_sp = datetime.datetime(2013, 1, 1)\nend_sp = datetime.datetime(2018, 3, 9)\n\n# This variable is used for YTD performance.\nend_of_last_year = datetime.datetime(2017, 12, 29)\n\n# These are separate if for some reason want different date range than SP.\nstocks_start = datetime.datetime(2013, 1, 1)\nstocks_end = datetime.datetime(2018, 3, 9)\n\n# Leveraged from the helpful Datacamp Python Finance trading blog post.\nfrom pandas_datareader import data as pdr\nsp500 = pdr.get_data_yahoo('^GSPC', \n start_sp,\n end_sp)\n \nsp500.head()\n\n# %% 3 Generate a dynamic list of tickers\n# to pull from Yahoo Finance API based on the imported file with tickers.\n"
},
{
"alpha_fraction": 0.7608200311660767,
"alphanum_fraction": 0.7744874954223633,
"avg_line_length": 26.375,
"blob_id": "d156bb0df2041b193e14e6e03bf5fff597f338d5",
"content_id": "2d45641cebcfa738427792760daecee6e7582a58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 16,
"path": "/logisticwspark.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# logistic regression with apache spark\n# https://medium.com/fuzz/understanding-logistic-regression-w-apache-spark-python-c32eae4d614e\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n# install packages\ninstall(\"numpy\")\ninstall(\"pyspark\")\n\nimport numpy as np \nfrom numpy import array \nfrom pyspark.mllib.regression import LabeledPoint \n"
},
{
"alpha_fraction": 0.6334661245346069,
"alphanum_fraction": 0.6389442086219788,
"avg_line_length": 18.881187438964844,
"blob_id": "256fc148831fbc08d3e4969b3ce8edbdde8420e1",
"content_id": "fbc9ad48040e9876bbaec939944f7b761a28b8dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2008,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 101,
"path": "/glmm_lossdev/glmm_practice.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# %% [markdown]\n## # GLMM practice\n\n# %% \n## import packages \n\nimport chainladder as cl\nimport pandas as pd\n\n# for mixed models\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n\n# %%\n## pull sample data\n\nraa = cl.load_sample('raa')\n\n# %%\n## view data\n\nraa\n\n# %%\n## data to regular df\n\ndf = raa.to_frame(keepdims=True)\ndf['origin_year'] = df['origin'].dt.year\ndf.head()\n\n# %%\n## see ultimates from traditional method\n\nchainladderult = cl.Chainladder().fit(raa).ultimate_\nchainladderult\n\n# %%\n## create ultimate dataset to validate on\n\ndf_first = raa[(raa.development<=12)].to_frame().rename(columns={12:'values'})\ndf_first['development'] = 120\ndf_first = df_first.reset_index().rename(columns={'index':'origin_year'})\ndf_first['origin_year'] = df_first['origin_year'].dt.year\ndf_first\n\ndf_first.head()\n\n# %% \n## data processing\n\n# df['origin_year'] = df['origin_year'].astype(str)\n# df_first['origin_year'] = df_first['origin_year'].astype(str)\n\n# %%\n## selections for modeling\n\nselections = []\nlist_of_selections = [\n # formula , groups , re_formula\n # later formula has formula + (re_formula | groups)\n (\"values ~ development\", \"origin_year\" ,\"1\" ),\n # (\"values ~ development\", \"origin_year\" ,None ),\n # (\"values ~ development\", \"origin_year\" ,\"development\" ),\n # (\"values ~ development\", \"origin_year\" ,\"~development\" ),\n # (\"values ~ origin_year\", \"development\" ,None ),\n]\n\nfor l in list_of_selections:\n selections.append(l) \n\n\n# %%\n## fit on mixed models & show results\n\n# vc = {'development': 'development'}\nres_list = []\nfor s in selections:\n print(s)\n md = smf.mixedlm(s[0], df, groups=df[s[1]], re_formula=s[2]) \n\n mdf = md.fit(method=[\"lbfgs\"])\n print(mdf.summary())\n \n res = mdf.predict(df_first)\n print(res)\n res_list.append(res_list)\n\n# %% [markdown]\n# **predictions don't work bc only predict Fixed effects, Not random**\n\n# %%\n## predict based on model\n\n\n\n# %%\n## compare to chain ladder ultimate\n\nchainladderult\n# %%\n"
},
{
"alpha_fraction": 0.6482545733451843,
"alphanum_fraction": 0.6558250188827515,
"avg_line_length": 42.23030471801758,
"blob_id": "26ef1ab371ac05d99e16f1cfd879ab4281c0435c",
"content_id": "f4d62b7f29f39718c873875becf6273478ff1e01",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7133,
"license_type": "permissive",
"max_line_length": 180,
"num_lines": 165,
"path": "/lime-experiments-master/evaluate_explanations.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# From lime paper, with sw additions\n\n# code to auto add packages\nimport subprocess\nimport sys\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\ninstall(\"numpy\")\ninstall(\"scipy\")\ninstall(\"scikit-learn\")\ninstall(\"xgboost\")\n\nimport sys\nimport copy\nsys.path.append('..')\nimport argparse\nimport explainers\nimport parzen_windows\nimport numpy as np\nimport pickle\nimport sklearn\nfrom load_datasets import *\nfrom sklearn.metrics import accuracy_score\n\ndef get_tree_explanation(tree, v):\n t = tree.tree_\n nonzero = v.nonzero()[1]\n current = 0\n left_child = t.children_left[current]\n exp = set()\n while left_child != sklearn.tree._tree.TREE_LEAF:\n left_child = t.children_left[current]\n right_child = t.children_right[current]\n f = t.feature[current]\n if f in nonzero:\n exp.add(f)\n if v[0,f] < t.threshold[current]:\n current = left_child\n else:\n current = right_child\n return exp\nclass ExplanationEvaluator:\n def __init__(self, classifier_names=None):\n self.classifier_names = classifier_names\n if not self.classifier_names:\n self.classifier_names = ['l1logreg', 'tree']\n self.classifiers = {}\n def init_classifiers(self, dataset):\n self.classifiers[dataset] = {}\n for classifier in self.classifier_names:\n if classifier == 'l1logreg':\n try_cs = np.arange(.1,0,-.01)\n for c in try_cs:\n self.classifiers[dataset]['l1logreg'] = linear_model.LogisticRegression(penalty='l1', fit_intercept=True, C=c)\n self.classifiers[dataset]['l1logreg'].fit(self.train_vectors[dataset], self.train_labels[dataset])\n lengths = [len(x.nonzero()[0]) for x in self.classifiers[dataset]['l1logreg'].transform(self.train_vectors[dataset])]\n if np.max(lengths) <= 10:\n #print 'Logreg for ', dataset, ' has mean length', np.mean(lengths), 'with C=', c\n #print 'And max length = ', np.max(lengths)\n break\n if classifier == 'tree':\n self.classifiers[dataset]['tree'] = tree.DecisionTreeClassifier(random_state=1)\n self.classifiers[dataset]['tree'].fit(self.train_vectors[dataset], self.train_labels[dataset])\n lengths = [len(get_tree_explanation(self.classifiers[dataset]['tree'], self.train_vectors[dataset][i])) for i in range(self.train_vectors[dataset].shape[0])]\n #print 'Tree for ', dataset, ' has mean length', np.mean(lengths)\n def load_datasets(self, dataset_names):\n self.train_data = {}\n self.train_labels = {}\n self.test_data = {}\n self.test_labels = {}\n for dataset in dataset_names:\n self.train_data[dataset], self.train_labels[dataset], self.test_data[dataset], self.test_labels[dataset], _ = LoadDataset(dataset)\n def vectorize_and_train(self):\n self.vectorizer = {}\n self.train_vectors = {}\n self.test_vectors = {}\n self.inverse_vocabulary = {}\n print ('Vectorizing...'), \n for d in self.train_data:\n self.vectorizer[d] = CountVectorizer(lowercase=False, binary=True)\n self.train_vectors[d] = self.vectorizer[d].fit_transform(self.train_data[d])\n self.test_vectors[d] = self.vectorizer[d].transform(self.test_data[d])\n terms = np.array(list(self.vectorizer[d].vocabulary_.keys()))\n indices = np.array(list(self.vectorizer[d].vocabulary_.values()))\n self.inverse_vocabulary[d] = terms[np.argsort(indices)]\n print ('Done')\n print ('Training...')\n for d in self.train_data:\n print (d)\n self.init_classifiers(d)\n print ('Done')\n print\n def measure_explanation_hability(self, explain_fn, max_examples=None):\n \"\"\"Asks for explanations for all predictions in the train and test set, with\n budget = size of explanation. 
    Returns two maps (train_results,\n    test_results), from dataset to classifier to list of recalls\"\"\"\n    budget = 10\n    train_results = {}\n    test_results = {}\n    for d in self.train_data:\n      train_results[d] = {}\n      test_results[d] = {}\n      print('Dataset: ' + d)\n      for c in self.classifiers[d]:\n        train_results[d][c] = []\n        test_results[d][c] = []\n        if c == 'l1logreg':\n          c_features = self.classifiers[d][c].coef_.nonzero()[1]\n        print('classifier: ' + c)\n        for i in range(len(self.test_data[d])):\n          if c == 'l1logreg':\n            true_features = set([x for x in self.test_vectors[d][i].nonzero()[1] if x in c_features])\n          elif c == 'tree':\n            true_features = get_tree_explanation(self.classifiers[d][c], self.test_vectors[d][i])\n          if len(true_features) == 0:\n            continue\n          to_get = budget\n          exp_features = set(map(lambda x: x[0],\n                                 explain_fn(self.test_vectors[d][i], self.test_labels[d][i], self.classifiers[d][c], to_get, d)))\n          test_results[d][c].append(float(len(true_features.intersection(exp_features))) / len(true_features))\n          if max_examples and i >= max_examples:\n            break\n    return train_results, test_results\n\ndef main():\n  parser = argparse.ArgumentParser(description='Evaluate some explanations')\n  parser.add_argument('--dataset', '-d', type=str, required=True, help='dataset name')\n  parser.add_argument('--algorithm', '-a', type=str, required=True, help='algorithm_name')\n  parser.add_argument('--explainer', '-e', type=str, required=True, help='explainer name')\n  args = parser.parse_args()\n  dataset = args.dataset\n  algorithm = args.algorithm\n  evaluator = ExplanationEvaluator(classifier_names=[algorithm])\n  evaluator.load_datasets([dataset])\n  evaluator.vectorize_and_train()\n  explain_fn = None\n  if args.explainer == 'lime':\n    rho = 25\n    kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho ** 2))\n    explainer = explainers.GeneralizedLocalExplainer(kernel, explainers.data_labels_distances_mapping_text, num_samples=15000, return_mean=False, verbose=False, return_mapped=True)\n    explain_fn = explainer.explain_instance\n  elif args.explainer == 'parzen':\n    sigmas = {'multi_polarity_electronics': {'tree': 0.5, 'l1logreg': 1},\n              'multi_polarity_kitchen': {'tree': 0.75, 'l1logreg': 2.0},\n              'multi_polarity_dvd': {'tree': 8.0, 'l1logreg': 1},\n              'multi_polarity_books': {'tree': 2.0, 'l1logreg': 2.0}}\n\n    explainer = parzen_windows.ParzenWindowClassifier()\n    cv_preds = sklearn.cross_validation.cross_val_predict(evaluator.classifiers[dataset][algorithm], evaluator.train_vectors[dataset], evaluator.train_labels[dataset])\n    explainer.fit(evaluator.train_vectors[dataset], cv_preds)\n    explainer.sigma = sigmas[dataset][algorithm]\n    explain_fn = explainer.explain_instance\n  elif args.explainer == 'greedy':\n    explain_fn = explainers.explain_greedy\n  elif args.explainer == 'random':\n    explainer = explainers.RandomExplainer()\n    explain_fn = explainer.explain_instance\n  train_results, test_results = evaluator.measure_explanation_hability(explain_fn)\n  print('Average test:', np.mean(test_results[dataset][algorithm]))\n  out = {'train': train_results[dataset][algorithm], 'test': test_results[dataset][algorithm]}\n\nif __name__ == \"__main__\":\n  main()\n"
},
{
"alpha_fraction": 0.6418300867080688,
"alphanum_fraction": 0.6564270257949829,
"avg_line_length": 25.674419403076172,
"blob_id": "a8c80058be7c86e19a026b9e5688b93803f20a1e",
"content_id": "deeee6247a15145fe51da0f8515a3d2b315850ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4590,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 172,
"path": "/glmm_lossdev/gpboost_example2.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# %%\n## GPBoost working example sklearn wrapper\n\n\"\"\"\nExamples on how to use the scikit-learn wrapper interface for the GPBoost\nalgorithm for combining tree-boosting with random effects and Gaussian\nprocess models\n@author: Fabio Sigrist\n\"\"\"\n\n# %%\n## imports \nimport numpy as np\nimport gpboost as gpb\nimport random\nimport matplotlib.pyplot as plt\n\nimport chainladder as cl\nimport pandas as pd\n\n\n# %%\n## deprecated sample data\n# print('Simulating data...')\n# # Simulate data\n# n = 5000 # number of samples\n# m = 500 # number of groups\n# # Simulate grouped random effects\n# np.random.seed(1)\n# # simulate grouped random effects\n# group = np.arange(n) # grouping variable\n# for i in range(m):\n# group[int(i * n / m):int((i + 1) * n / m)] = i\n# b1 = np.random.normal(size=m) # simulate random effects\n# eps = b1[group]\n# # simulate fixed effects\n# def f1d(x):\n# \"\"\"Non-linear function for simulation\"\"\"\n# return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))\n# X = np.random.rand(n, 2)\n# f = f1d(X[:, 0])\n# xi = np.sqrt(0.01) * np.random.normal(size=n) # simulate error term\n# y = f + eps + xi # observed data\n\n# %%\n## pull sample data\n\nraa = cl.load_sample('raa')\n\n# %%\n## view data\n\nraa\n\n# %%\n## data to regular df\n\ndf = raa.to_frame(keepdims=True)\ndf['origin_year'] = df['origin'].dt.year\ndf.head()\n\n# %%\n## see ultimates from traditional method\n\nchainladderult = cl.Chainladder().fit(raa).ultimate_\nchainladderult\n\n# %%\n## create ultimate dataset to validate on\n\ndf_first = raa[(raa.development<=12)].to_frame().rename(columns={12:'values'})\ndf_first['development'] = 120\ndf_first = df_first.reset_index().rename(columns={'index':'origin_year'})\ndf_first['origin_year'] = df_first['origin_year'].dt.year\ndf_first\n\ndf_first.head()\n\n# %%\n## build model\n\nprint('Starting training...')\n# define GPModel\ngp_model = gpb.GPModel(group_data=df[['origin_year']], likelihood=\"gaussian\")\n# train\nbst = gpb.GPBoostRegressor(max_depth=10,\n learning_rate=1,\n min_data_in_leaf=1,\n n_estimators=100)\nbst.fit(df[['development']], df[['values']], gp_model=gp_model)\nprint(\"Estimated random effects model\")\ngp_model.summary()\n\n\n# %%\n## predict\n\nprint('Starting predicting...')\n\ngroup_test = df_first[['origin_year']]\nXtest = df_first[['development']]\n# Xtest[:, 0] = np.linspace(0, 1, m)\npred = bst.predict(X=Xtest, group_data_pred=group_test)\ny_pred = pred['fixed_effect'] + pred['random_effect_mean']\ny_pred = pd.Series(y_pred, name='values')\n\nprint(y_pred)\n\n# %%\n## explain using shap NOT WORKING FOR ALL BELOW\n\n# for use in explainer below\ndef predict_test(X):\n return bst.predict(X,group_test)\n\nimport shap\nbst_explainer = shap.Explainer(predict_test, Xtest)\nbst_explainer(Xtest)\n# shap_obj = bst_explainer(df[['development']])\n# shap_values = shap.Explainer(bst.predict\n# ).shap_values(df[['development']])\n# shap.summary_plot(shap_values, df[['development']])\n\n# %%\n## explain on partial dependence\n\nshap.dependence_plot(\"development\", shap_values, df[['development']])\n\n\n# %%\n# \n# Compare true and predicted random effects\nplt.figure(\"Comparison of true and predicted random effects\")\nplt.scatter(b1, pred['random_effect_mean'])\nplt.title(\"Comparison of true and predicted random effects\")\nplt.xlabel(\"truth\")\nplt.ylabel(\"predicted\")\nplt.show()\n# Fixed effect\nplt.figure(\"Comparison of true and fitted fixed effect\")\nplt.scatter(Xtest[:, 0], pred['fixed_effect'], linewidth=2, 
color=\"b\", label=\"fit\")\nx = np.linspace(0, 1, 200, endpoint=True)\nplt.plot(x, f1d(x), linewidth=2, color=\"r\", label=\"true\")\nplt.title(\"Comparison of true and fitted fixed effect\")\nplt.legend()\nplt.show()\n\n# feature importances\nprint('Feature importances:', list(bst.feature_importances_))\n\n# Using validation set\nprint('Using validation set...')\n# split into training an test data\ntrain_ind = random.sample(range(n), int(n / 2))\ntest_ind = [x for x in range(n) if (x not in train_ind)]\nX_train = X[train_ind, :]\ny_train = y[train_ind]\ngroup_train = group[train_ind]\nX_test = X[test_ind, :]\ny_test = y[test_ind]\ngroup_test = group[test_ind]\n# train\ngp_model = gpb.GPModel(group_data=group_train, likelihood=\"gaussian\")\ngp_model.set_prediction_data(group_data_pred=group_test)\nbst = gpb.GPBoostRegressor(max_depth=6,\n learning_rate=0.05,\n min_data_in_leaf=5,\n n_estimators=100)\nbst.fit(X_train, y_train, gp_model=gp_model,\n eval_set=[(X_test, y_test)],\n eval_metric='l1',\n early_stopping_rounds=5)\n\n\n"
},
{
"alpha_fraction": 0.6561900973320007,
"alphanum_fraction": 0.6653890609741211,
"avg_line_length": 24.086538314819336,
"blob_id": "c285ce84e089e4bb05bcd8a4eb4604a15f7582d4",
"content_id": "e5ed259809b67ae742e278da3279e5d509e90775",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2609,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 104,
"path": "/glmm_lossdev/gpboost_prototype.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# %% [markdown] \n## # Testing gpboost\n\n\n# %% \n## import packages \n\nimport chainladder as cl\nimport pandas as pd\n\n# for mixed models\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n# for gpboost\nimport gpboost as gpb\n\n\n# %%\n## pull sample data\n\nraa = cl.load_sample('raa')\n\n# %%\n## view data\n\nraa\n\n# %%\n## data to regular df\n\ndf = raa.to_frame(keepdims=True)\ndf['origin_year'] = df['origin'].dt.year\ndf.head()\n\n# %%\n## see ultimates from traditional method\n\nchainladderult = cl.Chainladder().fit(raa).ultimate_\nchainladderult\n\n# %%\n## create ultimate dataset to validate on\n\ndf_first = raa[(raa.development<=12)].to_frame().rename(columns={12:'values'})\ndf_first['development'] = 120\ndf_first = df_first.reset_index().rename(columns={'index':'origin_year'})\ndf_first['origin_year'] = df_first['origin_year'].dt.year\ndf_first\n\ndf_first.head()\n\n# %%\n## fit model\n\n\"\"\"\nBoosting with two crossed firm and year grouped random effects\n\"\"\"\n# Define random effects model \ngp_model = gpb.GPModel(group_data=df[['origin_year']])\n# Create dataset for gpb.train\ndata_train = gpb.Dataset(data=df[['development']], label=df['values'])\n# Specify boosting parameters as dict\n# Note: no attempt has been done to optimaly choose tuning parameters\nparams = { 'objective': 'regression_l2',\n 'learning_rate': 1,\n 'max_depth': 6,\n 'min_data_in_leaf': 1,\n 'verbose': 0 }\n# Train GPBoost model\nbst = gpb.train(params=params,\n train_set=data_train,\n gp_model=gp_model,\n num_boost_round=100)\n# Estimated random effects model (variances of random effects)\ngp_model.summary()\n\n# %%\n# predict\nbst.predict(data=df_first[['development','origin_year']])\n\n\n# %%\n## cross validation\n\n# Cross-validation for determining number of boosting iterations\ngp_model = gpb.GPModel(group_data=data[['firm', 'year']])\ndata_train = gpb.Dataset(data=data[['value', 'capital']], label=data['invest'])\ncvbst = gpb.cv(params=params, train_set=data_train,\n gp_model=gp_model, use_gp_model_for_validation=True,\n num_boost_round=5000, early_stopping_rounds=5,\n nfold=2, verbose_eval=True, show_stdv=False, seed=1)\nprint(\"Best number of iterations: \" + str(np.argmin(cvbst['l2-mean'])))\n\n\n\"\"\"\nLinear mixed effecst model with two crossed firm and year grouped random effects\n\"\"\"\nlin_gp_model = gpb.GPModel(group_data=data[['firm', 'year']])\n# Add interecept for linear model\nX = data[['value', 'capital']]\nX['intercept'] = 1\nlin_gp_model.fit(y=data['invest'], X=X, params={\"std_dev\": True})\nlin_gp_model.summary()\n"
},
{
"alpha_fraction": 0.6521386504173279,
"alphanum_fraction": 0.6916683316230774,
"avg_line_length": 33.9929084777832,
"blob_id": "3a62d2b713ad691058d0493a50b1558623d2892c",
"content_id": "7c2f909e08722fd4c47a2c3ee2057313a6885802",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4933,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 141,
"path": "/probability-distributions.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "# Probability Distributions \n# https://www.mikulskibartosz.name/monte-carlo-simulation-in-python/\n\n# required packages\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn\n\n# Titanic dataset\ndataset = seaborn.load_dataset('titanic')\nRANDOM_STATE = 31415\n\nages = dataset.age.dropna()\n\n# %% Uniform distribution\nfrom scipy.stats import uniform\nuniform_dist = uniform(loc = 0, scale = 20)\nuniform_dist.rvs(size = 10, random_state = RANDOM_STATE)\nx = np.linspace(-5, 25,100) \n_, ax = plt.subplots(1,1) #only use second argument so skip first\nax.plot(x,uniform_dist.pdf(x),'r--',lw=2)\nplt.title('Uniform distribution of values between 0 and 20')\nplt.show(block=False) \n\n# %% Bernoulli\nfrom scipy.stats import bernoulli\ncountSurvived = dataset[dataset.survived ==1].survived.count()\ncountAll = dataset.survived.count()\nsurvived_dist = bernoulli(countSurvived / countAll)\n# the given value is prob of outcome == 1 , call it p\n_, ax = plt.subplots(1,1)\nax.vlines(0,0, survived_dist.pmf(0), colors='r',linestyles = '-', lw= 5, label = 'prob of death')\nax.vlines(1,0,survived_dist.pmf(1), colors = 'b', linestyles = '-', lw = 5, label = 'prob of survival')\nax.legend(loc = 'best', frameon=False)\nplt.title('Bernoulli dist')\nplt.show(block=False)\n\n# Discrete random variable\nfrom scipy.stats import rv_discrete\npclass_probability = pd.DataFrame({'probability': dataset.groupby(by = \"pclass\", as_index = False).size() \n / dataset.pclass.count()}).reset_index()\nvalues = pclass_probability.pclass\nprobabilities = pclass_probability.probability\ncustom_discrete_dist = rv_discrete(values=(values,probabilities))\nx = [0,0.9,1,2,2.5,3,4]\n_ , ax = plt.subplots(1,1)\nax.plot(x, custom_discrete_dist.pmf(x), 'ro', lw=2)\nplt.title('Custom discrete distribution of values bw 0 and 4')\nplt.show(block=False)\n\n# normal distribution \nfrom scipy.stats import norm\nmean = 3\nstandard_deviation = 2\nnormal_distribution = norm(loc = mean, scale = standard_deviation)\nx = np.linspace(-6,12,200)\n_ , ax = plt.subplots(1,1)\nax.plot(x,normal_distribution.pdf(x), '-', lw = 2)\nplt.title('Norm dist')\nplt.show(block=False)\n\n# gamma distribution\nfrom scipy.stats import gamma\ngamma_distribution = gamma(loc = 3, scale = 3, a = 3)\nx = np.linspace(0,12,200)\n_ , ax = plt.subplots(1,1)\nax.plot(x, gamma_distribution.pdf(x), '-', lw = 2)\nplt.title('Exponential (gamma with a = 1)')\nplt.show(block=False)\n\n# fit distribution to data\ndef fit_and_plot(dist): \n params = dist.fit(ages)\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n x = np.linspace(0,80,80)\n _ , ax = plt.subplots(1,1)\n plt.hist(ages,bins=80, range=(0,80))\n ax2 = ax.twinx()\n ax2.plot(x, dist.pdf(x,loc=loc,scale=scale, *arg),'-',color=\"r\",lw=2)\n plt.show(block=False)\n return dist, loc, scale, arg\n\nimport scipy\nfit_and_plot(scipy.stats.norm)\nfit_and_plot(scipy.stats.gamma)\n\n#choose best dist using Kolmogorov-Smirnov \nfrom scipy.stats import kstest\ndist, log, scale, arg = fit_and_plot(scipy.stats.norm)\nd , pvalue = kstest(ages.tolist(), lambda x: dist.cdf(x, loc = loc, scale = scale , *arg), alternative=\"two-sided\")\npvalue\n\ndef fit_and_plot_cdf(dist):\n params = dist.fit(ages)\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n x = np.linspace(0,80,80)\n _ , ax = plt.subplots(1,1)\n counts, bin_edges = np.histogram(ages, bins = 80, ) #normed=True)\n cdf = np.cumsum(counts)\n plt.plot(bin_edges[1:],cdf)\n ax2 = ax.twinx()\n 
    ax2.plot(x, dist.cdf(x, *arg, loc=loc, scale=scale), '-', color='r', lw=2)\n    plt.show(block=False)\n    return dist, loc, scale, arg\n\nfit_and_plot_cdf(scipy.stats.norm)\n\n# monte carlo simulation\nnp.random.seed(seed=233423) # make sure results are reproducible\n_90_conf_interval = 3.29 # upper minus lower\nmaintenance = norm(loc = (20+10)/2, scale = (20-10) / _90_conf_interval)\nlabor = norm(loc = (8 + -2)/2, scale = (8 - -2) / _90_conf_interval)\nraw_material = norm((9+3)/2, scale = (9 - -3) / _90_conf_interval)\nprod_level = norm(loc = (35000+15000)/2, scale = (35000 - 15000) / _90_conf_interval)\nnumber_of_simulations = 100000\nmaintenance_results = maintenance.rvs(number_of_simulations)\nraw_material_results = raw_material.rvs(number_of_simulations)\nlabor_results = labor.rvs(number_of_simulations)\nprod_level_results = prod_level.rvs(number_of_simulations)\ndata = pd.DataFrame({\n    \"maintenance_savings_per_unit\": maintenance_results,\n    \"labor_savings_per_unit\": labor_results,\n    \"raw_material_savings_per_unit\": raw_material_results,\n    \"production_level\": prod_level_results\n})\n\ndata[\"total_savings\"] = (data.maintenance_savings_per_unit\n                         + data.labor_savings_per_unit\n                         + data.raw_material_savings_per_unit) * data.production_level\n\nplt.hist(data.total_savings, bins = 100)\nplt.axvline(x = 400000, c = \"r\")\nplt.show(block=False)\n\n# count prob of savings < 400k\ndata[data[\"total_savings\"] < 400000].count()[\"total_savings\"] / 100000"
},
{
"alpha_fraction": 0.5741479396820068,
"alphanum_fraction": 0.5826969146728516,
"avg_line_length": 35.545833587646484,
"blob_id": "cdf35e0a84a152ad1b0093a5d7ca4f62e7c00591",
"content_id": "c91526d437719a00f61d6772b1cf364e8264e380",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8773,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 240,
"path": "/lime-experiments-master/explainers.py",
"repo_name": "sws144/learning-python",
"src_encoding": "UTF-8",
"text": "from abc import ABCMeta, abstractmethod\nimport numpy as np\nimport scipy as sp\nfrom sklearn import linear_model\nimport sklearn.metrics.pairwise\n\n###############################\n## Random Explainer\n###############################\n\nclass RandomExplainer:\n def __init__(self):\n pass\n\n def reset(self):\n pass\n\n def explain_instance(self,\n instance_vector,\n label,\n classifier,\n num_features,\n dataset):\n nonzero = instance_vector.nonzero()[1]\n explanation = np.random.choice(nonzero, num_features)\n return [(x, 1) for x in explanation]\n\n def explain(self,\n train_vectors,\n train_labels,\n classifier,\n num_features,\n dataset):\n i = np.random.randint(0, train_vectors.shape[0])\n explanation = self.explain_instance(train_vectors[i], None, None,\n num_features, dataset)\n return i, explanation\n\n###############################\n## Standalone Explainers\n###############################\n\ndef most_important_word(classifier, v, class_):\n # Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.\n max_index = 0\n max_change = -1\n orig = classifier.predict_proba(v)[0][class_]\n for i in v.nonzero()[1]:\n val = v[0,i]\n v[0,i] = 0\n pred = classifier.predict_proba(v)[0][class_]\n change = orig - pred\n if change > max_change:\n max_change = change\n max_index = i\n v[0,i] = val\n if max_change < 0:\n return -1\n return max_index\n\ndef explain_greedy(instance_vector,\n label,\n classifier,\n num_features,\n dataset=None):\n explanation = []\n z = instance_vector.copy()\n while len(explanation) < num_features:\n i = most_important_word(classifier, z, label)\n if i == -1:\n break\n z[0,i] = 0\n explanation.append(i)\n return [(x, 1) for x in explanation]\ndef most_important_word_martens(predict_fn, v, class_):\n # Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.\n max_index = 0\n max_change = -1\n orig = predict_fn(v)[0,class_]\n for i in v.nonzero()[1]:\n val = v[0,i]\n v[0,i] = 0\n pred = predict_fn(v)[0,class_]\n change = orig - pred\n if change > max_change:\n max_change = change\n max_index = i\n v[0,i] = val\n if max_change < 0:\n return -1, max_change\n return max_index, max_change\n\ndef explain_greedy_martens(instance_vector,\n label,\n predict_fn,\n num_features,\n dataset=None):\n if not hasattr(predict_fn, '__call__'): \n predict_fn = predict_fn.predict_proba\n explanation = []\n z = instance_vector.copy()\n cur_score = predict_fn(instance_vector)[0, label]\n while len(explanation) < num_features:\n i, change = most_important_word_martens(predict_fn, z, label)\n cur_score -= change\n if i == -1:\n break\n explanation.append(i)\n if cur_score < .5:\n break\n z[0,i] = 0\n return [(x, 1) for x in explanation]\n\ndef data_labels_distances_mapping_text(x, classifier_fn, num_samples):\n distance_fn = lambda x : sklearn.metrics.pairwise.cosine_distances(x[0],x)[0] * 100\n features = x.nonzero()[1]\n vals = np.array(x[x.nonzero()])[0]\n doc_size = len(sp.sparse.find(x)[2]) \n sample = np.random.randint(1, doc_size, num_samples - 1) \n data = np.zeros((num_samples, len(features))) \n inverse_data = np.zeros((num_samples, len(features))) \n data[0] = np.ones(doc_size)\n inverse_data[0] = vals\n features_range = range(len(features)) \n for i, s in enumerate(sample, start=1): \n active = np.random.choice(features_range, s, replace=False) \n data[i, active] = 1\n for j in active:\n inverse_data[i, j] = 1\n sparse_inverse = sp.sparse.lil_matrix((inverse_data.shape[0], x.shape[1]))\n sparse_inverse[:, features] = inverse_data\n 
  sparse_inverse = sp.sparse.csr_matrix(sparse_inverse)\n  mapping = features\n  labels = classifier_fn(sparse_inverse)\n  distances = distance_fn(sparse_inverse)\n  return data, labels, distances, mapping\n\n# This is LIME\nclass GeneralizedLocalExplainer:\n  def __init__(self,\n               kernel_fn,\n               data_labels_distances_mapping_fn,\n               num_samples=5000,\n               lasso=True,\n               mean=None,\n               return_mean=False,\n               return_mapped=False,\n               lambda_=None,\n               verbose=True,\n               positive=False):\n    # Transform_classifier, transform_explainer,\n    # transform_explainer_to_classifier all take raw data in, whatever that is.\n    # perturb(x, num_samples) returns data (perturbed data in f'(x) form),\n    # inverse_data (perturbed data in x form) and mapping, where mapping is such\n    # that mapping[i] = j, where j is an index for x form.\n    # distance_fn takes raw data in. what we're calling raw data is just x\n    self.lambda_ = lambda_\n    self.kernel_fn = kernel_fn\n    self.data_labels_distances_mapping_fn = data_labels_distances_mapping_fn\n    self.num_samples = num_samples\n    self.lasso = lasso\n    self.mean = mean\n    self.return_mapped = return_mapped\n    self.return_mean = return_mean\n    self.verbose = verbose\n    self.positive = positive\n  def reset(self):\n    pass\n  def data_labels_distances_mapping(self, raw_data, classifier_fn):\n    data, labels, distances, mapping = self.data_labels_distances_mapping_fn(raw_data, classifier_fn, self.num_samples)\n    return data, labels, distances, mapping\n  def generate_lars_path(self, weighted_data, weighted_labels):\n    X = weighted_data\n    alphas, active, coefs = linear_model.lars_path(X, weighted_labels, method='lasso', verbose=False, positive=self.positive)\n    return alphas, coefs\n  def explain_instance_with_data(self, data, labels, distances, label, num_features):\n    weights = self.kernel_fn(distances)\n    weighted_data = data * weights[:, np.newaxis]\n    if self.mean is None:\n      mean = np.mean(labels[:, label])\n    else:\n      mean = self.mean\n    shifted_labels = labels[:, label] - mean\n    if self.verbose:\n      print('mean:', mean)\n    weighted_labels = shifted_labels * weights\n    used_features = range(weighted_data.shape[1])\n    nonzero = used_features\n    alpha = 1\n    if self.lambda_:\n      classif = linear_model.Lasso(alpha=self.lambda_, fit_intercept=False, positive=self.positive)\n      classif.fit(weighted_data, weighted_labels)\n      used_features = classif.coef_.nonzero()[0]\n      if used_features.shape[0] == 0:\n        if self.return_mean:\n          return [], mean\n        else:\n          return []\n    elif self.lasso:\n      alphas, coefs = self.generate_lars_path(weighted_data, weighted_labels)\n      for i in range(len(coefs.T) - 1, 0, -1):\n        nonzero = coefs.T[i].nonzero()[0]\n        if len(nonzero) <= num_features:\n          chosen_coefs = coefs.T[i]\n          alpha = alphas[i]\n          break\n      used_features = nonzero\n    debiased_model = linear_model.Ridge(alpha=0, fit_intercept=False)\n    debiased_model.fit(weighted_data[:, used_features], weighted_labels)\n    if self.verbose:\n      print('Prediction_local:', debiased_model.predict(data[0, used_features].reshape(1, -1)) + mean, 'Right:', labels[0, label])\n    if self.return_mean:\n      return sorted(zip(used_features,\n                        debiased_model.coef_),\n                    key=lambda x: np.abs(x[1]), reverse=True), mean\n    else:\n      return sorted(zip(used_features,\n                        debiased_model.coef_),\n                    key=lambda x: np.abs(x[1]), reverse=True)\n\n  def explain_instance(self,\n                       raw_data,\n                       label,\n                       classifier_fn,\n                       num_features, dataset=None):\n\n    if not hasattr(classifier_fn, '__call__'):\n      classifier_fn = classifier_fn.predict_proba\n    data, labels, distances, mapping = self.data_labels_distances_mapping(raw_data, classifier_fn)\n    if self.return_mapped:\n      if self.return_mean:\n        exp, mean = self.explain_instance_with_data(data, labels, distances, label, num_features)\n      else:\n        exp = self.explain_instance_with_data(data, labels, distances, label, num_features)\n      exp = [(mapping[x[0]], x[1]) for x in exp]\n      if self.return_mean:\n        return exp, mean\n      else:\n        return exp\n    return self.explain_instance_with_data(data, labels, distances, label, num_features), mapping\n\n\n"
}
] | 35 |
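The GeneralizedLocalExplainer above leaves the distance kernel to the caller. Below is a minimal sketch of one way to wire it to the text mapping function defined in the same file; the exponential kernel and its width are assumptions for illustration (the record does not fix them), and instance_vector / clf are placeholders for a sparse tf-idf row and any model exposing predict_proba.

import numpy as np

def exponential_kernel(distances, width=25.0):
    # assumed kernel shape and width; not taken from the record
    return np.sqrt(np.exp(-distances ** 2 / width ** 2))

explainer = GeneralizedLocalExplainer(exponential_kernel,
                                      data_labels_distances_mapping_text,
                                      num_samples=5000,
                                      return_mean=True,
                                      return_mapped=True,
                                      verbose=False)
# label=1 explains the positive class; mean is the local intercept
explanation, mean = explainer.explain_instance(instance_vector, 1, clf, num_features=6)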
umahanish/testrepo
|
https://github.com/umahanish/testrepo
|
bf007b5888f05aed5396eaa165500f561f888944
|
c61612fe386ea64be073d661cdc1c3e7d4152d79
|
6bdc2218af89645fd08f694e957022d2b95bc143
|
refs/heads/master
| 2020-06-25T09:41:32.676104 | 2019-01-14T06:17:43 | 2019-01-14T06:17:43 | 96,970,805 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 13,
"blob_id": "a0d76f38a214ff14c89caad0b53ca4a017374516",
"content_id": "1a0983ec868a2b9584efd47ab7779ddd530a72a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/README.md",
"repo_name": "umahanish/testrepo",
"src_encoding": "UTF-8",
"text": "# testrepo\n this is my demo\n"
},
{
"alpha_fraction": 0.7027210593223572,
"alphanum_fraction": 0.7163265347480774,
"avg_line_length": 37.71052551269531,
"blob_id": "651ef1323d7a494ccd6c17443c305b3a6d537e88",
"content_id": "5654e054e6b0139dd2fab2c29a042ccc00bfbffe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1470,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 38,
"path": "/getpipversion.py",
"repo_name": "umahanish/testrepo",
"src_encoding": "UTF-8",
"text": "import re\n\nimport requests\n\nmaster_version_file = 'taskcat/master_version'\ndevelop_version_file = 'taskcat/develop_version'\n\ndef get_pip_version(pkginfo_url):\n pkginfo = requests.get(pkginfo_url).text\n for record in pkginfo.split('\\n'):\n if record.startswith('Version'):\n current_version = str(record).split(':', 1)\n return (current_version[1]).strip()\n\n\n#current_develop_version = get_pip_version('https://testpypi.python.org/pypi?name=taskcat&:action=display_pkginfo')\ncurrent_develop_version = '0.dev373.dev3'\nprint(\"PyPi Develop Version is [{}]\".format(current_develop_version))\n#current_master_version = get_pip_version('https://pypi.python.org/pypi?name=taskcat&:action=display_pkginfo')\ncurrent_master_version = '730.73.73'\nprint(\"PyPi Master Version is [{}]\".format(current_master_version))\n\nnew_poduction_release = int(re.findall(r'\\d+', current_master_version)[-1])\nnew_development_release = int(re.findall(r'\\d+', current_develop_version)[-1])\n\nproduction_version =re.sub('\\d$', lambda x: str(int(x.group(0)) + 1), current_master_version)\ndevelopment_version =re.sub('\\d$', lambda x: str(int(x.group(0)) + 1), current_develop_version)\n\nprint(\"current_develop_version\")\nprint(development_version)\nprint(\"current_master_version\")\nprint(production_version)\n\nwith open(master_version_file, 'w') as m:\n m.write(str(current_master_version))\n\nwith open(develop_version_file, 'w') as d:\n d.write(str(current_develop_version))"
}
] | 2 |
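The version bump in getpipversion.py relies on re.sub with the pattern \d$, which matches only the final digit of the string and replaces it with its increment. A quick check of that behavior (input values below are illustrative):

import re

def bump(version):
    # same substitution as in getpipversion.py
    return re.sub(r'\d$', lambda m: str(int(m.group(0)) + 1), version)

print(bump('730.73.73'))      # 730.73.74
print(bump('0.dev373.dev3'))  # 0.dev373.dev4
print(bump('1.9'))            # 1.10 -- only the last digit is matched, so 9 becomes 10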
whyguu/lbf
|
https://github.com/whyguu/lbf
|
3daae25df7844df234f836c757473a718ff836bd
|
00a2489a6e58bee0785601bd893e2cab779b6a05
|
9de79a41fbb282fb6e8ac4a84f709cff12a7fa51
|
refs/heads/master
| 2021-09-22T01:03:12.091223 | 2018-09-04T07:35:50 | 2018-09-04T07:35:50 | 116,928,044 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6760034561157227,
"alphanum_fraction": 0.6991770267486572,
"avg_line_length": 55.30487823486328,
"blob_id": "f8691d97263112b80e07978f4f62674b14854b96",
"content_id": "1e7907c119f7291ce09a42842f82fed5365c4eb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13852,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 246,
"path": "/mainwindow.py",
"repo_name": "whyguu/lbf",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'mainwindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.4.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.setEnabled(True)\n MainWindow.resize(800, 620)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label_image = QtWidgets.QLabel(self.centralwidget)\n self.label_image.setGeometry(QtCore.QRect(10, 30, 500, 500))\n self.label_image.setFrameShape(QtWidgets.QFrame.Panel)\n self.label_image.setFrameShadow(QtWidgets.QFrame.Raised)\n self.label_image.setAlignment(QtCore.Qt.AlignCenter)\n self.label_image.setObjectName(\"label_image\")\n self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)\n self.stackedWidget.setGeometry(QtCore.QRect(530, 150, 241, 381))\n font = QtGui.QFont()\n font.setKerning(True)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.stackedWidget.setFont(font)\n self.stackedWidget.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n self.stackedWidget.setFocusPolicy(QtCore.Qt.NoFocus)\n self.stackedWidget.setAcceptDrops(False)\n self.stackedWidget.setToolTipDuration(0)\n self.stackedWidget.setAutoFillBackground(False)\n self.stackedWidget.setInputMethodHints(QtCore.Qt.ImhHiddenText)\n self.stackedWidget.setFrameShape(QtWidgets.QFrame.Box)\n self.stackedWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.stackedWidget.setObjectName(\"stackedWidget\")\n self.region_grow_3 = QtWidgets.QWidget()\n self.region_grow_3.setObjectName(\"region_grow_3\")\n self.label = QtWidgets.QLabel(self.region_grow_3)\n self.label.setGeometry(QtCore.QRect(20, 40, 60, 16))\n self.label.setObjectName(\"label\")\n self.region_grow = QtWidgets.QPushButton(self.region_grow_3)\n self.region_grow.setGeometry(QtCore.QRect(10, 150, 113, 32))\n self.region_grow.setObjectName(\"region_grow\")\n self.std_decay = QtWidgets.QLineEdit(self.region_grow_3)\n self.std_decay.setGeometry(QtCore.QRect(20, 60, 113, 21))\n self.std_decay.setObjectName(\"std_decay\")\n self.stackedWidget.addWidget(self.region_grow_3)\n self.level_set_2 = QtWidgets.QWidget()\n self.level_set_2.setObjectName(\"level_set_2\")\n self.verticalLayoutWidget = QtWidgets.QWidget(self.level_set_2)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 211, 331))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_7.setObjectName(\"label_7\")\n self.horizontalLayout_7.addWidget(self.label_7)\n self.time_step = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.time_step.setObjectName(\"time_step\")\n self.horizontalLayout_7.addWidget(self.time_step)\n self.verticalLayout.addLayout(self.horizontalLayout_7)\n self.horizontalLayout_6 = 
QtWidgets.QHBoxLayout()\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_6.setObjectName(\"label_6\")\n self.horizontalLayout_6.addWidget(self.label_6)\n self.epsilon = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.epsilon.setObjectName(\"epsilon\")\n self.horizontalLayout_6.addWidget(self.epsilon)\n self.verticalLayout.addLayout(self.horizontalLayout_6)\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_8.setObjectName(\"label_8\")\n self.horizontalLayout_5.addWidget(self.label_8)\n self.mu = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.mu.setObjectName(\"mu\")\n self.horizontalLayout_5.addWidget(self.mu)\n self.verticalLayout.addLayout(self.horizontalLayout_5)\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_9.setObjectName(\"label_9\")\n self.horizontalLayout_8.addWidget(self.label_9)\n self.lambda1 = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.lambda1.setObjectName(\"lambda1\")\n self.horizontalLayout_8.addWidget(self.lambda1)\n self.verticalLayout.addLayout(self.horizontalLayout_8)\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_4.setObjectName(\"label_4\")\n self.horizontalLayout_3.addWidget(self.label_4)\n self.sigma = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.sigma.setObjectName(\"sigma\")\n self.horizontalLayout_3.addWidget(self.sigma)\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_3.setObjectName(\"label_3\")\n self.horizontalLayout_2.addWidget(self.label_3)\n self.c0 = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.c0.setInputMethodHints(QtCore.Qt.ImhHiddenText)\n self.c0.setObjectName(\"c0\")\n self.horizontalLayout_2.addWidget(self.c0)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setContentsMargins(2, -1, -1, -1)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_2.setObjectName(\"label_2\")\n self.horizontalLayout.addWidget(self.label_2)\n self.iter_num = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.iter_num.setObjectName(\"iter_num\")\n self.horizontalLayout.addWidget(self.iter_num)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_9 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_9.setObjectName(\"horizontalLayout_9\")\n self.label_10 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_10.setObjectName(\"label_10\")\n self.horizontalLayout_9.addWidget(self.label_10)\n self.lambda2 = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.lambda2.setObjectName(\"lambda2\")\n self.horizontalLayout_9.addWidget(self.lambda2)\n self.verticalLayout.addLayout(self.horizontalLayout_9)\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.label_5 = 
QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_5.setObjectName(\"label_5\")\n self.horizontalLayout_4.addWidget(self.label_5)\n self.nu = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.nu.setObjectName(\"nu\")\n self.horizontalLayout_4.addWidget(self.nu)\n self.verticalLayout.addLayout(self.horizontalLayout_4)\n self.level_set = QtWidgets.QPushButton(self.level_set_2)\n self.level_set.setGeometry(QtCore.QRect(10, 340, 113, 32))\n self.level_set.setObjectName(\"level_set\")\n self.stackedWidget.addWidget(self.level_set_2)\n self.rectify = QtWidgets.QWidget()\n self.rectify.setObjectName(\"rectify\")\n self.gen_polygon = QtWidgets.QPushButton(self.rectify)\n self.gen_polygon.setGeometry(QtCore.QRect(30, 80, 113, 32))\n self.gen_polygon.setObjectName(\"gen_polygon\")\n self.delete_polygon = QtWidgets.QPushButton(self.rectify)\n self.delete_polygon.setGeometry(QtCore.QRect(30, 190, 113, 32))\n self.delete_polygon.setObjectName(\"delete_polygon\")\n self.stackedWidget.addWidget(self.rectify)\n self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)\n self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(550, 100, 184, 31))\n self.horizontalLayoutWidget_2.setObjectName(\"horizontalLayoutWidget_2\")\n self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)\n self.horizontalLayout_10.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_10.setObjectName(\"horizontalLayout_10\")\n self.label_11 = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\n self.label_11.setObjectName(\"label_11\")\n self.horizontalLayout_10.addWidget(self.label_11)\n self.comboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_2)\n self.comboBox.setObjectName(\"comboBox\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.horizontalLayout_10.addWidget(self.comboBox)\n self.stackedWidget.raise_()\n self.label_image.raise_()\n self.horizontalLayoutWidget_2.raise_()\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))\n self.menubar.setObjectName(\"menubar\")\n self.menufile = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\".SF NS Display\")\n font.setBold(True)\n font.setWeight(75)\n self.menufile.setFont(font)\n self.menufile.setObjectName(\"menufile\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n self.toolBar.setMovable(False)\n self.toolBar.setObjectName(\"toolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.actionopen = QtWidgets.QAction(MainWindow)\n self.actionopen.setObjectName(\"actionopen\")\n self.actionreset = QtWidgets.QAction(MainWindow)\n self.actionreset.setObjectName(\"actionreset\")\n self.menufile.addAction(self.actionopen)\n self.menufile.addSeparator()\n self.menufile.addAction(self.actionreset)\n self.menubar.addAction(self.menufile.menuAction())\n self.toolBar.addAction(self.actionopen)\n self.toolBar.addAction(self.actionreset)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label_image.setText(_translate(\"MainWindow\", \"select a picture\"))\n 
self.label.setText(_translate(\"MainWindow\", \"std decay\"))\n self.region_grow.setText(_translate(\"MainWindow\", \"region grow\"))\n self.std_decay.setText(_translate(\"MainWindow\", \"400\"))\n self.label_7.setText(_translate(\"MainWindow\", \" timestep:\"))\n self.time_step.setText(_translate(\"MainWindow\", \"0.1\"))\n self.label_6.setText(_translate(\"MainWindow\", \" epsilon:\"))\n self.epsilon.setText(_translate(\"MainWindow\", \"2.0\"))\n self.label_8.setText(_translate(\"MainWindow\", \" mu:\"))\n self.mu.setText(_translate(\"MainWindow\", \"1.0\"))\n self.label_9.setText(_translate(\"MainWindow\", \" lambda1:\"))\n self.lambda1.setText(_translate(\"MainWindow\", \"1.0\"))\n self.label_4.setText(_translate(\"MainWindow\", \" sigma:\"))\n self.sigma.setText(_translate(\"MainWindow\", \"3.0\"))\n self.label_3.setText(_translate(\"MainWindow\", \" steep:\"))\n self.c0.setText(_translate(\"MainWindow\", \"2.0\"))\n self.label_2.setText(_translate(\"MainWindow\", \" iter num:\"))\n self.iter_num.setText(_translate(\"MainWindow\", \"500\"))\n self.label_10.setText(_translate(\"MainWindow\", \" lambda2:\"))\n self.lambda2.setText(_translate(\"MainWindow\", \"1.0\"))\n self.label_5.setText(_translate(\"MainWindow\", \" nu:\"))\n self.nu.setText(_translate(\"MainWindow\", \"0.003\"))\n self.level_set.setText(_translate(\"MainWindow\", \"level set\"))\n self.gen_polygon.setText(_translate(\"MainWindow\", \"gen shape\"))\n self.delete_polygon.setText(_translate(\"MainWindow\", \"delete\"))\n self.label_11.setText(_translate(\"MainWindow\", \"method:\"))\n self.comboBox.setItemText(0, _translate(\"MainWindow\", \"region grow\"))\n self.comboBox.setItemText(1, _translate(\"MainWindow\", \"level set\"))\n self.comboBox.setItemText(2, _translate(\"MainWindow\", \"rectify\"))\n self.menufile.setTitle(_translate(\"MainWindow\", \"file\"))\n self.toolBar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\"))\n self.actionopen.setText(_translate(\"MainWindow\", \"open\"))\n self.actionreset.setText(_translate(\"MainWindow\", \"reset\"))\n\n"
},
{
"alpha_fraction": 0.7560975551605225,
"alphanum_fraction": 0.7560975551605225,
"avg_line_length": 19.5,
"blob_id": "d2b5617fa18acbb7a910a9b5c9206b0be10bb17d",
"content_id": "87494bbd409b54353609f7fea15422d5dd576aca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 2,
"path": "/README.md",
"repo_name": "whyguu/lbf",
"src_encoding": "UTF-8",
"text": "# farm-detection\nlevel-set , region-grow\n"
},
{
"alpha_fraction": 0.5215475559234619,
"alphanum_fraction": 0.5508866310119629,
"avg_line_length": 34.78076934814453,
"blob_id": "a256fe456261b2e174fa8171a431c7761ec941f7",
"content_id": "55ff484a430193f95ae581a9db149a179124ba8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9379,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 260,
"path": "/algorithm/LBF.py",
"repo_name": "whyguu/lbf",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nfrom scipy.ndimage import filters\n# from osgeo import gdal, ogr, osr\n# from shapely.geometry import Polygon\nimport os\nimport shutil\nimport argparse\n\n\nclass LBF(object):\n\n def __init__(self, img, iter_num=500, c0=2, sigma=3.0, nu=0.003, mu=1.0, lambda1=1.0, lambda2=1.0, epsilon=2.0, time_step=0.1):\n self.lambda1 = lambda1\n self.lambda2 = lambda2\n self.iter_num = iter_num\n self.nu = nu*255*255\n self.mu = mu\n self.epsilon = epsilon\n self.sigma = sigma\n self.time_step = time_step\n self.c0 = c0\n self.img = img.copy()\n\n self.phi = None\n self.img_kernel = None\n self.one_kernel = None\n self.kernel_size = round(2*self.sigma)*2+1\n\n def public_data(self):\n r, c = self.img.shape\n self.phi = np.ones(self.img.shape) # level set function\n # _, self.phi = cv2.threshold(self.img, 0, 1, cv2.THRESH_OTSU)\n cv2.circle(self.phi, (c//2, r//2), radius=4, color=0, thickness=-1)\n\n self.phi = self.c0 * 2*(self.phi-0.5)\n self.img = np.float64(self.img)\n self.phi = np.float64(self.phi)\n\n cv2.imwrite('init.bmp', self.phi*255)\n '''\n with open(\"test.csv\", \"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(self.phi)\n '''\n self.img_kernel = cv2.GaussianBlur(self.img, (self.kernel_size, self.kernel_size), self.sigma, borderType=cv2.BORDER_CONSTANT)\n self.one_kernel = cv2.GaussianBlur(np.ones(self.img.shape, np.float64), (self.kernel_size, self.kernel_size), self.sigma, borderType=cv2.BORDER_CONSTANT)\n\n def level_set_evolution(self):\n self.public_data()\n\n for i in range(self.iter_num):\n # Neumann Boundary Condition\n self.neum_bound_cond()\n\n # dirac\n dirac_phi = self.dirac()\n\n # local binary fit\n f1, f2 = self.local_bin_fit()\n\n # data force\n lbf = self.data_force(f1, f2)\n\n # curvature central\n curv = self.curvature_central()\n\n # each item\n area_term = -dirac_phi * lbf\n len_term = self.nu * dirac_phi * curv\n\n laplace_operator_phi = filters.laplace(self.phi) # self.phi laplace operator\n penalty = self.mu * (laplace_operator_phi - curv)\n\n self.phi = self.phi + self.time_step * (area_term + penalty + len_term)\n\n # binary phi and then return\n rtn_phi = np.uint8(self.phi > 0)\n\n return rtn_phi*255\n\n def neum_bound_cond(self):\n r, c = self.phi.shape\n self.phi[np.ix_([0, -1], [0, -1])] = self.phi[np.ix_([2, -3], [2, -3])]\n self.phi[np.ix_([0, -1], np.arange(1, c-1))] = self.phi[np.ix_([2, -3], np.arange(1, c-1))]\n self.phi[np.ix_(np.arange(1, r-1), [0, -1])] = self.phi[np.ix_(np.arange(1, r-1), [2, -3])]\n\n def heaviside(self):\n heav_phi = 0.5 * (1+(2/3.1415926)*np.arctan(self.phi/self.epsilon))\n return heav_phi\n\n def dirac(self):\n dirac_phi = (self.epsilon/3.1415926)/(self.epsilon*self.epsilon+self.phi*self.phi)\n return dirac_phi\n\n def local_bin_fit(self):\n heav_phi = self.heaviside()\n image = self.img * heav_phi\n c1 = cv2.GaussianBlur(heav_phi, (self.kernel_size, self.kernel_size), self.sigma, borderType=cv2.BORDER_CONSTANT)\n c2 = cv2.GaussianBlur(image, (self.kernel_size, self.kernel_size), self.sigma, borderType=cv2.BORDER_CONSTANT)\n f1 = c2 / c1\n f2 = (self.img_kernel-c2) / (self.one_kernel-c1)\n return f1, f2\n\n def data_force(self, f1, f2):\n s1 = self.lambda1 * f1 * f1 - self.lambda2 * f2 * f2\n s2 = self.lambda1 * f1 - self.lambda2 * f2\n gs1 = cv2.GaussianBlur(s1, (self.kernel_size, self.kernel_size), self.sigma, borderType=cv2.BORDER_CONSTANT)\n gs2 = cv2.GaussianBlur(s2, (self.kernel_size, self.kernel_size), self.sigma, borderType=cv2.BORDER_CONSTANT)\n lbf = 
(self.lambda1-self.lambda2) * self.one_kernel * self.img * self.img + gs1 - 2.0 * self.img * gs2\n\n return lbf\n\n def curvature_central(self):\n ux, uy = np.gradient(self.phi)\n norm = np.sqrt(ux*ux + uy*uy + 1e-10)\n nx = ux / norm\n ny = uy / norm\n\n nxx, _ = np.gradient(nx)\n _, nyy = np.gradient(ny)\n\n curv = nxx + nyy\n return curv\n\n\nclass MyAlgorithm(object):\n def __init__(self):\n pass\n\n @staticmethod\n def douglas(con):\n def douglas_impl(dog, con, mark, start, end):\n # 函数出口\n if start == end - 1:\n return\n # 计算直线 Ax+By+C=0\n x1, y1 = con[start, 0]\n x2, y2 = con[end, 0]\n A = y2 - y1\n B = x1 - x2\n C = x2 * y1 - x1 * y2\n # 计算个点到直线的距离\n max_dist = 0\n for i in range(start + 1, end - 1):\n tp_dist = abs(A * con[i][0][0] + B * con[i][0][1] + C) / np.sqrt(pow(A, 2) + pow(B, 2))\n if tp_dist > max_dist:\n index = i\n max_dist = tp_dist\n\n if max_dist > dog:\n douglas_impl(dog, con, mark, start, index)\n douglas_impl(dog, con, mark, index, end)\n else:\n mark[start + 1:end, 0] = 0\n\n # con: a list which are generated by function cv2.findContours() of ndarrays\n dog = 1 # douglas parameter\n for i in range(len(con)):\n start = 0\n end = len(con[i]) - 1\n mark = np.ones((len(con[i]), 1))\n douglas_impl(dog, con[i], mark, start, end)\n con[i] = con[i][mark[:, 0] == 1]\n # return con\n\n @staticmethod\n def gen_shape_file(file_path, img_path, polygon, layer_name):\n # polygon: a list which are generated by function cv2.findContours() of ndarrays\n if not file_path or not img_path:\n print('the shape file path or the image path may be not accessible !')\n return\n # if os.path.exists(file_path):\n if os.path.exists(file_path) and len(os.listdir(file_path)) > 1:\n shutil.rmtree(file_path)\n\n driver = ogr.GetDriverByName('Esri Shapefile')\n ds = driver.CreateDataSource(file_path)\n\n layer = ds.CreateLayer(layer_name, None, ogr.wkbPolygon)\n # Add one attribute\n layer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))\n defn = layer.GetLayerDefn()\n dataset = gdal.Open(img_path)\n trans = dataset.GetGeoTransform()\n no_geo_info = 0\n img_row = 0\n if trans == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):\n img_row = dataset.RasterYSize\n no_geo_info = 1\n print('the chosen image may not contain geometry info , shape file will be useless !') ###########################\n feat = ogr.Feature(defn)\n for iter1 in range(len(polygon)):\n tp_poly = polygon[iter1][:, 0, :]\n poly = []\n if tp_poly.shape[0] < 3:\n continue\n for iter2 in range(tp_poly.shape[0]):\n # 此处应该得到 tp_poly[iter2, :] 对应的 地理坐标 ########################################\n cvx = tp_poly[iter2, 0] # opencv 中的 x\n cvy = tp_poly[iter2, 1] # opencv 中的 y\n if no_geo_info:\n cvy = img_row - tp_poly[iter2, 1] # opencv 中的 y\n\n px = trans[0] + cvx * trans[1] + cvy * trans[2]\n py = trans[3] + cvx * trans[4] + cvy * trans[5]\n\n poly.append((px, py))\n poly.append(poly[0])\n poly = Polygon(poly)\n\n # Create a new feature (attribute and geometry)\n feat.SetField('id', iter1)\n\n # Make a geometry, from Shapely object\n geom = ogr.CreateGeometryFromWkb(poly.wkb)\n feat.SetGeometry(geom)\n\n layer.CreateFeature(feat)\n feat = geom = None # destroy these\n # Save and close everything\n ds = layer = feat = geom = None\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-image_path', type=str, required=True)\n parser.add_argument('-file_path', type=str, required=True)\n return parser\n\n\nif __name__ == \"__main__\":\n # path = './mypictures/test01.jpg'\n # path = '/Users/whyguu/Desktop/WechatIMG10.jpeg'\n parser = 
arg_parser()\n args = parser.parse_args()\n path = args.image_path\n file_path = args.file_path\n\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n cont1 = []\n cont2 = []\n # roi_win = cv2.namedWindow('select ROI', cv2.WINDOW_NORMAL)\n\n # 1.evolution\n lbf = LBF(img=img, iter_num=500)\n bw = lbf.level_set_evolution()\n\n # cv2.imwrite('rst.bmp', bw)\n # 2.find contours\n _, contours1, _ = cv2.findContours(bw.copy(), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)\n _, contours2, _ = cv2.findContours(255 - bw, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)\n\n cont1 += contours1\n cont2 += contours2\n # 4.generate shape file\n MyAlgorithm.gen_shape_file(os.path.join(file_path, 'whitefile'), path, cont1, layer_name='white')\n MyAlgorithm.gen_shape_file(os.path.join(file_path, 'blackfile'), path, cont2, layer_name='black')\n\n print('finished!')\n\n\n"
},
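The LBF class above runs the usual local-binary-fitting level-set loop: regularized Heaviside/Dirac functions, Gaussian-windowed fitting functions f1/f2, plus curvature and penalty terms. A minimal driver, mirroring the file's own __main__ (the input path and iteration count below are placeholders):

import cv2

img = cv2.imread('some_image.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path
lbf = LBF(img=img, iter_num=200, sigma=3.0, nu=0.003)
bw = lbf.level_set_evolution()  # uint8 mask with values 0/255
cv2.imwrite('segmentation.bmp', bw)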
{
"alpha_fraction": 0.546778678894043,
"alphanum_fraction": 0.5657027363777161,
"avg_line_length": 40.99106979370117,
"blob_id": "efb5f8bce4318602fe9b46328bc67a14d09f8752",
"content_id": "aedcc63be8fa80d09dbe29132ed99312418b3110",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9454,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 224,
"path": "/slots.py",
"repo_name": "whyguu/lbf",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nimport os\nfrom mainwindow import Ui_MainWindow\nfrom algorithm.LBF import LBF\nfrom algorithm.region_grow import RegionGrow\n\n\nclass FarmWindow(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(FarmWindow, self).__init__(parent)\n self.setupUi(self)\n self.menubar.setNativeMenuBar(False)\n # window event connect\n self.stackedWidget.setCurrentIndex(0)\n self.level_set.clicked.connect(self.exec_level_set)\n self.comboBox.currentIndexChanged['int'].connect(self.change_method)\n self.region_grow.clicked.connect(self.exec_region_grow)\n self.delete_polygon.clicked.connect(self.exec_delete_polygon)\n self.gen_polygon.clicked.connect(self.exec_gen_polygon)\n self.actionopen.triggered.connect(self.open_file)\n self.actionreset.triggered.connect(self.reset_image)\n # define properties\n self.img_path = ''\n self.img_intersect = None\n self.img_intersect_bak = None\n self.img = None\n self.img_mask = None\n self.pixel_map = None\n self.img_clicked_buffer = []\n self.polygon = []\n self.level_set_label = None\n\n # define slot function\n def open_file(self):\n self.img_path, img_type = QtWidgets.QFileDialog.getOpenFileName(None, \"Pick an image\", '/', \"*.png;;*.jpg;;*.jpeg;;*.bmp;;All Files (*)\")\n\n print(self.img_path)\n if self.img_path:\n self.img_intersect = cv2.imread(self.img_path, cv2.IMREAD_ANYCOLOR)\n if len(self.img_intersect.shape) == 3:\n self.img_intersect = cv2.cvtColor(self.img_intersect, cv2.COLOR_BGR2RGB)\n self.img = cv2.cvtColor(self.img_intersect, cv2.COLOR_BGR2GRAY)\n else:\n self.img = self.img_intersect.copy()\n self.img_intersect_bak = self.img_intersect.copy()\n self.img_mask = np.zeros(self.img.shape, np.uint8)\n self.level_set_label = np.zeros(self.img.shape, np.uint8)\n\n self.pixel_map = QtGui.QPixmap(self.img_path)\n self.label_image.setPixmap(self.pixel_map) # QPixmap=pixelmap 不行 见鬼了\n\n def reset_image(self):\n self.label_image.setPixmap(self.pixel_map) # QPixmap=pixelmap 不行 见鬼了\n self.img_clicked_buffer = []\n self.img_intersect = self.img_intersect_bak.copy()\n\n def change_method(self):\n haha = self.comboBox.currentIndex()\n # print(haha)\n self.stackedWidget.setCurrentIndex(haha)\n self.img_clicked_buffer = []\n\n def exec_level_set(self):\n print('execute level set')\n time_step = float(self.time_step.text())\n epsilon = float(self.epsilon.text())\n mu = float(self.mu.text())\n lambda1 = float(self.lambda1.text())\n lambda2 = float(self.lambda2.text())\n sigma = float(self.sigma.text())\n c0 = float(self.c0.text())\n iter_num = int(self.iter_num.text())\n nu = float(self.nu.text())\n # 1.evolution\n if self.img is None:\n QtWidgets.QMessageBox.information(self, # 使用infomation信息框\n \"warnning\",\n \"please select a image first !\",\n QtWidgets.QMessageBox.Yes) # | QtWidgets.QMessageBox.No)\n return\n\n lbf = LBF(img=self.img, iter_num=iter_num, c0=c0, sigma=sigma,\n nu=nu, mu=mu, lambda1=lambda1, lambda2=lambda2, epsilon=epsilon, time_step=time_step)\n bw = lbf.level_set_evolution()\n\n _, contours1, _ = cv2.findContours(bw.copy(), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)\n _, contours2, _ = cv2.findContours(255 - bw, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)\n\n contours1 = sorted(contours1, key=lambda x: x.shape[0])\n for ii in range(len(contours1)):\n if contours1[ii].shape[0] > 20:\n del contours1[0:ii]\n break\n contours2 = sorted(contours2, key=lambda x: x.shape[0])\n for ii in 
range(len(contours2)):\n if contours2[ii].shape[0] > 20:\n del contours2[0:ii]\n break\n\n cont1 = np.zeros(self.img.shape[0:2], dtype=np.uint8)\n cont2 = np.zeros(self.img.shape[0:2], dtype=np.uint8)\n\n for iter1 in range(len(contours1)):\n # print(iter1)\n tp = contours1[iter1]\n tp = np.squeeze(tp, axis=1)\n contours1[iter1] = tp\n cont1[tp[:, 1], tp[:, 0]] = 255\n for iter1 in range(len(contours2)):\n tp = contours2[iter1]\n tp = np.squeeze(tp, axis=1)\n contours2[iter1] = tp\n cont2[tp[:, 1], tp[:, 0]] = 255\n cv2.imwrite('/Users/whyguu/Desktop/huhu.jpg', cont1)\n cv2.imwrite('/Users/whyguu/Desktop/xixi.jpg', cont2)\n cv2.imwrite('/Users/whyguu/Desktop/haha.jpg', bw)\n\n for it in range(len(contours1)):\n cv2.fillPoly(self.level_set_label, [contours1[it]], color=it+1)\n # np.savez('xixi.npz', cn1=contours1[3], cn2=contours1[5])\n print('level_Set done !')\n cv2.imwrite('mimi.jpg', self.level_set_label)\n\n def exec_region_grow(self):\n # region grow and 划出结果\n if len(self.img_clicked_buffer) == 0:\n QtWidgets.QMessageBox.information(self, \"warnning\", \"please select points first !\", QtWidgets.QMessageBox.Yes)\n return\n region = self.img_clicked_buffer.copy()\n self.img_clicked_buffer.clear()\n std_decay = float(self.std_decay.text())\n print('execute region grow...')\n con = RegionGrow.region_grow(region, self.img, self.img_mask, std_decay)\n print('region grow done !')\n # #################\n for it in range(len(con)-1):\n cv2.line(self.img_intersect, (con[it][0], con[it][1]), (con[it+1][0], con[it+1][1]),\n (123, 34, 189))\n cv2.line(self.img_intersect, (con[0][0], con[0][1]), (con[-1][0], con[-1][1]),\n (123, 34, 189))\n # #################\n pixmap = self.fromNumpy2Pixmap(self.img_intersect)\n self.label_image.clear()\n self.label_image.setPixmap(pixmap)\n\n self.img_mask = np.zeros(self.img.shape, np.uint8)\n self.polygon.append(con)\n print(con)\n\n def exec_gen_polygon(self):\n # from numpy to QPixmap\n qimg = QtGui.QImage(self.img.data, self.img.shape[1], self.img.shape[0], self.img.shape[1],\n QtGui.QImage.Format_Indexed8)\n pixmap = QtGui.QPixmap.fromImage(qimg)\n self.label_image.setPixmap(pixmap)\n\n def exec_delete_polygon(self):\n pass\n\n def mouseReleaseEvent(self, QMouseEvent):\n if QMouseEvent.button() != QtCore.Qt.LeftButton:\n return\n qpoint = QMouseEvent.globalPos()\n # print(qpoint)\n\n # label_image process\n lb_img_pnt = self.label_image.mapFromGlobal(qpoint)\n lb_img_width = self.label_image.contentsRect().width()\n lb_img_height = self.label_image.contentsRect().height()\n # assert in the range of label\n if lb_img_pnt.x() in range(0, lb_img_width) and lb_img_pnt.y() in range(0, lb_img_height):\n if self.img is None:\n QtWidgets.QMessageBox.information(self, # 使用infomation信息框\n \"warnning\",\n \"please select a image first !\",\n QtWidgets.QMessageBox.Yes) # | QtWidgets.QMessageBox.No)\n return\n img_offset_x = (lb_img_width - self.label_image.pixmap().rect().width()) // 2\n img_offset_y = (lb_img_height - self.label_image.pixmap().rect().height()) // 2\n img_x = lb_img_pnt.x() - img_offset_x\n img_y = lb_img_pnt.y() - img_offset_y\n # assert in the range of image\n if img_x not in range(0, self.label_image.pixmap().rect().width()) or img_y not in range(0, self.label_image.pixmap().rect().height()):\n return\n\n self.img_clicked_buffer.append((img_y, img_x))\n r_min = max(img_y - 1, 0)\n r_max = min(img_y + 1, self.img.shape[0] - 1)\n c_min = max(img_x - 1, 0)\n c_max = min(img_x + 1, self.img.shape[1] - 1)\n for tp_r in range(r_min, r_max + 1):\n for 
tp_c in range(c_min, c_max + 1):\n self.img_intersect[tp_r, tp_c] = 255\n # repaint\n\n pixmap = self.fromNumpy2Pixmap(self.img_intersect)\n self.label_image.clear()\n self.label_image.setPixmap(pixmap)\n\n print(img_x)\n print(img_y)\n\n def fromNumpy2Pixmap(self, img):\n # from numpy to QPixmap\n if len(img.shape) == 3:\n h, w, c = img.shape\n img_format = QtGui.QImage.Format_RGB888\n else:\n h, w = img.shape\n c = 1\n img_format = QtGui.QImage.Format_Indexed8\n qimg = QtGui.QImage(img.data, w, h, c * w, img_format)\n pixmap = QtGui.QPixmap.fromImage(qimg)\n return pixmap\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n farm = FarmWindow()\n farm.show()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.47893568873405457,
"alphanum_fraction": 0.6053215265274048,
"avg_line_length": 16.038461685180664,
"blob_id": "5d8f9521cbb740a8a12ad72f0a962fe2ae115b65",
"content_id": "f1ae260d019db6bbb317bbb52440eb71efe3a6ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 26,
"path": "/clutter/test_fillPoly.py",
"repo_name": "whyguu/lbf",
"src_encoding": "UTF-8",
"text": "import cv2\nimport os\nimport numpy as np\n\nfilepath = 'xixi.jpg'\n\nmask = np.zeros((330, 380), np.uint8)\n\nct = np.load('xixi.npz')\n\n#print(ct['cn2'])\ncn2 = ct['cn2']\ncn1 = ct['cn1']\nprint(cn2.shape)\ncn2 = cn2.astype(np.int32)\n\n\n# mask[cn2[:,0,1], cn2[:, 0,0]] = 255\n\ncn = []\ncn.append(cn2)\ncn.append(cn1)\n\n# cn2 = np.array( [[[10,10],[100,10],[100,100],[10,100]]], dtype=np.int32 )\ncv2.fillPoly(mask, cn, color=255)\ncv2.imwrite('mimi.jpg', mask)\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.43491506576538086,
"alphanum_fraction": 0.4669010043144226,
"avg_line_length": 34.41493606567383,
"blob_id": "615d2fea26a0986a7da20a31c90cae6237aa3cb1",
"content_id": "2ea2b1b5229a31876d229e618adf2ab2c0979fc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8687,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 241,
"path": "/algorithm/region_grow.py",
"repo_name": "whyguu/lbf",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 7 16:38:11 2017\n\n@author: why\n\"\"\"\n\nimport numpy as np\nimport cv2\n\n\nclass RegionGrow(object):\n def __init__(self):\n pass\n\n @staticmethod\n def gen_seeds(region, img_mask):\n\n if len(region) >= 3:\n # 所选点大于等于3 执行求重心 然后选取种子点\n RegionGrow.generate_seed_points(region, img_mask)\n else:\n # 所选点小于3 则在周围3*3区域内采样种子点\n for i in range(len(region)):\n r_point = region[i]\n r_min = max(r_point[0] - 1, 0)\n r_max = min(r_point[0] + 1, img_mask.shape[0] - 1)\n c_min = max(r_point[1] - 1, 0)\n c_max = min(r_point[1] + 1, img_mask.shape[1] - 1)\n for r, c in zip([r_min, r_max, r_point[0], r_point[0]], [r_point[1], r_point[1], c_min, c_max]):\n if img_mask[r, c] != 255:\n region.append((r, c))\n img_mask[r, c] = 255\n\n @staticmethod\n def gravity_center1(points):\n length = len(points)\n area_points = np.zeros((length - 2, 3))\n x1, y1 = points[0]\n for i in range(length - 2):\n x2, y2 = points[i + 1]\n x3, y3 = points[i + 2]\n\n tp_r = (x1 + x2 + x3) / 3.0\n tp_c = (y1 + y2 + y3) / 3.0\n tp_area = 0.5 * np.abs((x1 * y2 - x2 * y1) + (x2 * y3 - x3 * y2) + (x3 * y1 - x1 * y3))\n\n area_points[i, :] = [tp_r, tp_c, tp_area]\n # print area_points, 'in gravity_center1'\n mass = np.sum(area_points[:, 2])\n r_mass = np.dot(area_points[:, 0], area_points[:, 2])\n c_mass = np.dot(area_points[:, 1], area_points[:, 2])\n r = r_mass / mass\n c = c_mass / mass\n return r, c\n\n @staticmethod\n def get_gauss_para(region, img):\n # transfer total total2 can speed up calculation\n sumx = 0.0\n sumx2 = 0.0\n length = len(region)\n if length == 0:\n return\n for i in range(length):\n sumx += img[region[i]]\n sumx2 += pow(img[region[i]], 2)\n mean = sumx / length\n std = np.sqrt((sumx2 - pow(mean, 2) * length) / (length - 1))\n return mean, std, sumx2, sumx\n\n @staticmethod\n def update_gauss_para(sumx2, sumx, region, counter, img):\n sumx2 += pow(img[region], 2)\n sumx += img[region]\n\n mean = 1.0 * sumx / counter\n try:\n std = np.sqrt((sumx2 - pow(mean, 2) * counter) / (counter - 1))\n except Exception:\n print('(sumx2 - pow(mean, 2) * counter', (sumx2 - pow(mean, 2) * counter))\n print('counter - 1)', counter - 1)\n return mean, std, sumx2, sumx\n\n @staticmethod\n def region_grow(region, img, img_mask, std_decay):\n count = len(region)\n print('count=', count)\n RegionGrow.gen_seeds(region, img_mask)\n counter = len(region)\n print('generate seeds successfully !')\n print('get gaussion para...')\n mean, std, sumx2, sumx = RegionGrow.get_gauss_para(region, img)\n print('get gaussion para done !')\n print(mean, std, sumx, sumx2)\n std_ratio = 2.0\n reg_min = mean - std_ratio * std\n reg_max = mean + std_ratio * std\n print('reg_max, reg_min:', reg_max, reg_min)\n # region grow\n i = 0\n print('enter while .... 
')\n while len(region):\n rpoint = region.pop(0)\n # print(rpoint)\n i += 1\n r_min = max(rpoint[0] - 1, 0)\n r_max = min(rpoint[0] + 1, img.shape[0] - 1)\n c_min = max(rpoint[1] - 1, 0)\n c_max = min(rpoint[1] + 1, img.shape[1] - 1)\n\n for r, c in zip([r_min, r_max, rpoint[0], rpoint[0]], [rpoint[1], rpoint[1], c_min, c_max]):\n print('img[r, c] = ', img[r, c])\n if img[r, c] > reg_min and img[r, c] < reg_max and img_mask[r, c] != 255:\n region.append((r, c))\n img_mask[r, c] = 255\n counter += 1\n print('sdjhkskd',counter, img_mask[r, c])\n if i < count * img.shape[0] * img.shape[1] / std_decay:\n\n mean, std, sumx2, sumx = RegionGrow.update_gauss_para(sumx2, sumx, region[-1], counter, img)\n ww = std_ratio * std\n reg_min = mean - ww\n reg_max = mean + ww\n elif std_ratio > 2 / np.log(std):\n std_ratio -= 450.0 / img.shape[0] / img.shape[1]\n # std_ratio = 1.5\n ww = std_ratio * std\n reg_min = mean - ww\n reg_max = mean + ww\n print('out while !')\n cv2.imwrite('hehe.jpg', img_mask)\n con = RegionGrow.draw_region(img_mask)\n return con\n\n @staticmethod\n def generate_seed_points(points, img_mask):\n center = RegionGrow.gravity_center1(points)\n r, c = center\n img_len = np.sqrt(img_mask.shape[0] * img_mask.shape[1]) / 40\n for i in range(len(points)):\n point = points[i]\n if center[0] == point[0]:\n tp = np.abs(center[1] - point[1])\n if tp > img_len * 2:\n l = int(tp / img_len)\n tpmin = min(center[1], point[1])\n for ii in range(1, l):\n\n if img_mask[(int(point[0]), int(tpmin + tp * ii / l))] != 255:\n points.append((int(point[0]), int(tpmin + tp * ii / l)))\n img_mask[points[-1]] = 255\n continue\n\n k, b = RegionGrow.line_parameter(center, point)\n rx = min(r, point[0])\n rl = np.abs(r - point[0])\n p_dist = np.linalg.norm([rl, np.abs(c - point[1])])\n if p_dist > img_len * 2:\n l = int(p_dist / img_len)\n else:\n l = 3\n for ii in range(1, l):\n nr = rx + rl / l * ii\n nc = k * nr + b\n if img_mask[(int(nr), int(nc))] != 255:\n points.append((int(nr), int(nc)))\n img_mask[points[-1]] = 255\n if img_mask[(int(r), int(c))] != 255:\n points.append((int(r), int(c)))\n img_mask[points[-1]] = 255\n\n @staticmethod\n def draw_region(img_mask):\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n img_mask = cv2.morphologyEx(img_mask, cv2.MORPH_CLOSE, kernel, iterations=2)\n cv2.imwrite('haha.jpg', img_mask)\n # contour\n con = RegionGrow.get_contour(img_mask)\n print('con.shape = ', con.shape)\n # douglas(con)\n con = RegionGrow.douglas(con)\n print('con.shape = ', con.shape)\n return con\n\n @staticmethod\n def get_contour(img_mask):\n # 求最外层的边界\n _, con, _ = cv2.findContours(img_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # cv2.CHAIN_APPROX_TC89_KCOS\n # 可能求出多个边界 只要最大的那个\n max_val = con[0].shape[0]\n max_ind = 0\n for i in range(1, len(con)):\n if max_val < con[i].shape[0]:\n max_ind = i\n max_val = con[i].shape[0]\n con = con[max_ind][:, 0, :]\n return con\n\n @staticmethod\n def douglas_impl(dog, con, mark, start, end):\n # 函数出口\n if start == end - 1:\n return\n # 计算直线 Ax+By+C=0\n x1, y1 = con[start]\n x2, y2 = con[end]\n A = y2 - y1\n B = x1 - x2\n C = x2 * y1 - x1 * y2\n # 计算个点到直线的距离\n max_dist = 0\n for i in range(start + 1, end - 1):\n tp_dist = abs(A * con[i][0] + B * con[i][1] + C) / np.sqrt(pow(A, 2) + pow(B, 2))\n if tp_dist > max_dist:\n index = i\n max_dist = tp_dist\n\n if max_dist > dog:\n RegionGrow.douglas_impl(dog, con, mark, start, index)\n RegionGrow.douglas_impl(dog, con, mark, index, end)\n else:\n mark[start + 1:end, 0] = 0\n\n @staticmethod\n 
def douglas(con):\n dog = 15 # douglas parameter\n start = 0\n end = len(con) - 1\n mark = np.ones((len(con), 1))\n RegionGrow.douglas_impl(dog, con, mark, start, end)\n con = con[mark[:, 0] == 1, :]\n return con\n\n @staticmethod\n def line_parameter(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n k = 1.0 * (y2 - y1) / (x2 - x1)\n b = y1 - k * x1\n return k, b\n"
}
] | 6 |
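RegionGrow.douglas above is a recursive Douglas-Peucker pass driven by a keep-mask over the contour points. A small synthetic check of that mark-array idea, assuming the corrected interior-point loop bound; the points and the 1 px tolerance are invented for the example (the shipped douglas() uses a fixed tolerance of 15):

import numpy as np
from algorithm.region_grow import RegionGrow

# corners at (5, 0) and (10, 10) survive; (15, 10) is collinear with its neighbours
con = np.array([[0, 0], [5, 0], [10, 10], [15, 10], [20, 10]], dtype=np.float64)
mark = np.ones((len(con), 1))
RegionGrow.douglas_impl(1.0, con, mark, 0, len(con) - 1)
print(con[mark[:, 0] == 1, :])  # [[0, 0], [5, 0], [10, 10], [20, 10]]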
zakhar-petukhov/MicroserviceFastAPI
|
https://github.com/zakhar-petukhov/MicroserviceFastAPI
|
f6ec023db9fe04ab1b78c1c799073ecd10d1daa3
|
da191014038cf2aeadbacb34fa75b28ab3433b52
|
02fd15036f40a3f4d1f97d5fde1bf5201390612a
|
refs/heads/master
| 2022-12-07T21:50:08.613028 | 2020-08-29T22:15:15 | 2020-08-29T22:15:15 | 291,356,712 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6245487332344055,
"alphanum_fraction": 0.6245487332344055,
"avg_line_length": 24.18181800842285,
"blob_id": "ff3abec5d7b1da5ce7b372ee608afef3b19439f7",
"content_id": "22626b4ea07b7da48fed9ac073c6e53ec3af97a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 11,
"path": "/main.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from common.app import get_app\nfrom offers import router as offers_router\nfrom users import router as users_router\n\napp = get_app(\n \"Web app\",\n routers=[\n {\"router\": users_router, \"prefix\": \"/user\"},\n {\"router\": offers_router, \"prefix\": \"/offer\"},\n ],\n)\n"
},
{
"alpha_fraction": 0.6621713042259216,
"alphanum_fraction": 0.6662171483039856,
"avg_line_length": 31.2391300201416,
"blob_id": "68eb1af3de5c94e25a4535f98e033a5618c7361a",
"content_id": "b59bcecf61fc2d84aaefa5c522bd416019883722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1483,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 46,
"path": "/users/views.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from fastapi import APIRouter, HTTPException\n\nfrom common import settings\nfrom common import utils as common_utils\n\nfrom . import models, schemes, utils\n\nrouter = APIRouter()\n\n\[email protected](\"/registry\", status_code=201, response_model=schemes.DisplayUser)\nasync def register_user(user: schemes.CreateUser):\n with common_utils.insert_model():\n return await utils.user_add_related(await models.User.create(**user.dict()))\n\n\[email protected](\"/auth\", response_model=schemes.AuthResponse)\nasync def authenticate_user(user_scheme: schemes.AuthUser):\n user = await models.User.query.where(\n models.User.username == user_scheme.username\n ).gino.first()\n if not user or not utils.verify_password(\n user_scheme.password, user.hashed_password\n ):\n raise HTTPException(401, detail=\"Unauthorized\")\n token_data = {\"sub\": user.username}\n return {\n \"id\": user.id,\n \"access_token\": utils.get_jwt_token(\n data=token_data,\n token_type=\"access\",\n expires_delta=settings.ACCESS_TOKEN_EXPIRE_MINUTES,\n ),\n \"refresh_token\": utils.get_jwt_token(\n data=token_data,\n token_type=\"refresh\",\n expires_delta=settings.REFRESH_TOKEN_EXPIRE_MINUTES,\n ),\n }\n\n\[email protected](\"/{user_id}\", response_model=schemes.DisplayUser)\nasync def get_user_data(user_id: int):\n return await utils.user_add_related(\n await common_utils.get_model(models.User, user_id)\n )\n"
},
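Put together, the two POST routes above give the whole register-then-login flow. A hedged client sketch (it assumes the combined app from main.py is served locally on port 8000; httpx is an illustrative choice of HTTP client, and the credentials are placeholders):

import httpx

BASE = "http://127.0.0.1:8000"  # assumed local dev address

payload = {"username": "alice", "email": "alice@example.com", "password": "s3cret"}
created = httpx.post(f"{BASE}/user/registry", json=payload).json()  # 201 -> DisplayUser
tokens = httpx.post(f"{BASE}/user/auth",
                    json={"username": "alice", "password": "s3cret"}).json()
print(tokens["id"], tokens["access_token"], tokens["refresh_token"])  # AuthResponse fields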
{
"alpha_fraction": 0.6794354915618896,
"alphanum_fraction": 0.6794354915618896,
"avg_line_length": 30,
"blob_id": "aa6cd54a61db5f13c1fa278e19b9721832d651e5",
"content_id": "092f028a82810981e2c13e12c903f0b27c40e54d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 16,
"path": "/users/models.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from common.models import Column, Integer, String, db\nfrom users.utils import get_password_hash\n\n\nclass User(db.Model):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True, index=True)\n username = Column(String, unique=True)\n email = Column(String)\n hashed_password = Column(String)\n\n @classmethod\n async def create(cls, **kwargs):\n kwargs[\"hashed_password\"] = get_password_hash(kwargs.pop(\"password\", None))\n return await super().create(**kwargs)\n"
},
{
"alpha_fraction": 0.7128205299377441,
"alphanum_fraction": 0.7148718237876892,
"avg_line_length": 24.657894134521484,
"blob_id": "4d815c89c9f342e765e2a86582f5ba156233c631",
"content_id": "75580cb543062aa10c7b8d5461c13a3ebfc04893",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 38,
"path": "/users/utils.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from datetime import timedelta\n\nimport jwt\nfrom passlib.context import CryptContext\n\nfrom common import settings\nfrom common import utils as common_utils\n\npwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n\n\ndef verify_password(plain_password, hashed_password):\n return pwd_context.verify(plain_password, hashed_password)\n\n\ndef get_password_hash(password):\n return pwd_context.hash(password)\n\n\ndef get_jwt_token(\n data, token_type=\"access\", expires_delta: timedelta = timedelta(minutes=15)\n):\n to_encode = data.copy()\n expire = common_utils.now() + expires_delta\n to_encode.update({\"exp\": expire, \"token_type\": token_type})\n encoded_jwt = jwt.encode(\n to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM\n )\n return encoded_jwt\n\n\nasync def user_add_related(user):\n from offers import models\n\n user.offers = await models.Offer.query.where(\n models.Offer.user_id == user.id\n ).gino.all()\n return user\n"
},
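get_jwt_token above signs a copy of the payload together with exp and token_type claims. Verification with the same pyjwt library is symmetric; a minimal sketch, reusing the project's own settings module:

import jwt

from common import settings
from users.utils import get_jwt_token

token = get_jwt_token({"sub": "alice"}, token_type="refresh")
claims = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
assert claims["sub"] == "alice" and claims["token_type"] == "refresh"
# expiry is enforced by jwt.decode itself: an expired token raises jwt.ExpiredSignatureError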
{
"alpha_fraction": 0.7081760764122009,
"alphanum_fraction": 0.7157232761383057,
"avg_line_length": 21.714284896850586,
"blob_id": "cdb450d199328d096bee8b65f1492433c4a6a9d4",
"content_id": "513dea7c57b22805cbdeea64873ca2491f11e0b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 795,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 35,
"path": "/common/utils.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "import importlib\nfrom contextlib import contextmanager\nfrom datetime import datetime, timezone\n\nimport asyncpg\nfrom fastapi import HTTPException\n\nfrom . import settings\n\n\ndef now():\n return datetime.now(timezone.utc)\n\n\ndef get_target_metadata():\n # Load all apps models\n for app in settings.APPS:\n module = importlib.import_module(f\"{app}.models\")\n # Any module's db object will contain all models\n return module.db\n\n\n@contextmanager\ndef insert_model():\n try:\n yield\n except asyncpg.exceptions.IntegrityConstraintViolationError as e:\n raise HTTPException(422, e.message)\n\n\nasync def get_model(model, model_id):\n model_obj = await model.get(model_id)\n if model_obj is None:\n raise HTTPException(404, detail=\"Not found\")\n return model_obj\n"
},
{
"alpha_fraction": 0.6932006478309631,
"alphanum_fraction": 0.7081260085105896,
"avg_line_length": 13.04651165008545,
"blob_id": "b44181abb58bf7b913a4f095203b4a022fa2c018",
"content_id": "2bae2e627009487bfe8f7ca11ce48e260c57b484",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 43,
"path": "/Makefile",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "GUNICORN = @gunicorn -c gunicorn.conf.py\n\n.PHONY: all $(MAKECMDGOALS)\n\nall: ci\n\nlint:\n\tflake8 --select=C --exit-zero\n\tflake8 --extend-ignore=C901\n\ncheckformat:\n\tblack --check .\n\tisort --check .\n\nformat:\n\tblack .\n\tisort .\n\nmigrate:\n\talembic upgrade head\n\nrollback:\n\talembic downgrade -1\n\nmigration:\n\talembic revision --autogenerate -m \"${MESSAGE}\"\n\ndev:\n\t@uvicorn --reload main:app\n\nproduction:\n\t$(GUNICORN) main:app\n\noffers:\n\t$(GUNICORN) offers:app\n\nusers:\n\t$(GUNICORN) users:app\n\ngeneratekeys:\n\t@echo \"JWT secret is\" `python3 -c \"import secrets; print(secrets.token_urlsafe(48))\"`\n\nci: checkformat lint"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 21,
"blob_id": "7b493038fdac8b5cc08ba08411b6f58d59443d4c",
"content_id": "8be4fb90654b1228c0870a2521688a3c5555e839",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 6,
"path": "/common/schemes.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from pydantic import BaseModel as PydanticBaseModel\n\n\nclass BaseModel(PydanticBaseModel):\n class Config:\n orm_mode = True\n"
},
{
"alpha_fraction": 0.5959183573722839,
"alphanum_fraction": 0.6169096231460571,
"avg_line_length": 31.980770111083984,
"blob_id": "a8652d66a92ef427c6f1c521d114e44ca0da39ee",
"content_id": "01a591a6184d6347faba00204b0ee14d77b03824",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1715,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 52,
"path": "/alembic/versions/9b4a6483c1e8_initial_revision.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "\"\"\"Initial revision\n\nRevision ID: 9b4a6483c1e8\nRevises:\nCreate Date: 2020-08-29 13:39:19.766359\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"9b4a6483c1e8\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"users\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"username\", sa.String(), nullable=True),\n sa.Column(\"email\", sa.String(), nullable=True),\n sa.Column(\"hashed_password\", sa.String(), nullable=True),\n sa.PrimaryKeyConstraint(\"id\", name=op.f(\"users_pkey\")),\n sa.UniqueConstraint(\"username\", name=op.f(\"users_username_key\")),\n )\n op.create_index(op.f(\"users_id_idx\"), \"users\", [\"id\"], unique=False)\n op.create_table(\n \"offers\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"title\", sa.String(), nullable=True),\n sa.Column(\"text\", sa.String(), nullable=True),\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"user_id\"], [\"users.id\"], name=op.f(\"offers_user_id_users_fkey\")\n ),\n sa.PrimaryKeyConstraint(\"id\", name=op.f(\"offers_pkey\")),\n )\n op.create_index(op.f(\"offers_id_idx\"), \"offers\", [\"id\"], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"offers_id_idx\"), table_name=\"offers\")\n op.drop_table(\"offers\")\n op.drop_index(op.f(\"users_id_idx\"), table_name=\"users\")\n op.drop_table(\"users\")\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.8040322661399841,
"alphanum_fraction": 0.8048387169837952,
"avg_line_length": 32.98630142211914,
"blob_id": "bac94fcb6a9d5557df74bbdf6f7d73cc8a55a55e",
"content_id": "ae37878461cf51062b07b4e04a0a4765b5bd9b55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3904,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 73,
"path": "/README.md",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "# Web App\n\n## Использованные библиотеки\nПриложение написано на FastAPI, так как он прекрасно подходит для валидации данных и данной задачи.\n\nВ качестве ORM для работы с БД использован gino, для миграций-alembic.\n\nБлагодаря pydantic очень легко проводить валидацию отсутствующих полей.\n\npyjwt используется для генерации JWT токенов.\n\nuvicorn - ASGI сервер для разработки\n\ngunicorn - сервер для запуска в проде (с воркером от uvicorn)\n\npsycopg2-binary используется только для миграций\n\npyyaml использовался для генерации файла openapi.yaml\n\nemail-validator для валидации email адресов (для pydantic)\n\npasslib[bcrypt] для генерации паролей с помощью хэш-функции bcrypt.\n\n\n## Настройка и установка\n\n```bash\npip install -r requirements.txt\npip install -r dev-requirements.txt\npip install -r requirements/production.txt # для деплоя\nsudo -u postgres createdb webapp # создание базы createdb\nmake migrate # применение миграций\ncp .env.sample .env\n```\n\nДля работы приложения необходит секретный ключ JWT, он может быть сгенерирован с помощью\n\n`make generatekeys`\n\nЗатем, в файле `.env` замените JWT_SECRET_KEY=.... на свой ключ\n\nТак же в файле `.env` можно настроить CONNECTION URL для подключения к постгресу\n\n## Запуск\n\n`make dev` запускает сервер для разработки\n\n`make production` запускает сервер для прода, с двумя микросервисами работающими в одном приложении (ASGI mount)\n\n`make users` запускает микросервис users\n\n`make offers` запускает микросервис offers\n\n## Принятые решения и детали\n\nПриложение разделено на два отдельных микросервиса (offers и users).\nКаждое из них можно запустить вручную.\n\nЕсли запускать микросервисы отдельно, они не используют полный путь, например `/user/registry`. Они используют путь `/registry`. Это сделано для настройки префиксов через веб прокси (nginx, например).\n\nПри запуске сервера с двумя микросервисами сразу, они монтируются под префиками `/user` и `/offer` соответственно.\n\nВесь код отформатирован с помощью black и isort и проверен линтером flake8.\n\nДля запуска всех проверок можно использовать `make ci`.\n\n`make format` можно использовать для форматирования всего кода.\n\nВ проекте использован Makefile для упрощения запуска и настройки приложений.\n\nFastAPI генерирует openapi автоматически, поэтому файл openapi.yaml был так же сгенерирован автоматически.\n\nНастройка APPS в файле `common/settings.py` используется для автоматической загрузки всех моделей для использования автогенерации миграций (`make migration MESSAGE=\"new revision\"`)"
},
{
"alpha_fraction": 0.7300000190734863,
"alphanum_fraction": 0.7300000190734863,
"avg_line_length": 19,
"blob_id": "c66deb46a7485f1d28839eda7606a7dcffc1746d",
"content_id": "d2aaf736a3189be5b36ea38202ac33e189f3ef92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/users/__init__.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from common.app import get_app\n\nfrom .views import router\n\napp = get_app(\"Users\", routers=[router])\n"
},
{
"alpha_fraction": 0.6772983074188232,
"alphanum_fraction": 0.6772983074188232,
"avg_line_length": 19.5,
"blob_id": "c7dd3c6581677f359752ff5dce61c5eaef025356",
"content_id": "af6423db3250d8733c543914390598b42a042317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 26,
"path": "/offers/schemes.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nfrom pydantic import root_validator\n\nfrom common.schemes import BaseModel\n\n\nclass CreateOffer(BaseModel):\n title: str\n text: str\n user_id: int\n\n\nclass Offer(CreateOffer):\n id: int\n\n\nclass OfferRequest(BaseModel):\n user_id: Optional[int]\n offer_id: Optional[int]\n\n @root_validator(pre=True)\n def ensure_data(cls, values):\n if not (\"user_id\" in values or \"offer_id\" in values):\n raise ValueError(\"Either of user_id, offer_id must be passed\")\n return values\n"
},
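The `root_validator` in `OfferRequest` above is what enforces "at least one identifier" on the lookup endpoint. A quick illustration of the expected behavior (standalone pydantic usage; the import path assumes the repo layout shown in this listing):

```python
from pydantic import ValidationError

from offers.schemes import OfferRequest

OfferRequest(user_id=1)    # valid: one of the two ids is present
OfferRequest(offer_id=7)   # valid
try:
    OfferRequest()         # neither id is passed
except ValidationError as exc:
    print(exc)             # reports "Either of user_id, offer_id must be passed"
```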
{
"alpha_fraction": 0.7213822603225708,
"alphanum_fraction": 0.7213822603225708,
"avg_line_length": 13.935483932495117,
"blob_id": "7d14ddccaabddc9cfc12bed94531c98844d30ccb",
"content_id": "68a55df8fbc5b41bda1377ea8bce016d3afa7063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 31,
"path": "/users/schemes.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom pydantic import EmailStr\n\nfrom common.schemes import BaseModel\nfrom offers.schemes import Offer\n\n\nclass BaseUser(BaseModel):\n email: EmailStr\n username: str\n\n\nclass CreateUser(BaseUser):\n password: str\n\n\nclass DisplayUser(BaseUser):\n id: int\n offers: List[Offer]\n\n\nclass AuthUser(BaseModel):\n username: str\n password: str\n\n\nclass AuthResponse(BaseModel):\n id: int\n access_token: str\n refresh_token: str\n"
},
{
"alpha_fraction": 0.7326732873916626,
"alphanum_fraction": 0.7326732873916626,
"avg_line_length": 19.200000762939453,
"blob_id": "3ac05c654e9ace0e5fda767e601ae8202ee1f746",
"content_id": "9f9836bea7507a045fcd618e8660b183938d7318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 5,
"path": "/offers/__init__.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from common.app import get_app\n\nfrom .views import router\n\napp = get_app(\"Offers\", routers=[router])\n"
},
{
"alpha_fraction": 0.7553957104682922,
"alphanum_fraction": 0.7553957104682922,
"avg_line_length": 16.375,
"blob_id": "088d0c7c6ad6a987d200a71ccf103048418c1062",
"content_id": "6150fc60abaf09d32b58a1939ff98173edfafc1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 8,
"path": "/common/models.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from .db import db\n\n# shortcuts\nColumn = db.Column\nInteger = db.Integer\nString = db.String\nBoolean = db.Boolean\nForeignKey = db.ForeignKey\n"
},
{
"alpha_fraction": 0.7133995294570923,
"alphanum_fraction": 0.7171216011047363,
"avg_line_length": 31.239999771118164,
"blob_id": "8d9fcf6b63ee1e4fd27fb994bc8c6da13cc489dc",
"content_id": "4471f0f4ad3951c795e2b58df46600583e05953e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 25,
"path": "/offers/views.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from typing import List, Union\n\nfrom fastapi import APIRouter\n\nfrom common import utils as common_utils\n\nfrom . import models, schemes\n\nrouter = APIRouter()\n\n\[email protected](\"/create\", status_code=201, response_model=schemes.Offer)\nasync def create_offer(offer: schemes.CreateOffer):\n with common_utils.insert_model():\n return await models.Offer.create(**offer.dict())\n\n\[email protected](\"/\", response_model=Union[schemes.Offer, List[schemes.Offer]])\nasync def get_offer(offer_request: schemes.OfferRequest):\n if offer_request.offer_id is not None:\n return await common_utils.get_model(models.Offer, offer_request.offer_id)\n if offer_request.user_id is not None:\n return await models.Offer.query.where(\n models.Offer.user_id == offer_request.user_id\n ).gino.all()\n"
},
{
"alpha_fraction": 0.6873949766159058,
"alphanum_fraction": 0.7042016983032227,
"avg_line_length": 23.79166603088379,
"blob_id": "a5481d714b07acb6cc9988d26947f28f14a82d3b",
"content_id": "bf8f9bda01d107fa7df04e79f12a4e37113c4cc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 24,
"path": "/common/settings.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from datetime import timedelta\n\nfrom starlette.config import Config\n\nconfig = Config(\".env\")\n\n# In format postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\nCONNECTION_URL = config(\"CONNECTION_URL\", cast=str)\n\n# JWT\nSECRET_KEY = config(\"JWT_SECRET_KEY\", cast=str)\nACCESS_TOKEN_EXPIRE_MINUTES = timedelta(\n minutes=config(\"JWT_EXPIRE\", cast=int, default=15)\n)\nREFRESH_TOKEN_EXPIRE_MINUTES = timedelta(\n minutes=config(\"JWT_REFRESH_EXPIRE\", cast=int, default=60 * 24 * 7)\n)\n\nALGORITHM = config(\"JWT_ALGORITHM\", cast=str, default=\"HS256\")\n\n\n# Apps\n\nAPPS = [\"users\", \"offers\"]\n"
},
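`common/settings.py` above reads everything from a `.env` file through starlette's `Config`. A sketch of what that file could contain, using the exact variable names the module looks up (all values here are placeholders):

```
# .env (placeholder values)
CONNECTION_URL=postgresql://user:password@localhost:5432/webapp
JWT_SECRET_KEY=replace-with-a-generated-secret
JWT_EXPIRE=15
JWT_REFRESH_EXPIRE=10080
JWT_ALGORITHM=HS256
```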
{
"alpha_fraction": 0.6383970379829407,
"alphanum_fraction": 0.6402609348297119,
"avg_line_length": 28,
"blob_id": "cc36afb5fc87185c73bbcb6b4445fdd6c0df9b10",
"content_id": "653b8a779e6be46ab90a764be98af3ce2a93eb7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 37,
"path": "/common/app.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "import functools\nimport io\n\nimport yaml\nfrom fastapi import FastAPI, Response\n\nfrom common import settings\nfrom common.db import db\n\n\ndef get_app(title, routers=[], mounts={}):\n main_app = FastAPI(title=title, version=\"1.0\", docs_url=\"/\", redoc_url=\"/redoc\")\n\n @main_app.get(\"/openapi.yaml\", include_in_schema=False)\n @functools.lru_cache()\n def read_openapi_yaml():\n openapi_json = main_app.openapi()\n yaml_s = io.StringIO()\n yaml.dump(openapi_json, yaml_s, sort_keys=False, allow_unicode=True)\n return Response(yaml_s.getvalue(), media_type=\"text/yaml\")\n\n for router in routers:\n prefix = \"\"\n router_to_include = router\n if isinstance(router, dict):\n router_to_include = router[\"router\"]\n prefix = router[\"prefix\"]\n main_app.include_router(router_to_include, prefix=prefix)\n\n for path, app in mounts.items():\n main_app.mount(path, app)\n\n @main_app.on_event(\"startup\")\n async def startup():\n await db.set_bind(settings.CONNECTION_URL)\n\n return main_app\n"
},
{
"alpha_fraction": 0.6967741847038269,
"alphanum_fraction": 0.6967741847038269,
"avg_line_length": 27.18181800842285,
"blob_id": "294cd7240aeb1ce01043201c4a90e1ceb055d48b",
"content_id": "c5806d6518e4258dd1b07aa00fdd39c3734426d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 11,
"path": "/offers/models.py",
"repo_name": "zakhar-petukhov/MicroserviceFastAPI",
"src_encoding": "UTF-8",
"text": "from common.models import Column, ForeignKey, Integer, String, db\nfrom users.models import User\n\n\nclass Offer(db.Model):\n __tablename__ = \"offers\"\n\n id = Column(Integer, primary_key=True, index=True)\n title = Column(String)\n text = Column(String)\n user_id = Column(Integer, ForeignKey(User.id))\n"
}
] | 18 |
amaozhao/Python-Real-World-Machine-Learning
|
https://github.com/amaozhao/Python-Real-World-Machine-Learning
|
25a9ce7dee2bc4d611fb045c5d9ad53f43458a2a
|
ffd78fe289a5b301790c48f32db5e6abba78fff1
|
8afa721b3fb730cbd0a910989eead08174d6f5a2
|
refs/heads/master
| 2021-01-21T15:44:50.778470 | 2017-05-11T14:05:00 | 2017-05-11T14:05:00 | 81,575,685 | 0 | 0 | null | 2017-02-10T14:59:45 | 2016-12-30T20:04:57 | 2016-10-17T10:18:09 | null |
[
{
"alpha_fraction": 0.6002120971679688,
"alphanum_fraction": 0.6134676337242126,
"avg_line_length": 24.486486434936523,
"blob_id": "6d5d0ef235219ddf67d8a1f08f72ee58001fb646",
"content_id": "0a276f33ef71d7aaf38875daa49cc70d66c4d4c1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1886,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 74,
"path": "/Module 1/Chapter 3/svm.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report\n\nimport utilities\n\n# Load input data\ninput_file = 'data_multivar.txt'\nX, y = utilities.load_data(input_file)\n\n###############################################\n# Separate the data into classes based on 'y'\nclass_0 = np.array([X[i] for i in range(len(X)) if y[i] == 0])\nclass_1 = np.array([X[i] for i in range(len(X)) if y[i] == 1])\n\n# Plot the input data\nplt.figure()\nplt.scatter(\n class_0[:, 0],\n class_0[:, 1],\n facecolors='black',\n edgecolors='black',\n marker='s'\n)\nplt.scatter(\n class_1[:, 0],\n class_1[:, 1],\n facecolors='None',\n edgecolors='black',\n marker='s'\n)\nplt.title('Input data')\n\n###############################################\n# Train test split and SVM training\n\nX_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.25,\n random_state=5\n)\n\nparams = {'kernel': 'linear'}\n# params = {'kernel': 'poly', 'degree': 3}\n# params = {'kernel': 'rbf'}\nclassifier = SVC(**params)\nclassifier.fit(X_train, y_train)\nutilities.plot_classifier(classifier, X_train, y_train, 'Training dataset')\n\ny_test_pred = classifier.predict(X_test)\nutilities.plot_classifier(classifier, X_test, y_test, 'Test dataset')\n\n###############################################\n# Evaluate classifier performance\n\ntarget_names = ['Class-' + str(int(i)) for i in set(y)]\nprint (\"\\n\" + \"#\" * 30)\nprint (\"\\nClassifier performance on training dataset\\n\")\nprint (classification_report(\n y_train,\n classifier.predict(X_train),\n target_names=target_names)\n)\nprint (\"#\" * 30 + \"\\n\")\n\nprint (\"#\" * 30)\nprint (\"\\nClassification report on test dataset\\n\")\nprint (classification_report(y_test, y_test_pred, target_names=target_names))\nprint (\"#\" * 30 + \"\\n\")\n\nplt.show()\n"
},
{
"alpha_fraction": 0.5975428223609924,
"alphanum_fraction": 0.6265822649002075,
"avg_line_length": 26.97916603088379,
"blob_id": "ac2fc2c35acb2ccb30f2328ec6dcab7b18dbe8dc",
"content_id": "dfd0f3dae2c35b2bf40487edd7868847fc12d348",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2686,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 96,
"path": "/Module 1/Chapter 2/income.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import (\n train_test_split,\n cross_val_score\n)\n\ninput_file = 'adult.data.txt'\n\n# Reading the data\nX = []\ny = []\ncount_lessthan50k = 0\ncount_morethan50k = 0\nnum_images_threshold = 30000\nwith open(input_file, 'r') as f:\n for line in f.readlines():\n if '?' in line:\n continue\n\n data = line[:-1].split(', ')\n\n if data[-1] == '<=50K' and count_lessthan50k < num_images_threshold:\n X.append(data)\n count_lessthan50k = count_lessthan50k + 1\n\n elif data[-1] == '>50K' and count_morethan50k < num_images_threshold:\n X.append(data)\n count_morethan50k = count_morethan50k + 1\n\n if (\n count_lessthan50k >= num_images_threshold) and (\n count_morethan50k >= num_images_threshold):\n break\n\nX = np.array(X)\n\n# Convert string data to numerical data\nlabel_encoder = []\nX_encoded = np.empty(X.shape)\nfor i, item in enumerate(X[0]):\n if item.isdigit():\n X_encoded[:, i] = X[:, i]\n else:\n label_encoder.append(preprocessing.LabelEncoder())\n X_encoded[:, i] = label_encoder[-1].fit_transform(X[:, i])\n\nX = X_encoded[:, :-1].astype(int)\ny = X_encoded[:, -1].astype(int)\n\n# Build a classifier\nclassifier_gaussiannb = GaussianNB()\nclassifier_gaussiannb.fit(X, y)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=5)\nclassifier_gaussiannb = GaussianNB()\nclassifier_gaussiannb.fit(X_train, y_train)\ny_test_pred = classifier_gaussiannb.predict(X_test)\n\n# compute F1 score of the classifier\nf1 = cross_val_score(\n classifier_gaussiannb,\n X, y,\n scoring='f1_weighted', cv=5\n)\nprint (\"F1 score: \" + str(round(100 * f1.mean(), 2)) + \"%\")\n\n# Testing encoding on single data instance\ninput_data = [\n '39', 'State-gov',\n '77516', 'Bachelors',\n '13', 'Never-married',\n 'Adm-clerical', 'Not-in-family',\n 'White', 'Male',\n '2174', '0',\n '40', 'United-States']\ncount = 0\ninput_data_encoded = [-1] * len(input_data)\nfor i, item in enumerate(input_data):\n if item.isdigit():\n input_data_encoded[i] = int(input_data[i])\n else:\n input_data_encoded[i] = int(\n label_encoder[count].transform(\n np.array([input_data[i]]).ravel()))\n count = count + 1\n\ninput_data_encoded = np.array(input_data_encoded).reshape(-1, 1)\n\n# Predict and print output for a particular datapoint\noutput_class = classifier_gaussiannb.predict(input_data_encoded)\nprint (label_encoder[-1].inverse_transform(output_class)[0])\n"
},
{
"alpha_fraction": 0.6257206201553345,
"alphanum_fraction": 0.6456762552261353,
"avg_line_length": 25.52941131591797,
"blob_id": "ec164246ddcb9f46fb2cbdbac8eeeffc21bf82cf",
"content_id": "b82a6bd4e75a46994644db9547361bfb0e87c001",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2281,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 85,
"path": "/Module 1/Chapter 2/naive_bayes.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport numpy as np\n# import matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom logistic_regression import plot_classifier\n# from sklearn import cross_validation\n# sklearn 0.18 以后cross_validation模块会修改在.20版本后弃用\nfrom sklearn.model_selection import train_test_split, cross_val_score\n\ninput_file = 'data_multivar.txt'\n\nX = []\ny = []\nwith open(input_file, 'r') as f:\n for line in f.readlines():\n data = [float(x) for x in line.split(',')]\n X.append(data[:-1])\n y.append(data[-1])\n\nX = np.array(X)\ny = np.array(y)\n\nclassifier_gaussiannb = GaussianNB()\nclassifier_gaussiannb.fit(X, y)\ny_pred = classifier_gaussiannb.predict(X)\n\n# compute accuracy of the classifier\naccuracy = 100.0 * (y == y_pred).sum() / X.shape[0]\nprint (\"Accuracy of the classifier =\", round(accuracy, 2), \"%\")\n\nplot_classifier(classifier_gaussiannb, X, y)\n\n###############################################\n# Train test split\n\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=5)\nclassifier_gaussiannb_new = GaussianNB()\nclassifier_gaussiannb_new.fit(X_train, y_train)\ny_test_pred = classifier_gaussiannb_new.predict(X_test)\n\n# compute accuracy of the classifier\naccuracy = 100.0 * (y_test == y_test_pred).sum() / X_test.shape[0]\nprint (\"Accuracy of the classifier =\", round(accuracy, 2), \"%\")\n\nplot_classifier(classifier_gaussiannb_new, X_test, y_test)\n\n###############################################\n# Cross validation and scoring functions\n\nnum_validations = 5\naccuracy = cross_val_score(\n classifier_gaussiannb,\n X, y,\n scoring='accuracy',\n cv=num_validations\n)\nprint (\"Accuracy: \" + str(round(100 * accuracy.mean(), 2)) + \"%\")\n\nf1 = cross_val_score(\n classifier_gaussiannb,\n X, y,\n scoring='f1_weighted',\n cv=num_validations\n)\nprint (\"F1: \" + str(round(100 * f1.mean(), 2)) + \"%\")\n\nprecision = cross_val_score(\n classifier_gaussiannb,\n X, y,\n scoring='precision_weighted',\n cv=num_validations\n)\nprint (\"Precision: \" + str(round(100 * precision.mean(), 2)) + \"%\")\n\nrecall = cross_val_score(\n classifier_gaussiannb,\n X, y,\n scoring='recall_weighted',\n cv=num_validations\n)\nprint (\"Recall: \" + str(round(100 * recall.mean(), 2)) + \"%\")\n"
},
{
"alpha_fraction": 0.7203883528709412,
"alphanum_fraction": 0.7313916087150574,
"avg_line_length": 26.105262756347656,
"blob_id": "8c423e55e2c3b492d4c5c3382f9f9426bc5dbaf2",
"content_id": "5df67f41265868a36eb6359939928acbed08a4b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1545,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 57,
"path": "/Module 1/Chapter 5/pipeline.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "from sklearn.datasets import samples_generator\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectKBest, f_regression\nfrom sklearn.pipeline import Pipeline\n\n# generate sample data\nX, y = samples_generator.make_classification(\n n_informative=4,\n n_features=20,\n n_redundant=0,\n random_state=5\n)\n\n# Feature selector\nselector_k_best = SelectKBest(f_regression, k=10)\n\n# Random forest classifier\nclassifier = RandomForestClassifier(n_estimators=50, max_depth=4)\n\n# Build the machine learning pipeline\npipeline_classifier = Pipeline(\n [\n ('selector', selector_k_best),\n ('rf', classifier)\n ]\n)\n\n# We can set the parameters using the names we assigned\n# earlier. For example, if we want to set 'k' to 6 in the\n# feature selector and set 'n_estimators' in the Random\n# Forest Classifier to 25, we can do it as shown below\npipeline_classifier.set_params(\n selector__k=6,\n rf__n_estimators=25\n)\n\n# Training the classifier\npipeline_classifier.fit(X, y)\n\n# Predict the output\nprediction = pipeline_classifier.predict(X)\nprint (\"Predictions:\", prediction)\n\n# Print score\nprint (\"Score:\", pipeline_classifier.score(X, y))\n\n# Print the selected features chosen by the selector\nfeatures_status = pipeline_classifier.named_steps['selector'].get_support()\nselected_features = []\nfor count, item in enumerate(features_status):\n if item:\n selected_features.append(count)\n\nprint (\n \"Selected features (0-indexed):\",\n ', '.join([str(x) for x in selected_features])\n)\n"
},
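The `step__parameter` naming convention that `pipeline.py` above demonstrates with `set_params` is the same one scikit-learn uses for hyperparameter search, which makes the pipeline directly usable in a grid search. A minimal sketch reusing the objects defined in that file (the grid values here are arbitrary):

```python
from sklearn.model_selection import GridSearchCV

param_grid = {
    'selector__k': [4, 6, 10],     # '<step name>__<parameter name>'
    'rf__n_estimators': [25, 50],
}
search = GridSearchCV(pipeline_classifier, param_grid, cv=3)
search.fit(X, y)
print("Best parameters:", search.best_params_)
```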
{
"alpha_fraction": 0.5564082860946655,
"alphanum_fraction": 0.5809670090675354,
"avg_line_length": 21.465517044067383,
"blob_id": "f30edaf75a4acfeecd78df8300ecfec08a6c98ee",
"content_id": "1075a0e8c8b10682d2b792d9ae9ef622df43e9a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1303,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 58,
"path": "/Module 1/Chapter 3/svm_confidence.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nimport utilities\n\n# Load input data\ninput_file = 'data_multivar.txt'\nX, y = utilities.load_data(input_file)\n\n###############################################\n# Train test split\n\nX_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.25,\n random_state=5\n)\n\nparams = {'kernel': 'rbf'}\nclassifier = SVC(**params)\nclassifier.fit(X_train, y_train)\n\n###############################################\n# Measure distance from the boundary\n\ninput_datapoints = np.array(\n [\n [2, 1.5],\n [8, 9],\n [4.8, 5.2],\n [4, 4],\n [2.5, 7],\n [7.6, 2],\n [5.4, 5.9]\n ]\n)\nprint (\"\\nDistance from the boundary:\")\nfor i in input_datapoints:\n print (i, '-->', classifier.decision_function(i.reshape(1, -1))[0])\n\n# Confidence measure\nparams = {'kernel': 'rbf', 'probability': True}\nclassifier = SVC(**params)\nclassifier.fit(X_train, y_train)\nprint (\"\\nConfidence measure:\")\nfor i in input_datapoints:\n print (i, '-->', classifier.predict_proba(i.reshape(1, -1))[0])\n\nutilities.plot_classifier(\n classifier,\n input_datapoints,\n [0] * len(input_datapoints),\n 'Input datapoints',\n 'True'\n)\nplt.show()\n"
},
{
"alpha_fraction": 0.5821388959884644,
"alphanum_fraction": 0.6218302249908447,
"avg_line_length": 21.674999237060547,
"blob_id": "631d7e7983d60cb9abc73b66fa03f20ce2522425",
"content_id": "d2a40c9bf99051d409cf16835872e11c758109f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 939,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 40,
"path": "/Module 1/Chapter 5/function_composition.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport numpy as np\nfrom functools import reduce\n\n\ndef add3(input_array):\n return map(lambda x: x + 3, input_array)\n\n\ndef mul2(input_array):\n return map(lambda x: x * 2, input_array)\n\n\ndef sub5(input_array):\n return map(lambda x: x - 5, input_array)\n\n\ndef function_composer(*args):\n '''这里类似函数嵌套, 从里到外顺序执行'''\n return reduce(lambda f, g: lambda x: f(g(x)), args)\n\n\nif __name__ == '__main__':\n arr = np.array([2, 5, 4, 7])\n\n print (\"Operation: add3(mul2(sub5(arr)))\")\n\n arr1 = add3(arr)\n arr2 = mul2(arr1)\n arr3 = sub5(arr2)\n print (\"Output using the lengthy way:\", list(arr3))\n\n func_composed = function_composer(sub5, mul2, add3)\n print (\"Output using function composition:\", list(func_composed(arr)))\n\n print (\n \"Operation: sub5(add3(mul2(sub5(mul2(arr)))))\\nOutput:\",\n list(function_composer(mul2, sub5, mul2, add3, sub5)(arr))\n )\n"
},
{
"alpha_fraction": 0.6378662586212158,
"alphanum_fraction": 0.6829451322555542,
"avg_line_length": 31.463415145874023,
"blob_id": "d736a30a14b45e90c6d659a4226b1b4efe67cad3",
"content_id": "94eec0483b256617efdbbc5419934daf252a28ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1481,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 41,
"path": "/Module 1/Chapter 1/preprocessing.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport numpy as np\nfrom sklearn import preprocessing\n\ndata = np.array(\n [\n [3, -1.5, 2, -5.4],\n [0, 4, -0.3, 2.1],\n [1, 3.3, -1.9, -4.3]\n ]\n)\n\n# mean removal(去除均值)\ndata_standardized = preprocessing.scale(data)\nprint (\"\\nStandardize(标准化) =\", data_standardized)\nprint (\"\\nMean(均值) =\", data_standardized.mean(axis=0))\nprint (\"Std deviation(标准方差) =\", data_standardized.std(axis=0))\n\n# min max scaling(极值标准化法)\n# 参数 feature_range 表示最小, 最大值\ndata_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\ndata_scaled = data_scaler.fit_transform(data)\nprint (\"\\nMin max scaled data(极值标准化):\\n\", data_scaled)\n\n# normalization(正则化)\ndata_normalized1 = preprocessing.normalize(data, norm='l1')\ndata_normalized2 = preprocessing.normalize(data, norm='l2')\nprint (\"\\nL1 normalized data1:\\n\", data_normalized1)\nprint (\"\\nL2 normalized data2:\\n\", data_normalized2)\n\n# binarization(二值化)\n# threshold 的意思是: 阀值(小于时设置为0, 大于时设置为1)\ndata_binarized = preprocessing.Binarizer(threshold=1.4).transform(data)\nprint (\"\\nBinarized data(二值化):\\n\", data_binarized)\n\n# one hot encoding(独热编码或一位有效编码)\nencoder = preprocessing.OneHotEncoder()\nencoder.fit([[0, 2, 1, 12], [1, 3, 5, 3], [2, 3, 2, 12], [1, 2, 4, 3]])\nencoded_vector = encoder.transform([[2, 3, 5, 3]]).toarray()\nprint (\"\\nEncoded vector(独热编码):\\n\", encoded_vector)\n"
},
{
"alpha_fraction": 0.5407925248146057,
"alphanum_fraction": 0.5967366099357605,
"avg_line_length": 26.677419662475586,
"blob_id": "f02e87f6161fabe3ac1b29d72bde46022cfcfb7f",
"content_id": "be71dce11afe46af7258ea690ef3cb101d8459ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 858,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 31,
"path": "/Module 1/Chapter 2/simple_classifier.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# input data\nX = np.array([[3, 1], [2, 5], [1, 8], [6, 4], [5, 2], [3, 5], [4, 7], [4, -1]])\n\n# labels\ny = [0, 1, 1, 0, 0, 1, 1, 0]\n\n# separate the data into classes based on 'y'\nclass_0 = np.array([X[i] for i in range(len(X)) if y[i] == 0])\nclass_1 = np.array([X[i] for i in range(len(X)) if y[i] == 1])\n\n# plot input data\nplt.figure()\nplt.scatter(class_0[:, 0], class_0[:, 1], color='blue', marker='s')\nplt.scatter(class_1[:, 0], class_1[:, 1], color='black', marker='x')\n\n# draw the separator line\nline_x = range(10)\nline_y = line_x\n\n# plot labeled data and separator line\nplt.figure()\nplt.scatter(class_0[:, 0], class_0[:, 1], color='blue', marker='s')\nplt.scatter(class_1[:, 0], class_1[:, 1], color='black', marker='x')\nplt.plot(line_x, line_y, color='red', linewidth=3)\n\nplt.show()\n"
},
{
"alpha_fraction": 0.7520372271537781,
"alphanum_fraction": 0.7520372271537781,
"avg_line_length": 26.70967674255371,
"blob_id": "5306a7454d9812807cb98cce2e95b42806938d70",
"content_id": "6a22ebf720de6b609168848510fb41a9d8d0194c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 859,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 31,
"path": "/Module 1/Chapter 6/tokenizer.py",
"repo_name": "amaozhao/Python-Real-World-Machine-Learning",
"src_encoding": "UTF-8",
"text": "from nltk.tokenize import sent_tokenize\nfrom nltk.tokenize import word_tokenize\n# from nltk.tokenize import PunktWordTokenizer\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer\nfrom nltk.tokenize import WordPunctTokenizer\n\ntext = (\"Are you curious about tokenization? \" +\n \"Let's see how it works! \" +\n \"We need to analyze a couple of \" +\n \"sentences with punctuations to see it in action.\")\n\n\nsent_tokenize_list = sent_tokenize(text)\nprint (\"Sentence tokenizer:\")\nprint (sent_tokenize_list)\n\n\nprint (\"Word tokenizer:\")\nprint (word_tokenize(text))\n\n# Create a new punkt word tokenizer\n\n\npunkt_sent_tokenizer = PunktSentenceTokenizer()\nprint (\"Punkt word tokenizer:\")\nprint (punkt_sent_tokenizer.tokenize(text))\n\n\nword_punct_tokenizer = WordPunctTokenizer()\nprint (\"Word punct tokenizer:\")\nprint (word_punct_tokenizer.tokenize(text))\n"
}
] | 9 |
lorecioni/WordHints
|
https://github.com/lorecioni/WordHints
|
e843ead304563d5750ad40421514e946cc1c1845
|
b3b4155469c037631d0f6a5bff9eebfcd08790ad
|
323c340f032403fc8f8b295e5ed06ede4316635f
|
refs/heads/master
| 2020-04-13T11:49:10.750948 | 2013-09-08T09:01:07 | 2013-09-08T09:01:07 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4496487081050873,
"alphanum_fraction": 0.4625292718410492,
"avg_line_length": 21.473684310913086,
"blob_id": "30feac64a5f3b9b4a9dbafea1d60de822897c3dc",
"content_id": "f9d4fa79fcf9e6d73d56325dba69fc3de57ffb1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 38,
"path": "/correttore.py",
"repo_name": "lorecioni/WordHints",
"src_encoding": "UTF-8",
"text": "#Created on 08/nov/2012\n\nfrom editDistance import editDistance\nfrom vocabolario import Vocabolario\n\ndef checkError(x, v):\n if x:\n l = len(v)\n found = False\n i = 0\n while (found == False and i < l):\n if v[i] == x:\n found = True\n else:\n i = i +1\n if found == True:\n print 'Parola corretta!'\n else:\n best = 100\n for j in range(len(v)):\n distance = editDistance(x, v[j])\n if(distance < best):\n best = distance\n word = j\n print \"Forse cercavi '\"+ v[word] + \"'\" \n\ndef testInput():\n x = raw_input(\"Inserisci una parola: \")\n print \"Parola inserita '\" + x +\"'\"\n checkError(x, V)\n\n\n\n\n'''Test'''\nglobal V\nV = Vocabolario()\ntestInput()\n"
},
{
"alpha_fraction": 0.45954692363739014,
"alphanum_fraction": 0.46278315782546997,
"avg_line_length": 21.095237731933594,
"blob_id": "2e4e5d37e575b12ade5fd4d574a00983c532bfcb",
"content_id": "2f219ef470b76fede3a2f54f525f52283dd2e96c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 927,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 42,
"path": "/vocabolario.py",
"repo_name": "lorecioni/WordHints",
"src_encoding": "UTF-8",
"text": "from data import*\n\nclass Vocabolario:\n\n def __init__(self):\n self.V = initVocabulary()\n self.count = len(self.V)\n \n def addWord(self, w):\n r = self.findWord(\"DEL\")\n if r == -1:\n self.V.append(w)\n else:\n self.V[r] = w\n \n\n def findWord(self, w):\n for i in range(self.count):\n if self.V[i] == w:\n return i\n return -1\n\n def delWord(self, w):\n r = self.findWord(w)\n if r != -1:\n self.V[r]= \"DEL\"\n else:\n print \"Parola non presente nel dizionario!\"\n\n def printVoc(self):\n print \"Vocabolario\"\n for i in range(self.count):\n print str(self.V[i])\n\n def __len__(self):\n return len(self.V) \n\n def __setitem__(self, w): \n self.addWord(w)\n\n def __getitem__(self, w): # supports v = T[k]\n return self.V[w]"
},
{
"alpha_fraction": 0.7761732935905457,
"alphanum_fraction": 0.7761732935905457,
"avg_line_length": 38.57143020629883,
"blob_id": "93c3e5d4f036404152adc45a14f01441e191ded7",
"content_id": "2bbd73eb77591473b1446e07cd7718dafca0d82c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 278,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 7,
"path": "/README.md",
"repo_name": "lorecioni/WordHints",
"src_encoding": "UTF-8",
"text": "WordHints\n=========\n\nCorrettore di parole in Python. \n\nIl programma prende in input una parola e, tramite l'algoritmo di edit-distance, verifica se la parola è corretta o meno. In caso di errore fornisce la parola che probabilmente stavamo cercando.\nTodo: aggiornare wordlist.\n"
},
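The README above summarizes the idea: look the word up, and if it is missing, rank the vocabulary by edit distance. A tiny usage sketch against the modules in this listing (the suggestion returned depends on the custom operation costs defined in `editDistance.py`):

```python
from editDistance import editDistance
from vocabolario import Vocabolario

V = Vocabolario()
# Suggest the dictionary word closest to a misspelled input
suggestion = min(V.V, key=lambda w: editDistance("caza", w))
print(suggestion)  # expected to suggest "casa" (one replacement away)
```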
{
"alpha_fraction": 0.5349462628364563,
"alphanum_fraction": 0.5349462628364563,
"avg_line_length": 20.441177368164062,
"blob_id": "d82fe285c821ed00468e8c2e93f7d3502c02bae9",
"content_id": "c0e6b746561f28954f0a6c4ff01bb80463e6eb65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 744,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 34,
"path": "/data.py",
"repo_name": "lorecioni/WordHints",
"src_encoding": "UTF-8",
"text": "\ndef initVocabulary():\n V = []\n V.append(\"albero\")\n V.append(\"ciao\")\n V.append(\"casa\")\n V.append(\"baule\")\n V.append(\"cane\")\n V.append(\"dado\")\n V.append(\"dito\")\n V.append(\"erba\")\n V.append(\"elefante\")\n V.append(\"farfalla\")\n V.append(\"gioco\")\n V.append(\"grande\")\n V.append(\"hotel\")\n V.append(\"ira\")\n V.append(\"istrice\")\n V.append(\"limone\")\n V.append(\"moto\")\n V.append(\"nuoto\")\n V.append(\"nave\")\n V.append(\"parco\")\n V.append(\"paolo\")\n V.append(\"quadro\")\n V.append(\"rissa\")\n V.append(\"restare\")\n V.append(\"sala\")\n V.append(\"sana\")\n V.append(\"tavolo\")\n V.append(\"tenda\")\n V.append(\"troppo\")\n V.append(\"uva\")\n V.append(\"vino\")\n return V\n \n \n"
},
{
"alpha_fraction": 0.4036770462989807,
"alphanum_fraction": 0.43085530400276184,
"avg_line_length": 25.617021560668945,
"blob_id": "25e09099794da71d8612f9f93a5588ae0d462c93",
"content_id": "6b0c3105498dd3debd52e3c94141dda4f9e9010f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1251,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 47,
"path": "/editDistance.py",
"repo_name": "lorecioni/WordHints",
"src_encoding": "UTF-8",
"text": "def editDistance(x, y):\n m = len(x)\n n = len(y)\n\n # print 'Distanza da \"' + x +'\" a \"' + y + '\":'\n\n cost = {}\n cost['copy'] = 0 #Costo per la copia\n cost['replace'] = 1 #Costo per la sostituzione\n cost['twiddle'] = 1 #Costo per lo scambio\n cost['delete'] = 3 #Costo per la cancellazione\n cost['insert'] = 1 #Costo per l'inserimento\n\n c = [range(n + 1)] * (m + 1)\n\n for i in range(m + 1):\n c[i] = range(i,i + n + 1)\n\n\n\n for i in range(1,m +1):\n for j in range(1,n +1):\n if x[i-1] == y[j-1]:\n c[i][j] = minimum(c[i-1][j] + cost['delete'], c[i][j-1] + cost['insert'], c[i-1][j-1] + cost['copy'])\n \n elif (i>=2 and j>=2 and x[i-1] == y[j-2] and x[i-2] == y[j-1]):\n c[i][j] = minimum((c[i-2][j-2] + cost['twiddle']),(c[i][j-1] + cost['delete']), (c[i-1][j] + cost['insert']))\n \n else:\n c[i][j] = minimum(c[i][j-1] + cost['insert'], c[i-1][j] + cost['delete'], c[i-1][j-1] + cost['replace'])\n\n\n # for i in range(m+1):\n # print c[i]\n\n # print 'Costo: ',c[m][n]\n\n return c[m][n]\n \n\ndef minimum(x, y, z):\n m = x\n if y < m:\n m = y\n if z < m:\n m = z\n return m\n"
}
] | 5 |
thimabru1010/Homework1_ELE2765
|
https://github.com/thimabru1010/Homework1_ELE2765
|
be9401bcc0b7cd009865b6de4c9ba1e83a747744
|
1d7431f6197bc8bde1f9baad89cdd944a551cf69
|
f0def71b00957ba47c7804c7316f62ae1639ae84
|
refs/heads/master
| 2022-06-05T17:52:28.120485 | 2022-04-24T21:35:50 | 2022-04-24T21:35:50 | 256,780,031 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6174063086509705,
"alphanum_fraction": 0.6448370218276978,
"avg_line_length": 27.399999618530273,
"blob_id": "7777a899bf9f2bf84bf8b0a1f9a61bb88fbd19b3",
"content_id": "0decbf1973633f3521b1760d43f6e0e5db90185c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6969,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 245,
"path": "/train_classificator_final.py",
"repo_name": "thimabru1010/Homework1_ELE2765",
"src_encoding": "UTF-8",
"text": "\n#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nfrom keras.layers import Input, Dense, Conv2D, MaxPool2D, Flatten, Dropout, BatchNormalization, Activation\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nimport keras\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimport os\nfrom tqdm import tqdm\nimport json\n\n\n# In[10]:\n\n\ndef VGG_11(input_shape, classes):\n activation_f = 'relu'\n momemtum = 0.8\n input_img = Input(shape=input_shape)\n # Block 1\n # layer 1\n x = Conv2D(32, (3,3), strides=1, activation='linear', padding='same')(input_img)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n x = MaxPool2D(pool_size=(2,2), strides=2, padding='same')(x)\n # Block 2\n #layer 2\n x = Conv2D(64, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n x = MaxPool2D(pool_size=(2,2), strides=2, padding='same')(x)\n # Block 3\n #layer 3\n x = Conv2D(128, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n #layer 4\n x = Conv2D(128, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n x = MaxPool2D(pool_size=(2,2), strides=2, padding='same')(x)\n # Block 4\n #layer 5\n x = Conv2D(256, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n #layer 6\n x = Conv2D(256, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n x = MaxPool2D(pool_size=(2,2), strides=2, padding='same')(x)\n # Block 5\n # layer 7\n x = Conv2D(256, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n #layer 8\n x = Conv2D(256, (3,3), strides=1, activation='linear', padding='same')(x)\n x = BatchNormalization(momentum=momemtum)(x)\n x = Activation(activation_f)(x)\n x = MaxPool2D(pool_size=(2,2), strides=2, padding='same')(x)\n \n #layer 9\n #fully-connected layer\n x = Flatten()(x)\n \n #layer 10\n x = Dense(512, activation='linear')(x)\n x = Activation(activation_f)(x)\n #layer 11\n x = Dense(512, activation='linear')(x) \n x = Activation(activation_f)(x)\n x = Dropout(0.2)(x)\n \n # softmax layer (output)\n pred = Dense(classes, activation='softmax')(x)\n \n model = Model(input_img , pred)\n \n return model\n \n\n\n# In[3]:\n\n\ndef graph_training_history(history):\n acc_train = history['accuracy']\n acc_test = history['val_accuracy']\n loss_train = history['loss']\n loss_test = history['val_loss']\n #print(acc_train, acc_test, loss_train, loss_test)\n plt.rcParams['axes.facecolor']='white'\n plt.figure(1)\n\n # summarize history for accuracy\n plt.subplot(121)\n plt.plot(acc_train)\n plt.plot(acc_test)\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.tight_layout()\n\n # summarize history for loss\n plt.subplot(122)\n plt.plot(loss_train)\n plt.plot(loss_test)\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper right')\n plt.tight_layout()\n \n plt.show()\n\n\n# In[4]:\n\n\n# 
Augmentations\ntrain_aug=ImageDataGenerator(rescale=1./255,\n height_shift_range=0.2, featurewise_center=0,\n horizontal_flip=True, vertical_flip=True,\n zoom_range=0.3)\n\n# No Augmentations\n#train_aug = ImageDataGenerator(rescale=1./255)\n\n#Paths dos datasets\ntrain_folder = \"UCMERCED_HW_SceneClassification/data/separated_in_labels_aug/train\"\nval_folder = \"UCMERCED_HW_SceneClassification/data/separated_in_labels_aug/val\"\n\n# train_folder = \"UCMERCED_HW_SceneClassification/data/separated_in_labels/train\"\n# val_folder = \"UCMERCED_HW_SceneClassification/data/separated_in_labels/val\"\n\n\n# In[14]:\n\n\n# Definina o batch-size e tamanho da imagem de entrada\nbatch_size = 32\ninput_shape = (256, 256, 3)\n(w, h, _) = input_shape\nprint(w,h)\ntrain_generator = train_aug.flow_from_directory(train_folder, \n target_size=(w, h),batch_size=batch_size, \n class_mode='categorical', shuffle=True, seed=42)\n\nval_generator = train_aug.flow_from_directory(val_folder, \n target_size=(w, h), batch_size=batch_size, \n class_mode='categorical', shuffle=True, seed=42)\n\n\n# In[16]:\n\n\n# Pega o tamanho total de imagens no dataset\nfilenames = train_generator.filenames\nsamples = len(filenames)\nprint(samples)\n\n\n# In[17]:\n\n\n# Define o Early Stopping e salva o melhor modelo durante o treino\nfile_name = 'best_model.h5'\ncheckpointer = ModelCheckpoint(file_name, monitor='val_accuracy', save_best_only=True)\nearly_stop = EarlyStopping(monitor = 'val_accuracy', min_delta = 0.001,\nmode = 'max', patience = 10)\ncallbacks=[checkpointer,early_stop]\n\n\n# In[18]:\n\n\n# Define o número de classes, número de épocas e o learning rate e o decaimento usado\nclasses = 21\nepochs = 100\nsteps_in_epoch = samples // batch_size\nlr = 1e-3\nadam = Adam(learning_rate = lr, decay=0.01)\nnet = VGG_11(input_shape, classes)\nnet.compile(loss = 'categorical_crossentropy', optimizer=adam , metrics=['accuracy'])\nnet.summary()\n\nhistory = net.fit_generator(train_generator, steps_per_epoch=steps_in_epoch, epochs=epochs, \n validation_data=val_generator, validation_steps=1, \n verbose=1,callbacks=callbacks)\n\n\n# In[19]:\n\n\n# Mostra o gráfico ao fim do treino\ngraph_training_history(history.history)\n\n\n# In[20]:\n\n\n# Salva o modelo ao final do treino (não o melhor) e salva a história do treinamento em formato de string\nnet.save('weights/model_END.h5')\n# Saving history\nwith open('weights/history_model.json', 'w') as f:\n json.dump(str(net.history.history), f)\n\n\n# In[21]:\n\n\n\ntest_folder = \"UCMERCED_HW_SceneClassification/data/test_separated\"\ntest_aug = ImageDataGenerator(rescale=1./255)\n\ntest_generator = test_aug.flow_from_directory(test_folder, \n target_size=(w, h), batch_size=batch_size, \n class_mode='categorical', shuffle=True, seed=42)\nscoreSeg = net.evaluate_generator(test_generator, batch_size)\n\n\n# In[22]:\n\n\nprint(net.metrics_names)\nprint(scoreSeg)\n\n\n# In[26]:\n\n\nfrom keras.utils import plot_model\nplot_model(net, to_file='model.png')\n\n\n# In[ ]:\n\n\n\n\n"
},
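The training script above checkpoints its best weights to `best_model.h5`; a minimal sketch of reusing them for single-image inference (the image path is a placeholder, and the 1/255 rescaling mirrors the generators used in training):

```python
import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('best_model.h5')
img = image.load_img('path/to/image.tif', target_size=(256, 256))  # placeholder path
x = image.img_to_array(img) / 255.0  # same rescaling as ImageDataGenerator(rescale=1./255)
probs = model.predict(np.expand_dims(x, axis=0))[0]
print("Predicted class index:", probs.argmax())
```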
{
"alpha_fraction": 0.7598684430122375,
"alphanum_fraction": 0.7960526347160339,
"avg_line_length": 75,
"blob_id": "998594e704f81c14136f485886b49e2834bae837",
"content_id": "5a16348bff6a14712cd61a023416f1d2c2f81f72",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 304,
"license_type": "permissive",
"max_line_length": 199,
"num_lines": 4,
"path": "/README.md",
"repo_name": "thimabru1010/Homework1_ELE2765",
"src_encoding": "UTF-8",
"text": "# Homework 1 ELE2765\nHomework 1 of Deep Learning subject on Electrical Engineering Master's at PUC-Rio\n\nThe Objective of this Homework is to train a classificator in UC Merced Land Use Dataset. To solve this task, I implemented and trained a VGG11 in tensorflow 2.x (keras) and obtained 70% of accuracy.\n"
}
] | 2 |
onocy/doma
|
https://github.com/onocy/doma
|
24dabe6dd9a1b5c294f28664689dbf66b40313c0
|
be001f961648b7c7879ae1934d322f2d3310e78a
|
0fb62888072d22abb958a1e66ecb423d4090ef43
|
refs/heads/master
| 2021-09-10T09:40:09.644247 | 2017-12-13T05:36:58 | 2017-12-13T05:36:58 | 103,565,222 | 1 | 1 | null | 2017-09-14T18:05:11 | 2017-11-28T00:15:45 | 2017-12-13T05:42:58 |
Python
|
[
{
"alpha_fraction": 0.5586307644844055,
"alphanum_fraction": 0.5833940505981445,
"avg_line_length": 30.204545974731445,
"blob_id": "8d25641d8c497726f66acf1c74c0333969e8614c",
"content_id": "db0679eae740d6bccd780dda629206d279c81b1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1373,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 44,
"path": "/cleanslate/doma/migrations/0022_auto_20171212_1219.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-12 17:19\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0021_auto_20171212_1155'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='event',\n name='deadline',\n ),\n migrations.AddField(\n model_name='event',\n name='end_time',\n field=models.DateTimeField(default=django.utils.timezone.now, help_text='When is this event going to end?'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='event',\n name='home',\n field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='doma.Home'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='event',\n name='start_time',\n field=models.DateTimeField(default=django.utils.timezone.now, help_text='When is this event going to start?'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='event',\n name='created_on',\n field=models.DateTimeField(),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5675182342529297,
"alphanum_fraction": 0.6277372241020203,
"avg_line_length": 25.095237731933594,
"blob_id": "30e76b02fc40830857c6d8bba99c538278f4d9eb",
"content_id": "c2a0133bd082b29356d4a1405e3cc9ddd8f31173",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 21,
"path": "/cleanslate/doma/migrations/0023_auto_20171212_1230.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-12 17:30\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0022_auto_20171212_1219'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='home',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='doma.Home'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5038167834281921,
"alphanum_fraction": 0.5877862572669983,
"avg_line_length": 19.6842098236084,
"blob_id": "3620f456c2b5aa27d2bc85c37c1ab171f0776c81",
"content_id": "7ec85653b96dcb3ae9c2dee1d6e1fe596b204651",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 393,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 19,
"path": "/cleanslate/doma/migrations/0017_remove_forum_created_on.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-11 22:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0016_auto_20171211_1729'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='forum',\n name='created_on',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5667275786399841,
"alphanum_fraction": 0.6270566582679749,
"avg_line_length": 25.047618865966797,
"blob_id": "f09bcc1a358410abb8d62c6372cacc94741d4638",
"content_id": "88623c85c0984a07df7bc92ad47bcb633bd88310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 21,
"path": "/cleanslate/doma/migrations/0003_auto_20171101_1659.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-11-01 20:59\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0002_auto_20171101_1657'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='home',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='doma.Home'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.680497944355011,
"alphanum_fraction": 0.680497944355011,
"avg_line_length": 35.07692337036133,
"blob_id": "4bbefbd1d3652f9c86b23cf6c0abb50364c19a89",
"content_id": "a2fd2a4ada6ec94dea5670ae3f4fd2c2925b7321",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 13,
"path": "/cleanslate/cleanslate/middleware/last_seen_middleware.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "from django.utils import timezone\r\nfrom doma.models import Profile\r\n\r\nclass UpdateLastActivityMiddleware(object):\r\n def __init__(self, get_response):\r\n self.get_response = get_response\r\n\r\n def __call__(self, request):\r\n return self.get_response(request)\r\n\r\n def process_view(self, request, view_func, view_args, view_kwargs):\r\n if request.user.is_authenticated():\r\n Profile.objects.filter(user=request.user).update(lastSeen=timezone.now())\r\n"
},
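A middleware class like the one above only runs once Django knows about it; a sketch of the corresponding settings entry (the dotted path follows the file location in this repo, and the surrounding entries are only illustrative):

```python
# settings.py (sketch)
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # placed after AuthenticationMiddleware so request.user is populated
    'cleanslate.middleware.last_seen_middleware.UpdateLastActivityMiddleware',
]
```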
{
"alpha_fraction": 0.5572031736373901,
"alphanum_fraction": 0.5678103566169739,
"avg_line_length": 50.04419708251953,
"blob_id": "0997231c7b42f4de4ecf21f8ddb95c3303aac2c4",
"content_id": "c17323a9d3a61d95026c18286352d571b289d980",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9239,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 181,
"path": "/cleanslate/doma/migrations/0001_initial.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-11-01 20:48\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Chore',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a chore name', max_length=200)),\n ('description', models.CharField(help_text='Enter description', max_length=500)),\n ('created_on', models.DateField()),\n ('deadline', models.DateField(help_text='When is this chore due?')),\n ],\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter an event name', max_length=200)),\n ('description', models.CharField(help_text='Enter description', max_length=500)),\n ('created_on', models.DateField()),\n ('deadline', models.DateField(help_text='When is this event going to occur?')),\n ],\n ),\n migrations.CreateModel(\n name='Forum',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a forum name', max_length=200)),\n ('description', models.TextField(help_text='Enter a description for this forum', max_length=1000)),\n ('created_on', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Home',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Enter your Home Name', max_length=100)),\n ('address', models.CharField(help_text='Enter your Address', max_length=100, null=True)),\n ('leaseStart', models.DateTimeField(blank=True, null=True)),\n ('leaseEnds', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a post name', max_length=200)),\n ('content', models.CharField(help_text='Enter content', max_length=500)),\n ('created_on', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='Reminder',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a reminder name', max_length=200)),\n ('description', models.CharField(help_text='Enter description', max_length=500)),\n ('created_on', models.DateField()),\n ('deadline', models.DateField(help_text='When is this reminder due?')),\n ],\n ),\n migrations.CreateModel(\n name='Review',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ],\n ),\n migrations.CreateModel(\n name='Topic',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a topic name', max_length=200)),\n ('content', models.CharField(max_length=500)),\n ('created_on', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a transaction name', 
max_length=200)),\n ('description', models.CharField(help_text='Enter description', max_length=500)),\n ('created_on', models.DateField()),\n ('deadline', models.DateField(help_text='When is this transaction due?')),\n ('amount', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(blank=True, help_text='Enter your password', max_length=100, null=True)),\n ('first_name', models.CharField(max_length=100)),\n ('last_name', models.CharField(max_length=100)),\n ('phone', models.CharField(blank=True, help_text='Enter your phone number', max_length=10, null=True)),\n ('yog', models.CharField(blank=True, help_text='Enter your graduation date', max_length=100, null=True)),\n ('major', models.CharField(blank=True, max_length=100, null=True)),\n ('role', models.CharField(choices=[('a', 'Admin'), ('u', 'User')], default='u', max_length=1)),\n ('status', models.TextField(help_text='Enter a status for others to view', max_length=1000)),\n ('bio', models.TextField(help_text='Enter a brief description of yourself', max_length=1000)),\n ('smokes', models.BooleanField(default=False, help_text='Do you smoke cigarettes?')),\n ('bedtime', models.TimeField(blank=True, help_text='What is your usual sleep-time?', null=True)),\n ('lastSeen', models.DateField(blank=True, null=True)),\n ('email', models.EmailField(help_text='Enter your email', max_length=254)),\n ('pet_allergies', models.NullBooleanField(help_text='Are you allergic to pets?')),\n ('home', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='doma.Home')),\n ],\n ),\n migrations.CreateModel(\n name='Village',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a village name', max_length=200)),\n ('forum', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='doma.Forum')),\n ('home', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='village_home', to='doma.Home')),\n ],\n ),\n migrations.AddField(\n model_name='topic',\n name='created_by',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='doma.User'),\n ),\n migrations.AddField(\n model_name='topic',\n name='forum',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='doma.Forum'),\n ),\n migrations.AddField(\n model_name='review',\n name='reviewed',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviewed_user', to='doma.User'),\n ),\n migrations.AddField(\n model_name='review',\n name='reviewedBy',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviewer', to='doma.User'),\n ),\n migrations.AddField(\n model_name='post',\n name='created_by',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='op', to='doma.User'),\n ),\n migrations.AddField(\n model_name='post',\n name='topic',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='doma.Topic'),\n ),\n migrations.AddField(\n model_name='home',\n name='createdBy',\n field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='home_creator', to='doma.User'),\n ),\n migrations.AddField(\n model_name='home',\n name='forum',\n 
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='doma.Forum'),\n ),\n migrations.AddField(\n model_name='home',\n name='village',\n field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='home_village', to='doma.Village'),\n ),\n migrations.AddField(\n model_name='forum',\n name='created_by',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='doma.User'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5817282795906067,
"alphanum_fraction": 0.5960437059402466,
"avg_line_length": 45.85365676879883,
"blob_id": "3c58b2f45bd2f33ff3c1fb65f06f39b86965692d",
"content_id": "6075770d7410065a5009fd895e457944e0119a5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3842,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 82,
"path": "/cleanslate/doma/migrations/0009_auto_20171127_1803.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-11-27 23:03\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('doma', '0008_auto_20171101_2002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('phone', models.CharField(blank=True, help_text='Enter your phone number', max_length=10, null=True)),\n ('yog', models.CharField(blank=True, help_text='Enter your graduation date', max_length=100, null=True)),\n ('major', models.CharField(blank=True, max_length=100, null=True)),\n ('role', models.CharField(choices=[('a', 'Admin'), ('u', 'User')], default='u', max_length=1)),\n ('status', models.TextField(help_text='Enter a status for others to view', max_length=1000)),\n ('bio', models.TextField(blank=True, help_text='Enter a brief description of yourself', max_length=1000)),\n ('smokes', models.BooleanField(default=False, help_text='Do you smoke cigarettes?')),\n ('bedtime', models.TimeField(blank=True, help_text='What is your usual sleep-time?', null=True)),\n ('lastSeen', models.DateField(null=True)),\n ('email', models.EmailField(blank=True, help_text='Enter your email', max_length=254)),\n ('pet_allergies', models.NullBooleanField(help_text='Are you allergic to pets?')),\n ],\n ),\n migrations.RemoveField(\n model_name='user',\n name='home',\n ),\n migrations.AlterField(\n model_name='forum',\n name='created_by',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='forum_created_by', to='doma.Profile'),\n ),\n migrations.AlterField(\n model_name='home',\n name='created_by',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='home_created_by', to='doma.Profile'),\n ),\n migrations.AlterField(\n model_name='post',\n name='created_by',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='op', to='doma.Profile'),\n ),\n migrations.AlterField(\n model_name='review',\n name='reviewed',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviewed_user', to='doma.Profile'),\n ),\n migrations.AlterField(\n model_name='review',\n name='reviewedBy',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviewer', to='doma.Profile'),\n ),\n migrations.AlterField(\n model_name='topic',\n name='created_by',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='doma.Profile'),\n ),\n migrations.DeleteModel(\n name='User',\n ),\n migrations.AddField(\n model_name='profile',\n name='home',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='doma.Home'),\n ),\n migrations.AddField(\n model_name='profile',\n name='user',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5321375131607056,
"alphanum_fraction": 0.5814648866653442,
"avg_line_length": 25.760000228881836,
"blob_id": "d6d9b44f60450d663a75c7a0f27c574bfdb0fafd",
"content_id": "c3ce36d90fecfe9ff0b818f62b6f74d1387d87d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 25,
"path": "/cleanslate/doma/migrations/0024_auto_20171212_1232.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-12 17:32\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0023_auto_20171212_1230'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='end_time',\n field=models.DateField(help_text='When is this event going to end?'),\n ),\n migrations.AlterField(\n model_name='event',\n name='start_time',\n field=models.DateField(help_text='When is this event going to start?'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6402016282081604,
"alphanum_fraction": 0.6472904682159424,
"avg_line_length": 45.67647171020508,
"blob_id": "05130f357106f1478431be04bdeb9d6fe58201f3",
"content_id": "f5ed4149eb67383d5ab9e2c1da7e4c01e5031151",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6348,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 136,
"path": "/cleanslate/doma/forms.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.core.files.images import get_image_dimensions\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nimport datetime\nfrom .models import User, Home, Forum, Topic, Chore, Event, Profile # Village, Transaction, Review, Reminder, Post,\nfrom markdownx.fields import MarkdownxFormField\n\nclass EditProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['phone', 'yog', 'major', 'bio', 'status']\n HOMES = [(None, '')]\n for home in Home.objects.all():\n HOMES += [(home.id, home.name)]\n home = forms.ChoiceField(help_text='Which home do you want to be in?', choices=HOMES, required=False)\n avatar = forms.ImageField(required=False)\n\n def clean_avatar(self):\n if self.cleaned_data['avatar']:\n avatar = self.cleaned_data['avatar']\n try:\n w, h = get_image_dimensions(avatar)\n max_width = max_height = 250\n if w > max_width or h > max_height:\n raise forms.ValidationError(\n \"Please use an image that is {} x {} pixels or smaller.\".format(max_width, max_height)\n )\n main, sub = avatar.content_type.split('/')\n if not (main == 'image' and sub in ['jpeg', 'gif', 'png']):\n raise forms.ValidationError(\n \"Please use a JPEG, GIF or PNG image.\"\n )\n if len(avatar) > (1000 * 1024):\n raise forms.ValidationError(\n u'Avatar file size may not exceed 50k.')\n except AttributeError:\n \"\"\"\n Handles case when we are updating the user profile\n and do not supply a new avatar\n \"\"\"\n pass\n return avatar\n else:\n return None\n\nclass EditChoreForm(forms.Form):\n title = forms.CharField(help_text = 'Enter a chore name')\n description = forms.CharField(help_text = 'Enter a description')\n deadline = forms.DateField(help_text = 'When is this chore due?', widget=forms.DateInput(attrs={'type': 'date'}))\n\n def clean_deadline(self):\n if self.cleaned_data['deadline'] < datetime.date.today():\n raise ValidationError(_('Invalid date - deadline cannot be in the past'))\n return self.cleaned_data['deadline']\n\nclass CreateChoreForm(forms.Form):\n title = forms.CharField(help_text = 'Enter a chore name')\n description = forms.CharField(help_text = 'Enter a description')\n deadline = forms.DateField(help_text = 'When is this chore due?', widget=forms.DateInput(attrs={'type': 'date'}))\n\n def clean_title(self):\n data = self.cleaned_data['title']\n # Check title is not longer than 200 characters\n if len(data) > 200:\n raise ValidationError(_('Invalid title - cannot be longer than 200 characters'))\n return data\n\n def clean_description(self):\n data = self.cleaned_data['description']\n # Check description is not longer than 500\n if len(data) > 500:\n raise ValidationError(_('Invalid description - cannot be longer than 500 characters'))\n return data\n\n def clean_created_on(self):\n data = self.cleaned_data['created_on']\n # Check date is not in future.\n if data > datetime.date.today():\n raise ValidationError(_('Invalid date - created_on cannot be in the future'))\n return data\n\n def clean_deadline(self):\n data = self.cleaned_data['deadline']\n # Check date is not in past.\n if data < datetime.date.today():\n raise ValidationError(_('Invalid date - deadline cannot be in the past'))\n return data\n\nclass CreateUserForm(forms.Form):\n username = forms.CharField(help_text = 'Enter a username')\n email = forms.EmailField(help_text = 'Enter an email')\n password = forms.CharField(widget=forms.PasswordInput)\n\n class Meta:\n model = User\n\nclass EditUserForm(forms.Form):\n username = 
forms.CharField(help_text = 'Enter a username')\n email = forms.EmailField(help_text = 'Enter an email')\n password = forms.CharField(widget=forms.PasswordInput)\n\n class Meta:\n model = User\n\nclass CreateHomeForm(forms.Form):\n name = forms.CharField(max_length=100, help_text='Enter your Home Name')\n address = forms.CharField(max_length=100, help_text='Enter your Address')\n leaseStart = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))\n leaseEnds = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))\n\nclass EditHomeForm(forms.Form):\n name = forms.CharField(max_length=100, help_text='Enter your Home Name')\n address = forms.CharField(max_length=100, help_text='Enter your Address')\n leaseStart = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))\n leaseEnds = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))\n\nclass CreateTopicForm(forms.Form):\n title = forms.CharField(help_text='Enter a topic name')\n content = MarkdownxFormField(help_text='Enter the content of the topic. You can use markdown (e.g. ###H3 Header)')\n\nclass EditTopicForm(forms.Form):\n title = forms.CharField(help_text='Enter a topic name')\n content = MarkdownxFormField(help_text='Enter the content of the topic. You can use markdown (e.g. ###H3 Header)')\n\nclass CreateEventForm(forms.Form):\n title = forms.CharField(help_text='Enter an event name')\n description = forms.CharField(widget=forms.Textarea, help_text='Enter a description of the event')\n start_time = forms.DateField(help_text='When is this event going to start?', widget=forms.DateInput(attrs={'type': 'date'}))\n end_time = forms.DateField(help_text='When is this event going to end?', widget=forms.DateInput(attrs={'type': 'date'}))\n\nclass EditEventForm(forms.Form):\n title = forms.CharField(help_text='Enter an event name')\n description = forms.CharField(widget=forms.Textarea, help_text='Enter a description of the event')\n start_time = forms.DateField(help_text='When is this event going to start?', widget=forms.DateInput(attrs={'type': 'date'}))\n end_time = forms.DateField(help_text='When is this event going to end?', widget=forms.DateInput(attrs={'type': 'date'}))\n"
},
{
"alpha_fraction": 0.8052147030830383,
"alphanum_fraction": 0.8052147030830383,
"avg_line_length": 25.079999923706055,
"blob_id": "60d0fef4651c31eded0dad2256683fbbb8c08050",
"content_id": "1f5f3d73a2e0bba09b3b0c8d138dff6882a19e20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 25,
"path": "/cleanslate/doma/admin.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom .models import Profile\nfrom .models import Home\nfrom .models import Forum\nfrom .models import Topic\nfrom .models import Chore\nfrom .models import Event\n#from .models import Review\n#from .models import Reminder\n#from .models import Transaction\n#from .models import Post\n#from .models import Village\n\nadmin.site.register(Profile)\nadmin.site.register(Home)\nadmin.site.register(Forum)\nadmin.site.register(Topic)\nadmin.site.register(Chore)\nadmin.site.register(Event)\n#admin.site.register(Review)\n#admin.site.register(Reminder)\n#admin.site.register(Transaction)\n#admin.site.register(Post)\n#admin.site.register(Village)\n"
},
{
"alpha_fraction": 0.5619469285011292,
"alphanum_fraction": 0.5943952798843384,
"avg_line_length": 25.076923370361328,
"blob_id": "972fdab65cad8a6df0526d4ff0e6dbc675d79d2b",
"content_id": "8ce27fc3bdd3ce54b7e3a62a99197edb0f72c854",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 26,
"path": "/cleanslate/doma/migrations/0016_auto_20171211_1729.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-11 22:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0015_remove_forum_created_by'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='home',\n name='forum',\n ),\n migrations.AddField(\n model_name='forum',\n name='home',\n field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='doma.Home'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5690909028053284,
"alphanum_fraction": 0.6290909051895142,
"avg_line_length": 25.190475463867188,
"blob_id": "98e9d0504910d11e935e476651804e718cc470a2",
"content_id": "afe05cada39876b96c9dafc769dbc58c78b31124",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 21,
"path": "/cleanslate/doma/migrations/0020_auto_20171212_0128.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-12 06:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0019_auto_20171212_0027'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='topic',\n name='forum',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topics', to='doma.Forum'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6670644283294678,
"alphanum_fraction": 0.6754176616668701,
"avg_line_length": 38.34272384643555,
"blob_id": "f9c8f271333d7e5b0e4dac0cae3adf873822b2cf",
"content_id": "494ffb9713456cd1d0f4eba9a3f1c99b4a48c7ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8380,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 213,
"path": "/cleanslate/doma/models.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom markdownx.models import MarkdownxField\nfrom markdownx.utils import markdownify\nfrom django.utils import timezone\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n role_choices = (\n ('a', 'Admin'),\n ('u', 'User')\n )\n STATUSES = (\n ('online', 'Online'),\n ('offline', 'Offline'),\n ('busy', 'Busy'),\n ('vacation', 'On Vacation')\n )\n phone = models.CharField(max_length=10, help_text='Enter your phone number', null=True, blank=True)\n yog = models.CharField(max_length=100, help_text='Enter your graduation date', null=True, blank=True)\n major = models.CharField(max_length=100, null=True, blank=True)\n role = models.CharField(max_length=1, choices=role_choices, default='u')\n status = models.CharField(max_length=8, help_text='Select a status for others to view', default='online', choices=STATUSES)\n bio = models.TextField(max_length=1000, help_text='Enter a brief description of yourself', blank=True)\n lastSeen = models.DateTimeField(null=True)\n home = models.ForeignKey('Home', on_delete=models.CASCADE, null=True, blank=True)\n avatar = models.ImageField(upload_to='avatars/upload/', default='avatars/default.png', blank=True)\n #smokes = models.BooleanField(default=False, help_text='Do you smoke cigarettes?')\n #bedtime = models.TimeField(null=True, blank=True, help_text='What is your usual sleep-time?')\n #pet_allergies = models.NullBooleanField(null=True, blank=True, help_text='Are you allergic to pets?')\n\n def is_admin(self):\n return self.role in 'a'\n\n def has_home(self):\n return self.home is not None\n\n def get_absolute_url(self):\n return reverse('profile-detail', args=[str(self.id)])\n\n def __str__(self):\n return '%s' % (self.user.username)\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\nclass Home(models.Model):\n created_by = models.ForeignKey('Profile', null=False, related_name=\"home_created_by\")\n name = models.CharField(max_length=100, help_text='Enter your Home Name')\n address = models.CharField(max_length=100, help_text='Enter your Address', null=True)\n leaseStart = models.DateField(null=True, blank=True)\n leaseEnds = models.DateField(null=True, blank=True)\n #village = models.ForeignKey('Village', on_delete=models.SET_NULL, null=True, blank=True)\n\n def get_absolute_url(self):\n return reverse('home-detail', args=[str(self.id)])\n\n def __str__(self):\n return \"Home: {}\".format(self.name)\n\nclass Topic(models.Model):\n title = models.CharField(max_length=200, help_text=\"Enter a topic name\")\n content = MarkdownxField()\n forum = models.ForeignKey('Forum', on_delete=models.CASCADE, null=False, related_name='topics')\n created_by = models.ForeignKey('Profile', on_delete=models.SET_NULL, null=True)\n created_on = models.DateTimeField()\n\n @property\n def formatted_markdown(self):\n return markdownify(self.content)\n\n def __str__(self):\n return 'Topic: %s' % self.title\n\n def get_absolute_url(self):\n return reverse('topic-detail', args=[str(self.id)])\n\nclass Forum(models.Model):\n title = models.CharField(max_length=200, help_text=\"Enter a forum name\")\n description = 
models.TextField(max_length=1000, help_text='Enter a description for this forum')\n home = models.OneToOneField('Home', on_delete=models.CASCADE)\n\n def get_absolute_url(self):\n return reverse('forum-detail', args=[str(self.id)])\n\n def __str__(self):\n return self.title\n\n@receiver(post_save, sender=Home)\ndef create_home_forum(sender, instance, created, **kwargs):\n if created:\n Forum.objects.create(home=instance)\n\n@receiver(post_save, sender=Home)\ndef save_home_forum(sender, instance, **kwargs):\n instance.forum.title = instance.name\n instance.forum.save()\n\nclass Chore(models.Model):\n title = models.CharField(max_length=200, help_text='Enter a chore name')\n description = models.CharField(max_length=500, help_text='Enter description')\n created_on = models.DateField()\n deadline = models.DateField(help_text='When is this chore due?')\n #owners\n\n @property\n def almost_due(self):\n if (self.deadline - timezone.now().date()).days < 2:\n return True\n else:\n return False\n\n def __str__(self):\n return 'Chore: %s' % self.title\n\n def get_absolute_url(self):\n return reverse('chore-detail', args=[str(self.id)])\n\nclass Event(models.Model):\n title = models.CharField(max_length=200, help_text='Enter an event name')\n description = models.CharField(max_length=500, help_text='Enter description')\n created_on = models.DateTimeField()\n start_time = models.DateField(help_text='When is this event going to start?')\n end_time = models.DateField(help_text='When is this event going to end?')\n home = models.ForeignKey('Home', on_delete=models.CASCADE, related_name='events')\n\n @property\n def almost_due(self):\n if (self.end_time - self.start_time).days < 2:\n return True\n else:\n return False\n\n def __str__(self):\n return 'Event: %s' % self.title\n\n def get_absolute_url(self):\n return reverse('event-detail', args=[str(self.id)])\n\n# Unfinished / unimplemented models\n\n#class Post(models.Model):\n# title = models.CharField(max_length=200, help_text='Enter a post name')\n# content = models.CharField(max_length=500, help_text='Enter content')\n# topic = models.ForeignKey('Topic', on_delete=models.SET_NULL, null=True)\n# created_by = models.ForeignKey('Profile', on_delete=models.CASCADE, null=False, related_name='op')\n# created_on = models.DateTimeField()\n\n# def __str__(self):\n# return 'Post: %s' % self.title\n\n# def get_absolute_url(self):\n# return reverse('post-detail', args=[str(self.id)])\n\n#class Transaction(models.Model):\n# title = models.CharField(max_length=200, help_text='Enter a transaction name')\n# description = models.CharField(max_length=500, help_text='Enter description')\n# created_on = models.DateField()\n# deadline = models.DateField(help_text='When is this transaction due?')\n# amount = models.IntegerField()\n # debtors\n # creditors\n # change 'amount' issue with whole numbers\n # transaction split\n\n# def __str__(self):\n# return 'Transaction: %s' % self.title\n\n# def get_absolute_url(self):\n# return reverse('transaction-detail', args=[str(self.id)])\n\n#class Reminder(models.Model):\n# title = models.CharField(max_length=200, help_text='Enter a reminder name')\n# description = models.CharField(max_length=500, help_text='Enter description')\n# created_on = models.DateField()\n# deadline = models.DateField(help_text='When is this reminder due?')\n # owners\n\n# def __str__(self):\n# return 'Reminder: %s' % self.title\n\n# def get_absolute_url(self):\n# return reverse('reminder-detail', args=[str(self.id)])\n\n#class Village(models.Model):\n# title 
= models.CharField(max_length=200, help_text=\"Enter a village name\")\n# forum = models.OneToOneField('Forum', on_delete=models.CASCADE, null=False, default=1)\n\n# def __str__(self):\n# return 'Village: %s' % self.title\n\n# def get_absolute_url(self):\n# return reverse('village-detail', args=[str(self.id)])\n\n#class Review(models.Model):\n# reviewed = models.ForeignKey('Profile', on_delete=models.SET_NULL, null=True, related_name='reviewed_user')\n# reviewedBy = models.ForeignKey('Profile', on_delete=models.SET_NULL, null=True, blank=True, related_name='reviewer') # added blank option for anonyomous reviews. Maybe changed later\n# review = models.TextField(max_length=1000, help_text='Enter your review here', default='')\n\n# def get_absolute_url(self):\n# return reverse('home-detail', args=[str(self.id)])\n\n# def __str__(self):\n# return '%s reviewed %s' % (self.reviewedBy, self.reviewed)\n"
},
{
"alpha_fraction": 0.6614173054695129,
"alphanum_fraction": 0.6614173054695129,
"avg_line_length": 30.75,
"blob_id": "e6b63e533d35a0095c6f872c40e94f5751106b30",
"content_id": "0031c670be2b5581c5e2ee0631857a68603e0b9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 4,
"path": "/README.md",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# Doma\n## Roommate Communication Platform \n\n:busts_in_silhouette: Developers: Ciaran O'Brien, Molly O'Neil, Blake Mongeon, Gianluca Artusa, Cuong Lam, Phillip Michalowski\n"
},
{
"alpha_fraction": 0.7439024448394775,
"alphanum_fraction": 0.7439024448394775,
"avg_line_length": 19.5,
"blob_id": "a0db42f922acc3c6e383ce3117db7df398c37e66",
"content_id": "1cea322896e14bad851626d2bb4730969981b84f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 4,
"path": "/cleanslate/doma/apps.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\nclass DomaConfig(AppConfig):\n name = 'doma'\n"
},
{
"alpha_fraction": 0.545187771320343,
"alphanum_fraction": 0.5745305418968201,
"avg_line_length": 32.411766052246094,
"blob_id": "2fac12aa85a58a9f3d3a5f25483ab07b3eec86fd",
"content_id": "e6a3c0331aa8f0a711106f826f5185fb669d3a36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1704,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 51,
"path": "/cleanslate/doma/migrations/0008_auto_20171101_2002.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-11-02 00:02\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0007_auto_20171101_1723'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='review',\n name='review',\n field=models.TextField(default='', help_text='Enter your review here', max_length=1000),\n ),\n migrations.AlterField(\n model_name='review',\n name='reviewedBy',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviewer', to='doma.User'),\n ),\n migrations.AlterField(\n model_name='topic',\n name='content',\n field=models.CharField(blank=True, max_length=500),\n ),\n migrations.AlterField(\n model_name='user',\n name='bio',\n field=models.TextField(blank=True, help_text='Enter a brief description of yourself', max_length=1000),\n ),\n migrations.AlterField(\n model_name='user',\n name='email',\n field=models.EmailField(blank=True, help_text='Enter your email', max_length=254),\n ),\n migrations.AlterField(\n model_name='user',\n name='lastSeen',\n field=models.DateField(null=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='password',\n field=models.CharField(help_text='Enter your password', max_length=100, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5562201142311096,
"alphanum_fraction": 0.5968899726867676,
"avg_line_length": 32.439998626708984,
"blob_id": "7a481280c59b11cf609301f56c941bb8f296d602",
"content_id": "7bcd4eb04da727820c5cc75a0aec253de84fe56f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 25,
"path": "/cleanslate/doma/migrations/0030_auto_20171212_1951.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-13 00:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0029_auto_20171212_1914'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='avatar',\n field=models.ImageField(blank=True, default='/media/avatars/default.jpg', upload_to='avatars/upload/'),\n ),\n migrations.AlterField(\n model_name='profile',\n name='status',\n field=models.CharField(choices=[('online', 'Online'), ('offline', 'Offline'), ('busy', 'Busy'), ('vacation', 'On Vacation')], default='online', help_text='Select a status for others to view', max_length=8),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6597510576248169,
"alphanum_fraction": 0.6631459593772888,
"avg_line_length": 50.98039245605469,
"blob_id": "555cfded47f0c71a37e5617890e1f823ca74dfd1",
"content_id": "e75d467aa8b7ca95aa43e030526284e6fd4c06a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2651,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 51,
"path": "/cleanslate/cleanslate/urls.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "\"\"\"cleanslate URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.conf.urls import include\nfrom django.views.generic import RedirectView\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom doma.views import home, profile, reminders, calendar, edit_chore, create_chore, delete_chore, edit_user_profile, create_user, edit_user, create_home, create_topic, edit_topic, create_event, forum, edit_home, edit_event\n#reminders, finance,\n\nurlpatterns = [\n url(r'^$', home, name='doma/login/'),\n url(r'^admin/', admin.site.urls),\n url(r'^doma/$', home, name='home'),\n url(r'^doma/profile/$', profile, name='profile'),\n url(r'^doma/reminders/$', reminders, name='reminder'),\n #url(r'^doma/finance/$', finance, name='finance'),\n url(r'^doma/calendar/$', calendar, name='calendar'),\n url(r'^doma/message-board/$', forum, name='message-board'),\n url(r'^doma/profile/(?P<pk>[-\\w]+)/edit/$', edit_user_profile, name='edit-user-profile'),\n url(r'^doma/chore/(?P<pk>[-\\w]+)/edit/$', edit_chore, name = 'edit-chore-deadline'),\n url(r'^doma/chore/create/$', create_chore, name = 'create-chore'),\n url(r'^doma/chore/(?P<pk>[-\\w]+)/delete/$', delete_chore, name = 'delete-chore'),\n url(r'^doma/user/create/$', create_user, name='create-user'),\n url(r'^doma/user/(?P<pk>[-\\w]+)/edit/$', edit_user, name='edit-user'),\n url(r'^doma/home/create/$', create_home, name='create-home'),\n url(r'^doma/home/(?P<pk>[-\\w]+)/edit/$', edit_home, name='edit-home'),\n url(r'^doma/topic/create/$', create_topic, name='create-topic'),\n url(r'^doma/topic/(?P<pk>[-\\w]+)/edit/$', edit_topic, name='edit-topic'),\n url(r'^doma/event/create/$', create_event, name='create-event'),\n url(r'^doma/event/(?P<pk>[-\\w]+)/edit/$', edit_event, name='edit-event'),\n]\n\nurlpatterns += [\n url(r'^doma/', include('django.contrib.auth.urls')),\n url(r'^markdownx/', include('markdownx.urls')),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n"
},
{
"alpha_fraction": 0.55759596824646,
"alphanum_fraction": 0.5751252174377441,
"avg_line_length": 28.950000762939453,
"blob_id": "623c8555036f20bdcc79a7ae8f83f718b9a3573d",
"content_id": "3816f9ba6a9b8bfd3900b7bdb4304a4ae96b8284",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1198,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 40,
"path": "/cleanslate/doma/migrations/0014_auto_20171211_1722.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-11 22:22\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0013_remove_profile_presence'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='home',\n name='village',\n ),\n migrations.AlterField(\n model_name='home',\n name='created_by',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='home_created_by', to='doma.Profile'),\n ),\n migrations.AlterField(\n model_name='home',\n name='forum',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='doma.Forum'),\n ),\n migrations.AlterField(\n model_name='home',\n name='leaseEnds',\n field=models.DateField(blank=True, null=True),\n ),\n migrations.AlterField(\n model_name='home',\n name='leaseStart',\n field=models.DateField(blank=True, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5535483956336975,
"alphanum_fraction": 0.6025806665420532,
"avg_line_length": 27.703702926635742,
"blob_id": "fd101d1b8ab9479d60915fa21aa16dc0e8db35da",
"content_id": "8d862dd0b14dba2fe60d4bab535353babe3aabad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 27,
"path": "/cleanslate/doma/migrations/0007_auto_20171101_1723.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-11-01 21:23\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0006_auto_20171101_1715'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='review',\n name='review',\n field=models.TextField(default='', help_text='Enter your review here', max_length=1000),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='topic',\n name='forum',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='doma.Forum'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6055834889411926,
"alphanum_fraction": 0.6068955659866333,
"avg_line_length": 37.32122802734375,
"blob_id": "197f2e2b4043b25c33ff7456ad5e266a6fc045cf",
"content_id": "7129ae68fed4e443fc3a9da2f7752342dc5e82dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13719,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 358,
"path": "/cleanslate/doma/views.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom .forms import EditProfileForm, EditChoreForm, CreateChoreForm, CreateUserForm, CreateHomeForm, EditUserForm, CreateTopicForm, EditTopicForm, CreateEventForm, EditHomeForm, EditEventForm\nfrom doma.models import User, Profile, Home, Forum, Topic, Chore, Event#,Review, Reminder, Transaction, Village, Post,\nfrom django.forms.models import model_to_dict\nfrom django.contrib import messages\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.db import IntegrityError\nfrom django.utils import timezone\nimport datetime\n\n@login_required\ndef home(request):\n \"\"\"\n View function for home page of site.\n \"\"\"\n from itertools import zip_longest\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n user_groups = list(grouper(Profile.objects.filter(home = request.user.profile.home), 2))\n\n message_board = request.user.profile.home.forum\n topics = Topic.objects.filter(forum = message_board).order_by('-created_on')\n topics = topics[:3]\n return render(\n request,\n 'home.html',\n context = {\n #'num_topics': num_topics,\n 'user_groups': user_groups,\n 'topics': topics}\n )\n\n@login_required\ndef forum(request):\n message_board = request.user.profile.home.forum\n topics = Topic.objects.filter(forum = message_board).order_by('-created_on')\n return render(\n request,\n 'message_board.html',\n context={'topics': topics}\n )\n\n\n@login_required\ndef profile(request):\n \"\"\"\n View function for individual profiles on site.\n \"\"\"\n if request.user.is_authenticated:\n chosen_user = User.objects.get(pk=request.user.id)\n status = chosen_user.profile.status\n lastSeen = chosen_user.profile.lastSeen\n bio = chosen_user.profile.bio\n email = chosen_user.email\n avatar = chosen_user.profile.avatar\n return render(\n request,\n 'profile.html',\n context = {\n 'username' : chosen_user.username,\n 'status' : status,\n 'bio': bio,\n 'email': email,\n 'lastSeen': lastSeen,\n 'avatar': avatar,\n }\n )\n else:\n return render(\n request,\n 'profile.html',\n context = {\n 'username': \"anonymous\",\n 'status': \"Online\",\n 'bio': \"This user has no bio\",\n 'email': \"\"\n }\n )\n return render(\n request,\n 'profile.html',\n context={}\n )\n\n@login_required\ndef calendar(request):\n \"\"\"\n View function for Calendar\n \"\"\"\n events = Event.objects.filter(home = request.user.profile.home)\n return render(\n request,\n 'calendar.html',\n context={'events': events}\n )\n\n@login_required\ndef reminders(request):\n \"\"\"\n View function for reminders (Later- not a separate page)\n \"\"\"\n events = Event.objects.all()\n #transactions = Transaction.objects.all()\n chores = Chore.objects.all()\n return render(\n request,\n 'reminders_list.html',\n context={\n 'events': events,\n #'transactions':transactions,\n 'chores':chores\n }\n )\n\n@login_required\ndef finance(request):\n \"\"\"\n View function for reminders (Later- not a separate page)\n \"\"\"\n finance = Transaction.objects.all()\n return render(\n request,\n 'finance_list.html',\n context={\n 'transactions': finance\n }\n )\n\n@login_required\ndef edit_user_profile(request, pk):\n \"\"\"\n View function for renewing a specific BookInstance by librarian\n 
\"\"\"\n new_profile=get_object_or_404(Profile, pk = pk)\n if request.method == 'POST':\n form = EditProfileForm(request.POST, request.FILES)\n if form.is_valid():\n new_profile.phone = form.cleaned_data['phone']\n new_profile.yog = form.cleaned_data['yog']\n new_profile.major = form.cleaned_data['major']\n new_profile.status = form.cleaned_data['status']\n new_profile.bio = form.cleaned_data['bio']\n if form.cleaned_data['home']:\n new_profile.home = Home.objects.get(pk = form.cleaned_data['home'])\n if form.clean_avatar():\n new_profile.avatar = form.clean_avatar()\n if new_profile.save():\n messages.success(request, 'You successfully updated your profile settings.')\n return HttpResponseRedirect(reverse(profile))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = EditProfileForm(initial=model_to_dict(new_profile))\n\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef edit_chore(request, pk):\n updated_chore = get_object_or_404(Chore, pk = pk)\n if request.method == 'POST':\n form = EditChoreForm(request.POST)\n if form.is_valid():\n updated_chore.title = form.cleaned_data['title']\n updated_chore.description = form.cleaned_data['description']\n if form.clean_deadline():\n updated_chore.deadline = form.clean_deadline()\n updated_chore.save()\n\n return HttpResponseRedirect(reverse(reminders))\n else:\n form = EditChoreForm(initial=model_to_dict(updated_chore))\n return render(request, 'form.html', {'form': form, 'chore': updated_chore})\n\n@login_required\ndef create_chore(request):\n if request.method == 'POST':\n form = CreateChoreForm(request.POST)\n if form.is_valid():\n chore = Chore.objects.create(\n title = form.cleaned_data['title'],\n description = form.cleaned_data['description'],\n created_on = timezone.now(),\n deadline = form.cleaned_data['deadline']\n )\n if chore.save():\n messages.success(request, 'You successfully created a chore.')\n return HttpResponseRedirect(reverse(reminders))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = CreateChoreForm()\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef delete_chore(request, pk):\n if request.method == 'POST':\n chore = get_object_or_404(Chore, pk = pk)\n chore.delete()\n\n return HttpResponseRedirect(reverse(reminders))\n return render(request, 'chore_delete_form.html', {})\n\ndef create_user(request):\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n try:\n new_user = User.objects.create_user(username = form.cleaned_data['username'], email = form.cleaned_data['email'], password = form.cleaned_data['password'])\n messages.success(request, 'You successfully created a new user. Sign in now.')\n return HttpResponseRedirect(reverse(profile))\n except IntegrityError as e:\n messages.error(request, \"You have not met Django's built in attribute requirements. 
Try using a stronger password and a longer username.\")\n return render(request, 'form.html', {'form': form})\n else:\n messages.error(request, 'Please correct the errors in the form.')\n return render(request, 'form.html', {'form': form})\n else:\n form = CreateUserForm()\n return render(request, 'form.html', {'form': form})\n\ndef edit_user(request, pk):\n updated_user = User.objects.filter(pk = pk)[0]\n if request.method == 'POST':\n form = EditUserForm(request.POST)\n if form.is_valid():\n updated_user.username = form.cleaned_data['username']\n updated_user.email = form.cleaned_data['email']\n updated_user.set_password(form.cleaned_data['password'])\n if updated_user.save():\n update_session_auth_hash(request, updated_user)\n messages.success(request, 'You successfully updated your account settings.')\n return HttpResponseRedirect(reverse(profile))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = EditUserForm(initial = model_to_dict(updated_user))\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef create_home(request):\n if request.method == 'POST':\n form = CreateHomeForm(request.POST)\n if form.is_valid():\n new_home = Home.objects.create(name = \"\", address = \"\", created_by = request.user.profile)\n new_home.name = form.cleaned_data['name']\n new_home.address = form.cleaned_data['address']\n new_home.leaseStart = form.cleaned_data['leaseStart']\n new_home.leaseEnds = form.cleaned_data['leaseEnds']\n new_home.save()\n request.user.profile.home = new_home\n request.user.profile.save()\n return HttpResponseRedirect(reverse(home))\n else:\n form = CreateHomeForm()\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef edit_home(request, pk):\n updated_home = Home.objects.filter(pk = pk)[0]\n if request.method == 'POST':\n form = EditHomeForm(request.POST)\n if form.is_valid():\n updated_home.name = form.cleaned_data['name']\n updated_home.address = form.cleaned_data['address']\n updated_home.leaseStart = form.cleaned_data['leaseStart']\n updated_home.leaseEnds = form.cleaned_data['leaseEnds']\n if updated_home.save():\n messages.success(request, 'You successfully updated the home.')\n return HttpResponseRedirect(reverse(home))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = EditHomeForm(initial = model_to_dict(updated_home))\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef create_topic(request):\n if request.method == 'POST':\n form = CreateTopicForm(request.POST)\n if form.is_valid():\n new_topic = Topic.objects.create(\n title = form.cleaned_data['title'],\n content = form.cleaned_data['content'],\n forum = request.user.profile.home.forum,\n created_by = request.user.profile,\n created_on = timezone.now()\n )\n if new_topic.save():\n messages.success(request, 'You successfully created a new topic.')\n return HttpResponseRedirect(reverse(home))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = CreateTopicForm()\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef edit_topic(request, pk):\n updated_topic = Topic.objects.filter(pk = pk)[0]\n if request.method == 'POST':\n form = EditTopicForm(request.POST)\n if form.is_valid():\n updated_topic.title = form.cleaned_data['title']\n updated_topic.content = form.cleaned_data['content']\n if updated_topic.save():\n messages.success(request, 'You successfully updated the topic.')\n return 
HttpResponseRedirect(reverse(home))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = EditTopicForm(initial = model_to_dict(updated_topic))\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef create_event(request):\n if request.method == 'POST':\n form = CreateEventForm(request.POST)\n if form.is_valid():\n new_event = Event.objects.create(\n title = form.cleaned_data['title'],\n description = form.cleaned_data['description'],\n start_time = form.cleaned_data['start_time'],\n end_time = form.cleaned_data['end_time'],\n home = request.user.profile.home,\n created_on = timezone.now()\n )\n if new_event.save():\n messages.success(request, 'You successfully created a new event.')\n return HttpResponseRedirect(reverse(calendar))\n else:\n messages.error(request, 'Please correct the errors in the form')\n else:\n form = CreateEventForm()\n return render(request, 'form.html', {'form': form})\n\n@login_required\ndef edit_event(request, pk):\n updated_event = Event.objects.filter(pk = pk)[0]\n if request.method == 'POST':\n form = EditEventForm(request.POST)\n if form.is_valid():\n updated_event.title = form.cleaned_data['title']\n updated_event.description = form.cleaned_data['description']\n updated_event.start_time = form.cleaned_data['start_time']\n updated_event.end_time = form.cleaned_data['end_time']\n if updated_event.save():\n messages.success(request, 'You successfully updated the event.')\n return HttpResponseRedirect(reverse(reminders))\n else:\n messages.error(request, 'Please correct the errors in the form.')\n else:\n form = EditEventForm(initial = model_to_dict(updated_event))\n return render(request, 'form.html', {'form': form})\n"
},
{
"alpha_fraction": 0.5347222089767456,
"alphanum_fraction": 0.581944465637207,
"avg_line_length": 29,
"blob_id": "6e5b9db4a8214327ec137282e62ab7498d8df400",
"content_id": "0e1ee2ce88749af192c91a5527735619e31aa71d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 24,
"path": "/cleanslate/doma/migrations/0019_auto_20171212_0027.py",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-12 05:27\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doma', '0018_auto_20171211_2241'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='profile',\n name='email',\n ),\n migrations.AlterField(\n model_name='profile',\n name='status',\n field=models.CharField(choices=[('online', 'Online'), ('offline', 'Offline'), ('busy', 'Busy'), ('vacation', 'On Vacation')], default='online', help_text='Select a status for others to view', max_length=8),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 24,
"blob_id": "dce9570b8d27411bdc0513f8f10d948811a45778",
"content_id": "8ab060ef91ddd6ab2dc85c62940bb46e4790148b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 25,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 1,
"path": "/cleanslate/requirements.txt",
"repo_name": "onocy/doma",
"src_encoding": "UTF-8",
"text": "django-markdownx==2.0.21\n"
}
] | 23 |
felix-martel/graph-convolutions | https://github.com/felix-martel/graph-convolutions | 6cffb1a21e13708f46074e103a71ec5b9cddec54 | 8e4e7d25fd098fe7d4267aeb39af22fdcca5c6cc | dbe8e04d681b24b1bbc62a79bc96c25850eceaa6 | refs/heads/main | 2022-12-22T17:36:15.934860 | 2020-10-05T10:59:20 | 2020-10-05T10:59:20 | 301,376,557 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5642368793487549,
"alphanum_fraction": 0.5683371424674988,
"avg_line_length": 38.55855941772461,
"blob_id": "70b4236b9f0ab3a5f37ffc63f14a3445e085503c",
"content_id": "29ab5ad504e5a4475c26ad312d6483181fde256a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4390,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 111,
"path": "/rgcn.py",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "import torch.nn as nn\nimport torch\n\n\nclass RGCNLayer(nn.Module):\n \"\"\"\n A graph convolution layer.\n\n Based on \"Modeling Relational Data with Graph Convolutional Networks\", Schlichtkrull et al 2017. For Wr,\n we use basis functions decomposition instead of block diagonal decomposition, no dropout for now, and\n featureless inputs (that is, entities are represented by one-hot vectors and have no extra features attached).\n \"\"\"\n\n def __init__(self, T, B, dim_in, dim_out, init=\"random\"):\n \"\"\"\n T: adjacency tensor (n_relations * n_entities * n_entities)\n B: number of basis functions\n dim_in: dimension of input feature vectors\n dim_out: dimension of output feature vectors\n \"\"\"\n super().__init__()\n Nr, (Ne, _) = len(T), T[0].shape\n self.V = self.init(B, dim_in, dim_out, how=init)\n self.A = self.init(Nr, B, how=init)\n self.T = T\n\n def init(self, *size, how=\"random\", fill_value=1., requires_grad=True):\n if how == \"random\":\n data = torch.rand(*size)\n elif how == \"constant\":\n data = torch.full(size, fill_value)\n else:\n raise ValueError(f\"Unsupported initialization method '{how}'\")\n return nn.Parameter(data=data, requires_grad=requires_grad)\n\n def forward(self, H):\n # Input: N * d_in\n W = torch.einsum(\"rb,bio->rio\", self.A, self.V) # -> \"R * d_in * d_out\"\n W.requires_grad_()\n if isinstance(H, torch.sparse.Tensor):\n HxW = []\n for w in W:\n h = torch.sparse.mm(H, w)\n h.requires_grad_()\n HxW.append(h)\n # H = [torch.sparse.mm(H, w) for w in W] #\n else:\n HxW = torch.matmul(H, W)\n H = []\n for a, hw in zip(self.T, HxW):\n h = torch.sparse.mm(a, hw)\n h.requires_grad_()\n H.append(h)\n # H = torch.stack([torch.sparse.mm(a, hw) for a, hw in zip(self.T, H)])\n H = torch.stack(H).sum(axis=0)\n return H\n\nclass RGCN(nn.Module):\n \"\"\"\n A relational-graph convolutional network implementation.\n\n This is only the encoder part, the decoder is not implemented yet. It can be used\n in a (semi-)supervised setting for entity classification. Based on \"Modeling\n Relational Data with Graph Convolutional Networks\", Schlichtkrull et al 2017.\n \"\"\"\n def __init__(self, T, n_classes, hidden_sizes=None, n_basis=10):\n \"\"\"\n\n :param T: adjacency tensor of the corresponding knowledge graph, as outputed\n by `utils.graph.build_adjacency_tensor`\n :param n_classes: number of classes (for the entity classification task)\n :param hidden_sizes: (optional) a list of hidden sizes for each convolution layer\n :param n_basis: the number of basis functions (named `B` in Schlichtkrull's paper). 
Can\n be an int, in which case it is used for each layer, or a list of ints (one per layer).\n \"\"\"\n super().__init__()\n Nr, (Ne, _) = len(T), T[0].shape\n self.n_relations = Nr\n self.n_entities = Ne\n self.n_classes = n_classes\n self.input_size = self.n_entities\n self.output_size = self.n_classes\n self.T = T\n\n self.convolutions = self.build_convolutions(hidden_sizes, n_basis)\n self.softmax = nn.Softmax(0)\n\n def build_convolutions(self, hidden_sizes, n_basis, init=\"random\"):\n if hidden_sizes is None:\n hidden_sizes = [32]\n hidden_sizes = [self.input_size, *hidden_sizes]\n if not isinstance(n_basis, int):\n assert len(n_basis) == len(hidden_sizes), \"You must provide a \\\n number of basis functions (`n_basis`) for each layer\"\n else:\n n_basis = [n_basis] * len(hidden_sizes)\n hidden_sizes.append(self.output_size)\n layers = []\n for input_size, hidden_size, B in zip(hidden_sizes,\n hidden_sizes[1:],\n n_basis\n ):\n conv = RGCNLayer(self.T, B, input_size, hidden_size, init=init)\n layers.append(conv)\n return nn.ModuleList(layers)\n\n def forward(self, x):\n for conv in self.convolutions:\n x = conv(x)\n x = self.softmax(x)\n return x"
},
{
"alpha_fraction": 0.6585530638694763,
"alphanum_fraction": 0.6768086552619934,
"avg_line_length": 21.707693099975586,
"blob_id": "078b289f50b7cef52585c4d3b9d9603d800b1392",
"content_id": "d9702642d098360652b2642b34a0b68039792e75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1479,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 65,
"path": "/train.py",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "import torch\n\nimport fb15k\nfrom rgcn import RGCN\nfrom utils import graph, tensor\n\nLR = 0.001\nEPOCHS = 5\nN_CLASSES = 10\nN_BASIS_FUNCTIONS = 20\n\n#\n# Loading data\n#\n\ntrain, val, test = fb15k.load(\"train\", \"valid\", \"test\")\nprint(f\"{len(train)} training triples found.\")\n\nT, e2c, e2i, r2i = graph.build_adjacency_tensor(train)\nn_relations = len(T)\nn_entities = T[0].shape[0]\nprint(f\"{n_entities} entities and {n_relations} relations found.\")\n\n# Supervised setting: each entity has a class. Here we build the ground truth, that is the expected output tensor\n# Give a unique identifier to each class\nclasses = {c: i for i, c in enumerate(set(e2c))}\nn_classes = len(classes)\ny_true = [classes[c] for c in e2c]\ny_true = torch.LongTensor(y_true)\nprint(f\"{n_classes} distinct classes found.\")\n\n\n#\n# Model definition\n#\n\nrgcn = RGCN(T,\n n_classes=n_classes,\n hidden_sizes=[64, 32, 16],\n n_basis=N_BASIS_FUNCTIONS\n )\n\nprint(rgcn)\n\n#\n# Training params\n#\n\noptim = torch.optim.Adam(rgcn.parameters(recurse=True), lr=LR)\ncross_entropy = torch.nn.CrossEntropyLoss()\n# We're in the featureless setting, so each entity is one-hot encoded, hence\n# the input data is simply the identity matrix of dim N_entities x N_entities\nI = tensor.sparse_eye(n_entities)\n\n#\n# Training\n#\n\nfor i in range(EPOCHS):\n print(f\"Step {i+1}/{EPOCHS}\")\n optim.zero_grad()\n y_pred = rgcn(I)\n loss = cross_entropy(y_pred, y_true)\n loss.backward()\n optim.step()\n\n\n\n"
},
{
"alpha_fraction": 0.6197289228439331,
"alphanum_fraction": 0.6460843086242676,
"avg_line_length": 29.204545974731445,
"blob_id": "a72b88e19595a2d6a2b8505279b66dcc0e753c24",
"content_id": "5de9d7cb42b2a98fb068985f436eb2144eb0f724",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1328,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 44,
"path": "/utils/tensor.py",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "import torch\nfrom scipy import sparse\nimport numpy as np\n\n\ndef sparse_eye(dim: int) -> torch.sparse.FloatTensor:\n \"\"\"\n Make a sparse identity matrix of dimension `dim`.\n \"\"\"\n return torch.sparse.FloatTensor(\n torch.arange(dim).repeat(2, 1),\n torch.ones(dim),\n torch.Size([dim, dim])\n )\n\ndef normalize(a):\n \"\"\"\n Normalize an adjacency matrix, as described in Kipf et al. 2017\n :param a: a ndarray representing an adjacency matrix\n :return: a normalized ndarray, with the same shape as `a`\n \"\"\"\n d = np.array(a.sum(1)).squeeze()\n d = np.divide(1, d, where=d!=0)\n d = sparse.diags(d, format=\"csr\")\n return d * a\n\ndef csr_to_torch(csr: sparse.csr_matrix) -> torch.sparse.LongTensor:\n \"\"\"\n Transform a `scipy.sparse.csr_matrix` to a sparse Torch tensor.\n\n Based on https://stackoverflow.com/questions/50665141/converting-a-scipy-coo-matrix-to-pytorch-sparse-tensor/50665264#50665264\n \"\"\"\n coo = csr.tocoo()\n t = torch.sparse.LongTensor(\n torch.LongTensor(np.vstack((coo.row, coo.col))),\n torch.FloatTensor(coo.data),\n torch.Size(coo.shape)\n )\n return t\n\ndef make_onehot(n_samples, n_classes):\n y = torch.zeros(n_samples, n_classes)\n y[torch.arange(n_samples),torch.randint(0, n_classes, (n_samples,))] = 1.\n return y"
},
{
"alpha_fraction": 0.7384615540504456,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 65,
"blob_id": "a5626f758c7db1290743e7919f4eead7108a892f",
"content_id": "7b7759881b5e81c3152bdc0aae530fe9ae1ca7ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 1,
"path": "/fb15k/__init__.py",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "from .download import FB15K_DIRNAME, FB15K_URL, load, get_classes"
},
{
"alpha_fraction": 0.639950692653656,
"alphanum_fraction": 0.6461158990859985,
"avg_line_length": 41.71052551269531,
"blob_id": "85b465d995bcb332b7b1654ea76739c1c025252e",
"content_id": "a3b7eefe226aca76ab700887a6d5fa86f93623ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1622,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 38,
"path": "/utils/graph.py",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "from collections import defaultdict\nfrom .tensor import normalize, csr_to_torch\nfrom fb15k import get_classes\nfrom scipy import sparse\n\ndef build_adjacency_tensor(triples):\n \"\"\"\n Build an adjacency tensor from a list of triples\n\n `triples` is a list of string tuples `(head, relation, tail)`. Let R be the number of distinct relations,\n and E the number of entities (heads and tails). Then, the adjacency tensor is a list [T1, T2, ..., TR], with\n each Ti a E x E normalized tensor:\n Ti = Di^-1 * Ai\n With Aijk = 1 if (e_k, r_i, e_j) is in `triples`, and 0 otherwise\n And Di a diagonal matrix containing the in-degree of each entity\n\n The function also return the class of each entity, and mappings from entities to theirs ids and from\n relations to their ids.\n \"\"\"\n hs, rs, ts = zip(*triples)\n entities = {e: i for i, e in enumerate(set(hs) | set(ts))}\n rev_entities = {i: e for e, i in entities.items()}\n relations = {r: i for i, r in enumerate(set(rs))}\n nr, ne = len(relations), len(entities)\n sorted_triples = defaultdict(list)\n for h, r, t in triples:\n sorted_triples[relations[r]].append((entities[t], entities[h]))\n A = []\n for r, coords in sorted_triples.items():\n row_inds, col_inds = zip(*coords)\n data = [1] * len(coords)\n a = sparse.csr_matrix((data, (row_inds, col_inds)), shape=(ne, ne))\n a = normalize(a)\n a = csr_to_torch(a)\n A.append(a)\n e2c = get_classes(triples)\n classes = [e2c.get(rev_entities[i], \"unknown\") for i in range(len(rev_entities))]\n return A, classes, entities, relations"
},
{
"alpha_fraction": 0.6285249590873718,
"alphanum_fraction": 0.6762472987174988,
"avg_line_length": 31.89285659790039,
"blob_id": "6fa0d28784f34cdd2b783af0b5b08a171493b840",
"content_id": "ff7c185c6e054adb2745992f9b899ddac9fdcd4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1844,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 56,
"path": "/fb15k/download.py",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDownloads the `FB15K-237` dataset. Alternatively, you can use the following commands:\n```\nwget https://download.microsoft.com/download/8/7/0/8700516A-AB3D-4850-B4BB-805C515AECE1/FB15K-237.2.zip\nunzip FB15K-237.2.zip\n```\n\"\"\"\nimport io\nimport os\nfrom collections import defaultdict, Counter\n\nimport requests\n\nfrom zipfile import ZipFile\n\nFB15K_URL = \"https://download.microsoft.com/download/8/7/0/8700516A-AB3D-4850-B4BB-805C515AECE1/FB15K-237.2.zip\"\nDIRNAME = \"./fb15k\"\nFB15K_DIRNAME = os.path.join(DIRNAME, \"Release\")\nSPLITS = {\"train\", \"test\", \"valid\"}\n\ndef download(url=FB15K_URL, to=DIRNAME):\n r = requests.get(url)\n r.raise_for_status()\n\n raw = io.BytesIO(r.content)\n with ZipFile(raw) as zfile:\n zfile.extractall(to)\n\ndef get_classes(triples):\n \"\"\"\n Heuristic for guessing entity classes in the FB15K dataset\n\n :param triples: List[str, str, str] a list of triples\n :return: Dict[str, str] a mapping from entities to classes\n \"\"\"\n rph = defaultdict(Counter)\n for h, r, t in triples:\n rph[h][r.split(\"/\")[1]] += 1\n classes = {h: rel.most_common(1)[0][0] if rel else \"unknown\" for h, rel in rph.items()}\n return classes\n\n\ndef load_one(split, dirname=DIRNAME):\n if split not in SPLITS:\n raise KeyError(f\"Invalid split '{split}'. Valid splits are {', '.join(SPLITS)}.\")\n with open(os.path.join(dirname, split + \".txt\")) as f:\n triples = [line.split() for line in f]\n return triples\n\ndef load(*splits, dirname=FB15K_DIRNAME, download_if_absent=True):\n if download_if_absent and not (os.path.exists(dirname) and os.path.exists(os.path.join(dirname, \"train.txt\"))):\n download(to=os.path.join(dirname, os.path.pardir))\n datasets = [load_one(split, dirname) for split in splits]\n if len(datasets) == 1:\n return datasets[0]\n return datasets\n\n\n"
},
{
"alpha_fraction": 0.640350878238678,
"alphanum_fraction": 0.7355889678001404,
"avg_line_length": 29.653846740722656,
"blob_id": "c5196aa6c22aec70ac3d2bb360d4731e5f7b0ac4",
"content_id": "455621ddf83e8f2446b8661c65e00bd37e3924d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 798,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 26,
"path": "/README.md",
"repo_name": "felix-martel/graph-convolutions",
"src_encoding": "UTF-8",
"text": "# graph-convolutions\nRelational Graph Convolutional Networks (R-GCNs) with PyTorch\n\n---\n\nHere are some experimentations on R-GCNs, based on the paper \n*Modeling Relational Data with Graph Convolutional Networks* from [[Schlichtkrull *et al.*]](https://arxiv.org/pdf/1703.06103.pdf).\n\n### Requirements\n\nYou'll need PyTorch, numpy, scipy.\n\n\n### Data\n\nWe use the [FB15K-237](https://www.microsoft.com/en-us/download/details.aspx?id=52312) dataset, from [[Toutanova et al. EMNLP 2015]](http://dx.doi.org/10.18653/v1/D15-1174).\nIt can be downloaded with:\n```\nwget https://download.microsoft.com/download/8/7/0/8700516A-AB3D-4850-B4BB-805C515AECE1/FB15K-237.2.zip\nunzip FB15K-237.2.zip\n```\nOr you can use directly:\n```\nimport fb15k\ntrain, test = fb15k.load(\"train\", \"test\", download_if_absent=True)\n```\n\n"
}
] | 7 |
CSUHuman-UAV-UGV-Collaboration/bebop_control
|
https://github.com/CSUHuman-UAV-UGV-Collaboration/bebop_control
|
9e404ba6c9f37078cd774d0626bd1fbb75c14d20
|
90b70e5ea7a993c6baa7eafb1d3635c9efda0f1f
|
51618740caed5b7458e1b385b93cef53d26b8b40
|
refs/heads/master
| 2021-05-03T13:51:59.218561 | 2018-04-12T19:04:29 | 2018-04-12T19:04:29 | 120,516,402 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7216981053352356,
"alphanum_fraction": 0.7339622378349304,
"avg_line_length": 30.176469802856445,
"blob_id": "65f45de890b5aec3b12a841f6e22d65dff310762",
"content_id": "11826a90e52b0ebae7f45dd658ab285d3ebe2823",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1060,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 34,
"path": "/README.md",
"repo_name": "CSUHuman-UAV-UGV-Collaboration/bebop_control",
"src_encoding": "UTF-8",
"text": "# bebop_control\n\n## TODO:\n\n* NAVIGATING THE DRONE\n\n* LANDING THE DRONE\n* ~~position the camera facing fully down (camera_control topic angular.y)~~\n* ~~get the pose of the marker using ar_track_alvar~~\n* ~~PART 1: align the drone orientation with the marker (angular velocity, rotation)~~\n* ~~convert quaternion (x,y,z,w) to euler (roll, pitch, yaw) if needed~~\n* ~~http://wiki.ros.org/tf2/Tutorials/Quaternions~~\n* ~~PART 2: align the drone position with the marker (linear velocity, translation)~~\n* ~~lower drone and land on the landing pad~~\n* improve landing performance\n\t* ~~implement 'smooth' translation~~\n\t* implement variable lock zone size\n\t* flat trim before flight\n\t* clean bottom camera\n\t* add more visual features on the landing pad\n\norb_slam_2_ros notes\n\nhttps://answers.ros.org/question/282343/working-catkinized-orb-slam-2-or-other-monocular-slam/\n\nAlso use Eigen 3.2 instead\n\nAlso needed glog_catkin https://github.com/ethz-asl/glog_catkin\n\n--> which needs \n\n--> sudo apt-get install autoconf\n\n--> https://github.com/uzh-rpg/pangolin_catkin\n"
},
{
"alpha_fraction": 0.5804776549339294,
"alphanum_fraction": 0.585669755935669,
"avg_line_length": 31.361345291137695,
"blob_id": "d36f81384e54b272c5f6987b427ae35114499031",
"content_id": "b4fd98b68b05ec88290fc87d386a698470404b38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3852,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 119,
"path": "/scripts/interface_tcp_client_turt.py",
"repo_name": "CSUHuman-UAV-UGV-Collaboration/bebop_control",
"src_encoding": "UTF-8",
"text": "import threading, socket, sys, rospy\nfrom std_msgs.msg import Empty, String\nfrom botsapp.msg import DroneStates, TurtleStates, ResourceString\n\nclass Client(object):\n def __init__(self, host, port):\n\n # initialize vars\n self.sending = False\n self.resource_string = ResourceString()\n\n # initialize ros\n rospy.on_shutdown(self.shutdown)\n rospy.loginfo(\"Interface Client Running\")\n\n # server socket connect\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n try:\n self.sock.connect((host,port))\n except:\n print 'Unable to connect'\n sys.exit()\n\n # listen daemon (thread)\n listen_thread = threading.Thread(target=self.listen)\n listen_thread.daemon = True\n listen_thread.start()\n\n # subscribers\n rospy.Subscriber('turtle_states', TurtleStates, self.states_callback)\n rospy.Subscriber('turtle_response', String, self.response_callback)\n\n # publishers\n self.pub_drone_states = rospy.Publisher('drone_states', DroneStates, queue_size=1)\n self.pub_drone_response = rospy.Publisher('drone_response', String, queue_size=1)\n self.pub_drone_request = rospy.Publisher('drone_request', String, queue_size=1)\n self.pub_turtle_request = rospy.Publisher('turtle_request', String, queue_size=1)\n\n\n def shutdown(self):\n # do cleanup here if necessary\n rospy.loginfo(\"Interface Client Shutting down\")\n self.disconnect = True\n\n\n def states_callback(self, data):\n # get drone state and prepare to send\n message = 'turtle_states ' + str(data.BotState)\n self.sending = True\n self.send(message)\n rospy.loginfo(\"Sent turtle state\")\n\n\n def response_callback(self, data):\n # get response after a task is done from the turtlebot and send\n message = 'turtle_response ' + str(data.data)\n self.sending = True\n self.send(message)\n rospy.loginfo(\"Sent turtle response\")\n\n\n def parse_publish(self, message):\n parts = message.strip().split()\n # maybe use resource strings here\n # parse which message type it is and publish\n # add more message types here\n if parts[0] == 'drone_states':\n msg = DroneStates()\n msg.DroneState = int(parts[1])\n self.pub_drone_states.publish(msg)\n elif parts[0] == 'drone_response':\n msg = String()\n msg.data = parts[1]\n self.pub_drone_response.publish(msg)\n elif parts[0] == 'drone_request':\n msg = String()\n msg.data = parts[1]\n self.pub_drone_request.publish(msg)\n elif parts[0] == 'turtle_request':\n msg = String()\n msg.data = message.replace(parts[0] + ' ','')\n print \"DEBUG: \", msg.data\n self.pub_turtle_request.publish(msg)\n else:\n rospy.loginfo(\"Invalid message recieved. Ignoring.\")\n\n\n def send(self, message):\n if self.sending == True:\n self.sending = False\n msg = message\n self.sock.send(msg)\n\n\n def listen(self):\n while True:\n data = self.sock.recv(1024)\n if not data:\n print \"Connection lost\"\n sys.exit(0)\n\n print \"Recieved \",data\n self.parse_publish(data)\n \n\nif __name__ == \"__main__\":\n try:\n rospy.init_node('interface_turtle_client')\n if(len(sys.argv) < 3):\n print 'Usage python interface_tcp_client.py hostname port'\n sys.exit()\n\n host = sys.argv[1]\n port = int(sys.argv[2])\n\n Client(host,port)\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.logerr(\"Interface Client ROS node terminated.\")\n\n"
},
{
"alpha_fraction": 0.5640149116516113,
"alphanum_fraction": 0.5671224594116211,
"avg_line_length": 28.731481552124023,
"blob_id": "3ca6139939a7099b5baade396ecc725e18d3b585",
"content_id": "8c872cfae41f49cf8d61faab3650bebd827d5d29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3218,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 108,
"path": "/scripts/interface_tcp_client.py",
"repo_name": "CSUHuman-UAV-UGV-Collaboration/bebop_control",
"src_encoding": "UTF-8",
"text": "# telnet based\n# TODO: instead of using select to monitor the socket and\n# stdin streams we need to use ros to monitor msgs,\n# maybe need multithreading\n\nimport socket, select, string, sys\nimport rospy\nimport threading\nfrom std_msgs.msg import Empty\n\nclass InterfaceClientDrone():\n def __init__(self, host, port):\n # initialize vars and ros\n self.host = host\n self.port = port\n self.message = '' # drone state to send through interface\n self.disconnect = False\n\n rospy.on_shutdown(self.shutdown)\n rospy.loginfo(\"Interface Client running...\")\n\n # connect to server\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.settimeout(2)\n\n # connect to remote host\n try:\n self.s.connect((host, port))\n except:\n print 'Unable to connect'\n sys.exit()\n\n print 'Connected to remote host. Start sending messages'\n self.prompt()\n\n # start the socket handler thread\n socket_thread = threading.Thread(target=self.socket_handler)\n socket_thread.start()\n\n # subscribers, TODO: change from empty msg to drone states\n rospy.Subscriber('drone_states', Empty, self.states_callback)\n\n # publishers, TODO: change from empty msg to turtle states\n self.pub_turtle_states = rospy.Publisher('turtle_states', Empty, queue_size=1, latch=True)\n\n\n def shutdown(self):\n # do cleanup here if necessary\n rospy.loginfo(\"Interface Client Shutting down...\")\n self.disconnect = True\n\n\n def states_callback(self, data):\n # get drone state and prepare to send\n # self.message = str(data.drone_state)\n self.message = 'test'\n # get the readable sockets\n self.s.send(self.message)\n self.prompt()\n\n\n def prompt(self):\n sys.stdout.write('<My State> ')\n sys.stdout.flush()\n\n\n def socket_handler(self):\n host = self.host\n port = self.port\n\n while True:\n socket_list = [self.s]\n\n read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])\n\n for sock in read_sockets:\n # incoming messages from remote server\n if sock == self.s:\n data = sock.recv(4096)\n if not data:\n print \"\\n disconnected from chat server\"\n sys.exit()\n else:\n sys.stdout.write(data)\n self.prompt()\n # publish here\n data = Empty()\n self.pub_turtle_states.publish(data) \n\n if self.disconnect == True:\n sys.exit(0)\n\n\n\nif __name__ == \"__main__\":\n rospy.init_node('interface_drone_client')\n try:\n if(len(sys.argv) < 3):\n print 'Usage python interface_tcp_client.py hostname port'\n sys.exit()\n\n host = sys.argv[1]\n port = int(sys.argv[2])\n\n InterfaceClientDrone(host, port)\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.logerr(\"Interface Client ROS node terminated.\")\n\n \n\n"
},
{
"alpha_fraction": 0.4947156608104706,
"alphanum_fraction": 0.5033551454544067,
"avg_line_length": 34.91265106201172,
"blob_id": "1aaf9a192b32e811c2286e014ac88c3607e821f2",
"content_id": "8cf6898791df6521f7ae3d2688d25ea5c1058a0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11922,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 332,
"path": "/scripts/control.py",
"repo_name": "CSUHuman-UAV-UGV-Collaboration/bebop_control",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Twist\n#from sensor_msgs.msg import Image\nfrom ar_track_alvar_msgs.msg import AlvarMarkers\nimport numpy as np\nfrom std_msgs.msg import Empty, String\nfrom botsapp.msg import DroneStates, TurtleStates, ResourceString\nfrom tf.msg import tfMessage\nfrom tf.transformations import quaternion_from_euler\nfrom tf.transformations import euler_from_quaternion\n\nclass BebopControl():\n def __init__(self):\n # initialize class vars, flags and ros\n self.speed = 0.1\n self.min_speed = 0.01\n self.vert_speed = 0.20\n self.land_initiated = False\n self.angle_zone = 0.10\n self.position_zone = 0.05\n self.land_zone_base = 0.10\n self.can_rotate = False\n self.can_descend = False\n self.angle_locked = False\n self.x_locked = False\n self.y_locked = False\n self.land_height = 0.5\n self.hover = False\n\n self.resource_string = ResourceString()\n self.drone_states = DroneStates()\n\n # initialize drone state in docked state (0)\n self.drone_states.DroneState = self.drone_states.DOCKED\n\n rospy.on_shutdown(self.shutdown)\n rospy.loginfo(\"Drone Hub Running\")\n\n #subscribers\n #rospy.Subscriber('bebop/initiate_landing', Empty, self.land)\n rospy.Subscriber(self.resource_string.TOPIC_DRONEREQUEST, String, self.request_callback)\n # !!! Change the callback function in marker subscriber to change landing behavior\n # !!! > self.marker_callback_simul or > self.marker_callback\n rospy.Subscriber('/ar_pose_marker', AlvarMarkers, self.marker_callback_simul)\n \n #publishers\n self.pub_takeoff = rospy.Publisher('bebop/takeoff', Empty, queue_size=1, latch=True)\n self.pub_land = rospy.Publisher('bebop/land', Empty, queue_size=1, latch=True)\n self.pub_cmd_vel = rospy.Publisher('bebop/cmd_vel', Twist, queue_size=1)\n self.pub_camera_control = rospy.Publisher('bebop/camera_control', Twist, queue_size=1)\n self.pub_response = rospy.Publisher(self.resource_string.TOPIC_DRONERESPONSE, String, queue_size=1)\n self.pub_drone_states = rospy.Publisher(self.resource_string.TOPIC_DRONESTATE, DroneStates, queue_size=1)\n\n # uncomment to enable takeoff on start here\n #takeoff = Empty()\n #self.pub_takeoff.publish(takeoff)\n\n\n def shutdown(self):\n rospy.loginfo(\"Shutting down drone_hub node.\")\n land = Empty()\n self.pub_land.publish(land)\n rospy.sleep()\n\n\n # callback to handle commands/requests\n def request_callback(self, msg):\n if msg.data == 'takeoff':\n # do takeoff\n if self.drone_states.DroneState == self.drone_states.DOCKED:\n self.takeoff()\n elif msg.data == 'land':\n # do land\n if self.drone_states.DroneState == self.drone_states.FLYING:\n self.land()\n elif msg.data == 'search':\n if self.drone_states.DroneState == self.drone_states.FLYING:\n self.search()\n else:\n # invalid\n rospy.loginfo(\"Unknown request. 
Ignoring.\")\n\n \n # takeoff\n def takeoff(self):\n takeoff = Empty()\n self.pub_takeoff.publish(takeoff)\n rospy.sleep(2)\n\n # update drone state to flying\n self.drone_states.DroneState = self.drone_states.FLYING\n self.pub_drone_states.publish(self.drone_states)\n rospy.sleep(1)\n\n # response\n response = String()\n response.data = \"1\"\n self.pub_response.publish(response)\n\n\n # landing intiation\n def land(self):\n if self.land_initiated == False:\n self.land_initiated = True\n rospy.loginfo(\"Initiating Landing Sequence.\")\n cam_msg = Twist()\n cam_msg.angular.y = -90\n self.pub_camera_control.publish(cam_msg)\n rospy.sleep(4)\n\n # update drone state to landing\n self.drone_states.DroneState = self.drone_states.LANDING\n self.pub_drone_states.publish(self.drone_states)\n rospy.loginfo(\"> Camera facing down.\")\n\n\n def search(self):\n # testing only. Simply moves forward turns around\n vel_msg = Twist()\n vel_msg.linear.x = self.speed\n \n # move forward for 1 seconds\n seconds = 0.0\n while(seconds < 1):\n self.pub_cmd_vel.publish(vel_msg)\n rospy.sleep(0.1)\n seconds += 0.1\n\n # turn around\n\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0.1\n\n seconds = 0.0\n while(seconds < 1):\n self.pub_cmd_vel.publish(vel_msg)\n rospy.sleep(0.1)\n seconds += 0.1\n\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n\n self.pub_cmd_vel.publish(vel_msg)\n\n # response\n rospy.sleep(1)\n response = String()\n response.data = \"1\"\n self.pub_response.publish(response)\n\n\n # utility function that checks if velocity message is 0\n def non_zero_vel(self, vel_msg):\n if (vel_msg.linear.x == 0 and vel_msg.linear.y == 0 and vel_msg.linear.z == 0 and vel_msg.angular.z == 0):\n return False\n\n return True\n\n \n # call back for marker pose (old)\n def marker_callback(self, data):\n \n if self.land_initiated:\n for marker in data.markers:\n vel_msg = Twist()\n q = marker.pose.pose.orientation\n roll,pitch,yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])\n\n p = marker.pose.pose.position\n print \"position\", p\n\n # rotate\n if self.can_rotate:\n print \"euler: \",roll,pitch,yaw\n if yaw < (1.57 - self.angle_zone):\n print \"rotate counter clockwise\"\n vel_msg.angular.z = 0.1\n self.angle_locked = False\n elif yaw > (1.57 + self.angle_zone):\n print \"rotate clockwise\"\n vel_msg.angular.z = -0.1\n self.angle_locked = False\n else:\n print \"angle locked\"\n vel_msg.angular.z = 0\n self.angle_locked = True\n\n # position\n\n # left-right\n if p.x > (0 + self.position_zone):\n vel_msg.linear.y = -self.speed\n self.x_locked = False\n elif p.x < (0 - self.position_zone):\n vel_msg.linear.y = self.speed\n self.x_locked = False\n else:\n vel_msg.linear.y = 0\n self.x_locked = True\n print \"x locked\"\n\n # forward-backward\n if p.y > (0 + self.position_zone):\n vel_msg.linear.x = -self.speed\n self.y_locked = False\n elif p.y < (0 - self.position_zone):\n vel_msg.linear.x = self.speed\n self.y_locked = False\n else:\n vel_msg.linear.x = 0\n self.y_locked = True\n print \"y locked\"\n\n if self.x_locked and self.y_locked:\n self.can_rotate = True\n else:\n self.can_rotate = False\n\n # descend\n if self.angle_locked == True and self.x_locked == True and self.y_locked == True:\n if p.z > self.land_height:\n vel_msg.linear.z = -(self.vert_speed)\n else:\n land = Empty()\n self.pub_land.publish(land)\n #self.initiate_landing = False\n\n self.pub_cmd_vel.publish(vel_msg)\n\n \n # new call back for marker\n def marker_callback_simul(self, data):\n \n if self.land_initiated:\n for marker in 
data.markers:\n #testing only\n self.can_rotate = True\n\n vel_msg = Twist()\n q = marker.pose.pose.orientation\n roll,pitch,yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])\n\n p = marker.pose.pose.position\n print \"position\", p\n\n # rotate\n if self.can_rotate:\n print \"euler: \",roll,pitch,yaw\n if yaw < (1.57 - self.angle_zone):\n print \"rotate counter clockwise\"\n vel_msg.angular.z = 0.05\n self.angle_locked = False\n elif yaw > (1.57 + self.angle_zone):\n print \"rotate clockwise\"\n vel_msg.angular.z = -0.05\n self.angle_locked = False\n else:\n print \"angle locked\"\n vel_msg.angular.z = 0\n self.angle_locked = True\n\n # position\n\n # left-right\n if p.x > (0 + self.position_zone):\n vel_msg.linear.y = -1 * max(self.speed * abs(p.x), self.min_speed)\n #self.x_locked = False\n elif p.x < (0 - self.position_zone):\n vel_msg.linear.y = max(self.speed * abs(p.x), self.min_speed)\n #self.x_locked = False\n else:\n vel_msg.linear.y = 0\n #self.x_locked = True\n print \"x locked\"\n\n # forward-backward\n if p.y > (0 + self.position_zone):\n vel_msg.linear.x = -1 * max(self.speed * abs(p.y), self.min_speed)\n #self.y_locked = False\n elif p.y < (0 - self.position_zone):\n vel_msg.linear.x = max(self.speed * abs(p.y), self.min_speed)\n #self.y_locked = False\n else:\n vel_msg.linear.x = 0\n #self.y_locked = True\n print \"y locked\"\n\n zone = max(self.land_zone_base * p.z, self.position_zone)\n\n if p.x < (0 + zone) and p.x > (0 - zone) and p.y < (0 + zone) and p.y > (0 - zone):\n self.can_descend = True\n else:\n self.can_descend = False\n\n # descend\n #if self.angle_locked == True and self.x_locked == True and self.y_locked == True:\n if self.angle_locked == True and self.can_descend == True:\n if p.z > self.land_height:\n vel_msg.linear.z = -(self.vert_speed)\n else:\n land = Empty()\n self.pub_land.publish(land)\n self.land_initiated = False\n\n # update drone state to docked\n self.drone_states.DroneState = self.drone_states.DOCKED\n self.pub_drone_states.publish(self.drone_states)\n rospy.sleep(1)\n\n # response\n response = String()\n response.data = \"1\"\n self.pub_response.publish(response)\n\n if(self.non_zero_vel(vel_msg)):\n self.pub_cmd_vel.publish(vel_msg)\n self.hover = False\n elif self.hover == False:\n self.pub_cmd_vel.publish(vel_msg)\n self.hover = True\n\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('drone_hub', anonymous=False)\n BebopControl()\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.logerr(\"drone_hub node terminated.\")"
},
{
"alpha_fraction": 0.6538461446762085,
"alphanum_fraction": 0.6552975177764893,
"avg_line_length": 29.600000381469727,
"blob_id": "9a181679c00f4f833112687cefe55effe9942591",
"content_id": "bd0566cd4e2df8b864071c8c67c8d986a8609ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 45,
"path": "/scripts/svo_to_nav.py",
"repo_name": "CSUHuman-UAV-UGV-Collaboration/bebop_control",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom geometry_msgs.msg import TwistWithCovariance\nfrom nav_msgs.msg import Odometry\n\nclass NavPublisher():\n def __init__(self):\n # initialize twist with covariance\n self.twist = TwistWithCovariance()\n\n rospy.init_node('bebop_svo_to_nav', anonymous=False)\n\n # subscribers (TODO)\n rospy.Subscriber('bebop/odom', Odometry, self.bebop_callback)\n rospy.Subscriber('svo/pose_imu', PoseWithCovarianceStamped, self.svo_callback) \n\n # publishers (TODO: publish to /vo for robot pose ekf)\n\n self.pub_vo = rospy.Publisher('/vo', Odometry, queue_size=10)\n\n def svo_callback(self, data):\n # converts to nav_msg odometry type msg and publish\n odom = Odometry()\n odom.pose = data.pose\n odom.twist = self.twist\n\n self.pub_vo.publish(odom)\n\n def bebop_callback(self, data):\n # gets data for velocity and covariance and store it for svo\n self.twist = data.twist\n\n def shutdown(self):\n rospy.loginfo(\"Shutting down bebop_svo_to_nav node\")\n\n\nif __name__ == '__main__':\n try:\n NavPublisher()\n rospy.loginfo(\"Starting bebop_svo_to_nav node.\")\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.logerr(\"Terminating bebop_svo_to_nav node.\")\n\n"
}
] | 5 |
timkaing/spd-2-4
|
https://github.com/timkaing/spd-2-4
|
54a85362e6702c12e0f1c49fda3331d0ecb69cf1
|
5611d735dd37ca3afacb6da7c19b49783ec61050
|
be68ef719b7c275742c281fdbf4f28599810385d
|
refs/heads/master
| 2022-04-10T03:01:20.131845 | 2020-03-03T07:31:16 | 2020-03-03T07:31:16 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6735751032829285,
"alphanum_fraction": 0.6839378476142883,
"avg_line_length": 37.79999923706055,
"blob_id": "fcabd61d9b4154052974cc8fb6ab4f2cd48b3ba7",
"content_id": "14cef0bd995d44a473a750751c0234a5db09a9d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 5,
"path": "/leetcode/1295.js",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "// Given an array nums of integers, return how many of them contain an even number of digits.\n\nconst findNumbers = (nums) => {\n return nums.filter(x => x.toString().length %2 == 0).length;\n}"
},
{
"alpha_fraction": 0.6427184343338013,
"alphanum_fraction": 0.6563106775283813,
"avg_line_length": 38.69230651855469,
"blob_id": "51393754142a3056ec64453f0ce16c576c42016e",
"content_id": "ef0097e9f1e4b20839de2e7802c80502b557580e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 13,
"path": "/codesignal/array_change.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# You are given an array of integers.\n# On each move you are allowed to increase exactly one of its element by one.\n# Find the minimal number of moves required to obtain a strictly increasing sequence from the input.\n\ndef arrayChange(inputArray):\n moves = 0\n total_moves = 0\n for i in range(len(inputArray)-1):\n if inputArray[i] >= inputArray[i+1]:\n moves = (inputArray[i] - inputArray[i+1] + 1)\n total_moves += moves\n inputArray[i+1] += moves\n return total_moves"
},
{
"alpha_fraction": 0.30000001192092896,
"alphanum_fraction": 0.5,
"avg_line_length": 9,
"blob_id": "bf1923c64a76f4d66403b15eafe7bf9dc373d0be",
"content_id": "5f2c4bd5e273af7c300601cee2c1f7d3dc3bcd82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10,
"license_type": "no_license",
"max_line_length": 9,
"num_lines": 1,
"path": "/README.md",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# spd-2-4\n"
},
{
"alpha_fraction": 0.5062240958213806,
"alphanum_fraction": 0.5228216052055359,
"avg_line_length": 24.3157901763916,
"blob_id": "61d541f96b4901b83265ddc4896baa4942e45b9c",
"content_id": "011b61f1e84d7099728dcf41bf63fe161f7ad439",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 19,
"path": "/codesignal/palindrome_rearranging.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a string, find out if its characters can be rearranged to form a palindrome.\n\ndef palindromeRearranging(inputString):\n if len((set(inputString))) <= 1:\n return True\n dict = {}\n for i in list(inputString):\n if i in dict:\n dict[i] += 1\n else:\n dict[i] = 1\n print(dict)\n count = 0\n for i in dict.values():\n if i % 2 != 0:\n count += 1\n if count > 1:\n return False\n return True\n\n"
},
{
"alpha_fraction": 0.49339935183525085,
"alphanum_fraction": 0.5115511417388916,
"avg_line_length": 29.157894134521484,
"blob_id": "2951867d46dd3987a51cbeac753688b64a425d5d",
"content_id": "4f5da8ec15d7413f31a7876fc96a883dc738be59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 19,
"path": "/leetcode/647.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a string, your task is to count how many palindromic substrings in this string.\n\n# The substrings with different start indexes or end indexes are counted as different substrings even they consist of same characters.\n\ndef countSubstrings(self, s):\n\n count = 0\n \n for i in range(len(s)):\n j = 0\n while i-j >= 0 and i+j < len(s) and s[i-j] == s[i+j]:\n count += 1\n j += 1\n j = 0\n while i-j >= 0 and i+j+1 < len(s) and s[i-j] == s[i+j+1]:\n count += 1\n j += 1\n \n return count\n \n "
},
{
"alpha_fraction": 0.5579710006713867,
"alphanum_fraction": 0.5748792290687561,
"avg_line_length": 26.600000381469727,
"blob_id": "0beb48ad092cf85f4b96d484ec8b6bdf3910f5c3",
"content_id": "abdc8ac7932c9bd97981349d038f2b731fa7b266",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 15,
"path": "/codesignal/is_ipv4_address.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a string, find out if it satisfies the IPv4 address naming rules.\n\ndef isIPv4Address(inputString):\n inputString = inputString.split('.')\n print(inputString)\n if len(inputString) != 4:\n return False\n for i in inputString:\n try:\n x = int(i)\n if x > 255 or x < 0:\n return False\n except ValueError:\n return False\n return True\n"
},
{
"alpha_fraction": 0.6128205060958862,
"alphanum_fraction": 0.6239316463470459,
"avg_line_length": 31.52777862548828,
"blob_id": "a97188202da895e0dd7447aef9f41616e93798c0",
"content_id": "04987a339be5a9dc42567d3cae29a752a5711fea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1171,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 36,
"path": "/codesignal/box_blur.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# The pixels in the input image are represented as integers.\n# The algorithm distorts the input image in the following way:\n# Every pixel x in the output image has a value equal to\n# the average value of the pixel values from the 3 × 3 square that has its center at x\n# including x itself. All the pixels on the border of x are then removed.\n\n# Return the blurred image as an integer, with the fractions rounded down.\ndef boxBlur(image):\n height = len(image)\n width = len(image[0])\n print(\"height: \" + str(height))\n print(\"width: \" + str(width))\n result = []\n final = []\n for i in range(0, height - 2):\n for j in range(0, width - 2):\n\n result.append(three_by_three(i, j, image))\n \n result_h = height - 2\n result_w = width - 2\n\n for i in range(result_h):\n final.append(result[:result_w])\n result = result[result_w:]\n return final\n\n\ndef three_by_three(h_start, w_start, image):\n three_sqr_val = 0\n total_pixels = 9\n for i in range(h_start, h_start + 3):\n for j in range(w_start, w_start + 3):\n \n three_sqr_val += image[i][j]\n return(three_sqr_val // total_pixels)"
},
{
"alpha_fraction": 0.5451327562332153,
"alphanum_fraction": 0.5628318786621094,
"avg_line_length": 30.44444465637207,
"blob_id": "564db0b9c4aa9f1833c2ee416e146765be3b57d7",
"content_id": "23dc1f5fb0cc4bc036993e084cb23c4996a29db4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 565,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 18,
"path": "/codesignal/matrix_elements_sum.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given matrix, a rectangular matrix of integers\n# where each value represents the cost of the room\n# your task is to return the total sum of all rooms\n# that are suitable for the CodeBots\n# (ie: add up all the values that don't appear below a 0).\n\ndef matrixElementsSum(matrix):\n r = len(matrix) # 3\n c = len(matrix[0]) # 4\n sum = 0\n for i in range(r):\n for j in range(c):\n if i>0 and matrix[i-1][j] == 0:\n matrix[i][j] = 0\n sum += 0\n else:\n sum += (matrix[i][j])\n return sum"
},
{
"alpha_fraction": 0.7211220860481262,
"alphanum_fraction": 0.7227723002433777,
"avg_line_length": 39.46666717529297,
"blob_id": "e04c7e1a490e46634acd4d28eda0a2978e38f986",
"content_id": "b5739f9a76a7cbe17c125054b737dc979a44d31e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 15,
"path": "/leetcode/575.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given an integer array with even length, where different numbers in this array represent different kinds of candies.\n# Each number means one candy of the corresponding kind.\n# You need to distribute these candies equally in number to brother and sister.\n# Return the maximum number of kinds of candies the sister could gain.\n\ndef distributeCandies(self, candies):\n unique_pieces = len(set(candies))\n max_pieces = len(candies) // 2\n \n if unique_pieces > max_pieces:\n return max_pieces\n elif unique_pieces < max_pieces:\n return unique_pieces\n else:\n return max_pieces"
},
{
"alpha_fraction": 0.6835106611251831,
"alphanum_fraction": 0.6914893388748169,
"avg_line_length": 30.41666603088379,
"blob_id": "dc9301a049d18c55ebd6827fc2d7902eed9b1f0e",
"content_id": "f693ebfe73e10269ef9004248899460216cab0e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 376,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 12,
"path": "/leetcode/58.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a string s consists of upper/lower-case alphabets and empty space characters ' ',\n# return the length of last word (last word means the last appearing word if we loop from left to right) in the string.\n\n# If the last word does not exist, return 0.\n\ndef lengthOfLastWord(self, s):\n\n words = s.split()\n if not words:\n return 0\n \n return len(words[-1])"
},
{
"alpha_fraction": 0.6084070801734924,
"alphanum_fraction": 0.6216813921928406,
"avg_line_length": 33.846153259277344,
"blob_id": "93c6aa2afb7ef0206daf5ed78eb7083038db3be4",
"content_id": "91dae72ba153040c3d29539bacc8663beec34abe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 452,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 13,
"path": "/leetcode/709.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Implement function ToLowerCase() that has a string parameter str, and returns the same string in lowercase.\n\ndef toLowerCase(self, str: str) -> str:\n lowercase = ''\n for char in str:\n # ord returns an integer representing unicode\n if ord(char) >= 65 and ord(char) <= 90:\n # chr returns a character from unicode\n lowercase += chr(ord(char)+32)\n else:\n lowercase += char\n\n return lowercase"
},
{
"alpha_fraction": 0.6724137663841248,
"alphanum_fraction": 0.6743295192718506,
"avg_line_length": 42.58333206176758,
"blob_id": "5b172dfbe5fcaba812545e20019f5ed9384f328f",
"content_id": "edcfc4a7ebdd4229694edd03200019d003e1cf36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 522,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 12,
"path": "/leetcode/938.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given the root node of a binary search tree, return the sum of values of all nodes with value between L and R (inclusive).\n\n# The binary search tree is guaranteed to have unique values.\n\ndef rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n if not root:\n return 0\n elif root.val < L:\n return self.rangeSumBST(root.right, L, R)\n elif root.val > R:\n return self.rangeSumBST(root.left, L, R)\n return root.val + self.rangeSumBST(root.left, L, R) + self.rangeSumBST(root.right, L, R)"
},
{
"alpha_fraction": 0.6707589030265808,
"alphanum_fraction": 0.6841517686843872,
"avg_line_length": 41.619049072265625,
"blob_id": "52686267a1e9a11af5d8a440a8e4dfebf4e9dd7f",
"content_id": "20edcc2135f694a68ac78892f40cc18bc60d448a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 896,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 21,
"path": "/codesignal/almost_increasing_sequence.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a sequence of integers as an array, determine whether it is possible to obtain \n# a strictly increasing sequence by removing no more than one element from the array.\n\ndef check_increasing(seq):\n # This will check if it is increasing:\n # If it is return -1 else return the element at which is it not increasing\n for i in range(len(seq)-1):\n if seq[i] >= seq[i+1]:\n return i\n return -1\n\ndef almostIncreasingSequence(sequence):\n check = check_increasing(sequence)\n # List is increasing\n if check == -1:\n return True\n # Check if removing an item will make a strictly increasing list\n if check_increasing(sequence[check-1:check] + sequence[check+1:]) == -1 or check_increasing(sequence[check:check+1] + sequence[check+2:]) == -1:\n return True\n # If not return False, since more than 1 element needs to be removed\n return False\n\n"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.6641509532928467,
"avg_line_length": 37,
"blob_id": "0d7fd3e1ac3e4e7fa48cc9b9040d961b5e3e59fd",
"content_id": "fdab84118ee11d066a623bdf6eca9febc97c0f93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 7,
"path": "/codesignal/common_character_count.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given two strings, find the number of common characters between them.\n\nfrom collections import Counter\n\ndef commonCharacterCount(s1, s2):\n common_letters = Counter(s1) & Counter(s2) # => {'q': 2, 'r': 1}\n return(sum(common_letters.values())) # => r"
},
{
"alpha_fraction": 0.5638841390609741,
"alphanum_fraction": 0.5826234817504883,
"avg_line_length": 29.947368621826172,
"blob_id": "59e388bcfb3c1555b6c06938fb8ff6a67ada2624",
"content_id": "40c00ef426a41f76d77284dba475d4e2c083dfcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 599,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 19,
"path": "/leetcode/412.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Write a program that outputs the string representation of numbers from 1 to n.\n\n# But for multiples of three it should output “Fizz” instead of the number\n# For the multiples of five output “Buzz”.\n# For numbers which are multiples of both three and five output “FizzBuzz”.\n\ndef fizzBuzz(self, n):\n result = []\n for i in range(1, n+1):\n value = str(i)\n if i % 3 == 0 and i % 5 == 0:\n value = \"FizzBuzz\"\n elif i % 3 == 0:\n value = \"Fizz\"\n elif i % 5 == 0:\n value = \"Buzz\"\n result.append(value)\n\n return result"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6710526347160339,
"avg_line_length": 20.85714340209961,
"blob_id": "7b7531802c6a491c684caad72e253984a6ee5f79",
"content_id": "9c4d0e59469548524d6e8b9d9b92a76fab22a56c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 7,
"path": "/codesignal/add.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Write a function that returns the sum of two numbers\n# Input: add(1, 2)\n# Output: 3\n\ndef add(param1, param2):\n sum = param1 + param2\n return sum"
},
{
"alpha_fraction": 0.4954838752746582,
"alphanum_fraction": 0.4954838752746582,
"avg_line_length": 42.11111068725586,
"blob_id": "307b9e2818ed6d7758b6cb232e6d336700fa6499",
"content_id": "de9af62c3c0b71070f62ff0d5292c6829e338f5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 18,
"path": "/leetcode/804.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Now, given a list of words, each word can be written as a concatenation of the Morse code of each letter.\n# For example, \"cba\" can be written as \"-.-..--...\", (which is the concatenation \"-.-.\" + \"-...\" + \".-\").\n# We'll call such a concatenation, the transformation of a word.\n\n# Return the number of different transformations among all words we have.\n\ndef uniqueMorseRepresentations(self, words: List[str]) -> int:\n morse = [\".-\",\"-...\",\"-.-.\",\"-..\",\".\",\"..-.\",\"--.\",\"....\",\"..\",\".---\",\"-.-\",\".-..\",\"--\",\"-.\",\"---\",\".--.\",\"--.-\",\".-.\",\"...\",\"-\",\"..-\",\"...-\",\".--\",\"-..-\",\"-.--\",\"--..\"]\n\n result = set()\n\n for word in words:\n val = \"\"\n for letter in word:\n val += morse[ord(letter)-ord('a')]\n\n result.add(val)\n return len(result)"
},
{
"alpha_fraction": 0.5894039869308472,
"alphanum_fraction": 0.6158940196037292,
"avg_line_length": 31.428571701049805,
"blob_id": "48a93588d36a9f2b51d73bcc017ba12a548317b5",
"content_id": "7908c3c16b0b4f5aa4b467365d1b86d6e6071456",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 14,
"path": "/codesignal/alternating_sums.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# You are given an array of positive integers - the weights of the people.\n# Return an array of two integers, where the first element is the total weight of team 1\n# and the second element is the total weight of team 2 after the division is complete.\n\ndef alternatingSums(a):\n team_1 = 0\n team_2 = 0\n for i in range(len(a)):\n if i % 2 == 0:\n team_1 += a[i]\n else:\n team_2 += a[i]\n\n return [team_1, team_2]"
},
{
"alpha_fraction": 0.6908783912658691,
"alphanum_fraction": 0.6942567825317383,
"avg_line_length": 36.0625,
"blob_id": "a60d57ba6c4aaceb9474dd74e8852b110d5bbc76",
"content_id": "99bbc8defeed9aad9840771186fc1a591bcdb89c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 16,
"path": "/leetcode/771.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# You're given strings J representing the types of stones that are jewels, and S representing the stones you have.\n# Each character in S is a type of stone you have.\n# You want to know how many of the stones you have are also jewels.\n\n# The letters in J are guaranteed distinct, and all characters in J and S are letters.\n# Letters are case sensitive, so \"a\" is considered a different type of stone from \"A\".\n\ndef numJewelsInStones(self, J: str, S: str) -> int:\n jewelSet = set(J)\n jewels = 0\n\n for stone in S:\n if stone in jewelSet:\n jewels += 1\n\n return jewels"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6422287225723267,
"avg_line_length": 30,
"blob_id": "3d3ac3c9d4648591e93caac2abc013d5961b849c",
"content_id": "d2f79bc2d4eff24f382ea38e3215076c4728972e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 22,
"path": "/codesignal/is_lucky.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Ticket numbers usually consist of an even number of digits.\n# A ticket number is considered lucky\n# if the sum of the first half of the digits is equal to the sum of the second half.\n\n# Given a ticket number n, determine if it's lucky or not.\ndef isLucky(n):\n # consist of an even number of digits\n # luck if sum of first half == sum of second\n digits = list(map(int, str(n)))\n print(digits)\n left_sum = 0\n right_sum = 0\n\n for i in range(len(digits) // 2):\n left_sum += digits[i]\n \n for i in reversed(range(len(digits) // 2, len(digits))):\n right_sum += digits[i]\n\n print(left_sum)\n print(right_sum)\n return left_sum == right_sum\n"
},
{
"alpha_fraction": 0.6986755132675171,
"alphanum_fraction": 0.7019867300987244,
"avg_line_length": 32.55555725097656,
"blob_id": "90a7be6b91aedfde359d24ad4548edc4121e794c",
"content_id": "3488be132141f718f90f36555f4bf10c21cdc848",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 9,
"path": "/codesignal/all_longest_strings.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given an array of strings\n# return another array containing all of its longest strings.\n\ndef allLongestStrings(inputArray):\n result = {}\n for word in inputArray:\n result.setdefault(len(word),[]).append(word)\n longest_count = sorted(result.keys())[-1]\n return result[longest_count]\n"
},
{
"alpha_fraction": 0.5541666746139526,
"alphanum_fraction": 0.5541666746139526,
"avg_line_length": 29.125,
"blob_id": "267b9b7fb995403931168ef859ed103994ab3dcd",
"content_id": "6239f64f7237b2cfd16be520216562f6ab5be7df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 8,
"path": "/leetcode/1.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "def twoSum(self, nums, target):\n hashTable = {}\n for i, j in enumerate(nums):\n difference = target - j\n if difference in hashTable:\n return [hashTable[difference], i]\n else:\n hashTable[j] = i"
},
{
"alpha_fraction": 0.525581419467926,
"alphanum_fraction": 0.5906976461410522,
"avg_line_length": 35,
"blob_id": "ce0ff76ec2062991904036adc66ea9d6bda2fcce",
"content_id": "9a550d51308b98f0ec5b1f9c0093e1026ae77369",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 6,
"path": "/leetcode/7.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a 32-bit signed integer, reverse digits of an integer.\n\ndef reverse(self, x):\n sign = [1, -1][x < 0]\n reverse = sign * (int(str(abs(x))[::-1]))\n return reverse if -(2**31)-1 < reverse < 2**31 else 0"
},
{
"alpha_fraction": 0.5786290168762207,
"alphanum_fraction": 0.5987903475761414,
"avg_line_length": 25.157894134521484,
"blob_id": "f3a751bc39d6a60419ed1d861037b04f5c4c0c2b",
"content_id": "f4b8d4ebeb92452358dadb9322bb9457fd090c29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 19,
"path": "/codesignal/are_similar.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Two arrays are called similar if one can be obtained from another\n# by swapping at most one pair of elements in one of the arrays.\n\n# Given two arrays a and b, check whether they are similar.\n\ndef areSimilar(a, b):\n\n tmp1=list()\n tmp2=list()\n for i in range(len(a)):\n if a[i]!=b[i]:\n tmp1.append(a[i])\n tmp2.append(b[i])\n if len(tmp1)==0:\n return True\n elif len(tmp1)>2:\n return False\n else:\n return tmp1==list(reversed(tmp2))"
},
{
"alpha_fraction": 0.662162184715271,
"alphanum_fraction": 0.6756756901741028,
"avg_line_length": 40.11111068725586,
"blob_id": "b2b23f10ddef58faa3d33e215912349db59da3e9",
"content_id": "bc21dd91c0fcbdb3fab0e5824779a698c15285da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 370,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 9,
"path": "/codesignal/array_maximal_adjacent_difference.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given an array of integers,\n# find the maximal absolute difference between any two of its adjacent elements.\n\ndef arrayMaximalAdjacentDifference(inputArray):\n max = inputArray[1] - inputArray[0]\n for i in range(len(inputArray) - 1):\n if abs((inputArray[i+1] - inputArray[i])) > max:\n max = abs((inputArray[i+1] - inputArray[i]))\n return max\n"
},
{
"alpha_fraction": 0.650943398475647,
"alphanum_fraction": 0.6540880799293518,
"avg_line_length": 34.44444274902344,
"blob_id": "d6b5105a0bf1b40ddd6176dc07ffdbc16728b482",
"content_id": "8e0a81d825559688cf17652fb435db13adc84f2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 9,
"path": "/leetcode/1281.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given an integer number n, return the difference between the product of its digits and the sum of its digits.\n\ndef subtractProductAndSum(self, n: int) -> int:\n prod = 1\n list_of_nums = list(map(int, str(n)))\n for i in list_of_nums:\n prod = prod * i\n diff = prod - sum(list_of_nums)\n return diff"
},
{
"alpha_fraction": 0.593052089214325,
"alphanum_fraction": 0.6029776930809021,
"avg_line_length": 30.076923370361328,
"blob_id": "e3450cb71c91ec927186a34c6686675380cb0785",
"content_id": "f41bddd43f759120dedf6a016ea25cde8ec508a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 13,
"path": "/leetcode/1351.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a m * n matrix grid which is sorted in non-increasing order both row-wise and column-wise. \n\n# Return the number of negative numbers in grid.\n\ndef countNegatives(self, grid: List[List[int]]) -> int:\n height = len(grid)\n width = len(grid[0])\n count = 0\n for i in range(height):\n for j in range(width): \n if grid[i][j] < 0:\n count += 1\n return count"
},
{
"alpha_fraction": 0.6265060305595398,
"alphanum_fraction": 0.6345381736755371,
"avg_line_length": 33.42856979370117,
"blob_id": "cf9c997c777a9d1567aa5adfad8e46ccfa224d4b",
"content_id": "a1a5a501c974bbb76d6b4ec99aeb10055564cdf0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 7,
"path": "/leetcode/9.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Determine whether an integer is a palindrome. An integer is a palindrome when it reads the same backward as forward.\n\ndef isPalindrome(self, x):\n if x<0 or x != int(str(abs(x))[::-1]):\n return False\n else:\n return True\n "
},
{
"alpha_fraction": 0.7434210777282715,
"alphanum_fraction": 0.7516447305679321,
"avg_line_length": 59.400001525878906,
"blob_id": "ec74d328bb64f8c64efcb3ff169e33772a863428",
"content_id": "cf5c82b869d49e06672d9119013e65993a68b655",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 277,
"num_lines": 10,
"path": "/codesignal/make_array_consecutive.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Bob got statues of different sizes as a present from CodeMaster for his birthday\n# each statue having an non-negative integer size. Since he likes to make things perfect, he wants to arrange them from smallest to largest so that each statue will be bigger than the previous one exactly by 1. He may need some additional statues to be able to accomplish that.\n# Help him figure out the minimum number of additional statues needed.\n\ndef makeArrayConsecutive2(statues):\n statues.sort()\n start = statues[0]\n end = statues[-1]\n difference = end - start + 1\n return(difference - len(statues))\n "
},
{
"alpha_fraction": 0.6206373572349548,
"alphanum_fraction": 0.6282246112823486,
"avg_line_length": 32,
"blob_id": "dba252d34fdd33654059c8870c127d985c4b739f",
"content_id": "682c27f9dfc5f6f10b8bff45cd6c9d0bf4ff5366",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 659,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 20,
"path": "/codesignal/reverse_in_parentheses.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Write a function that reverses characters in (possibly nested)\n# parentheses in the input string.\n\n# Input strings will always be well-formed with matching ()s.\n\ndef reverseInParentheses(inputString):\n length = len(inputString)\n\n if '(' and ')' not in list(inputString):\n return inputString\n\n close_index = inputString.rfind(\")\")\n\n for i in reversed(range(0, close_index)):\n if inputString[i] == \")\":\n close_index = i\n if inputString[i] == \"(\":\n sub = inputString[i+1:close_index][::-1]\n new = inputString[0:i] + sub + inputString[close_index+1:]\n return reverseInParentheses(new)"
},
{
"alpha_fraction": 0.601307213306427,
"alphanum_fraction": 0.6699346303939819,
"avg_line_length": 33.11111068725586,
"blob_id": "b2c6ba87eb36b288fc41b015c72d11a1b3c7960c",
"content_id": "8b11a3b03d0c2aad91503cdae55fb08ecdd5823a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 306,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 9,
"path": "/codesignal/century_from_year.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a year, return the century it is in. \n# The first century spans from the year 1 up to and including the year 100\n# the second - from the year 101 up to and including the year 200, etc.\n\ndef centuryFromYear(year):\n if year%100 == 0:\n return year//100\n else:\n return year//100 + 1"
},
{
"alpha_fraction": 0.4545454680919647,
"alphanum_fraction": 0.5415019989013672,
"avg_line_length": 24.299999237060547,
"blob_id": "24a0ebf112ab1ff4548ccd3e8bfaff2ec2fdf2d2",
"content_id": "9e1be6dd23447b0c0a8e6321900cf748bdd6f195",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 10,
"path": "/codesignal/shape_area.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Below we will define an n-interesting polygon.\n# Your task is to find the area of a polygon for a given n.\n\ndef shapeArea(n):\n #1, 5, 13, 25\n #1, 2(4+1), 3(9+4), 4(16+9)\n if n == 1:\n return 1\n else:\n return (n**2 + (n-1)**2)\n"
},
{
"alpha_fraction": 0.5990338325500488,
"alphanum_fraction": 0.6054750680923462,
"avg_line_length": 30.049999237060547,
"blob_id": "994a241524e2dbf124f21d659fcc4af992bf3d3f",
"content_id": "da37fbde77e97f5ee7e8f99f32345a01bc34b453",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 621,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 20,
"path": "/leetcode/383.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given an arbitrary ransom note string and another string containing letters from all the magazines,\n# write a function that will return true if the ransom note can be constructed from the magazines ; otherwise, it will return false.\n\ndef canConstruct(self, ransomNote, magazine):\n \n dictionary = {}\n \n for i in magazine:\n if i in dictionary:\n dictionary[i] += 1\n else:\n dictionary[i] = 1\n \n for j in ransomNote:\n if j in dictionary and dictionary[j] > 0:\n dictionary[j] -= 1\n else:\n return False\n \n return True\n"
},
{
"alpha_fraction": 0.5991984009742737,
"alphanum_fraction": 0.6032063961029053,
"avg_line_length": 21.727272033691406,
"blob_id": "9251267ddf62a755e885feb09e7f02d14ec1760f",
"content_id": "7ab5975b4d9409bdbd827e3d7f0efef4659e1f72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 22,
"path": "/codesignal/sort_by_height.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Some people are standing in a row in a park.\n# There are trees between them which cannot be moved.\n# Your task is to rearrange the people by their heights\n# in a non-descending order without moving the trees. People can be very tall!\n\ndef sortByHeight(a):\n\n indices = []\n\n for i, val in enumerate(a):\n if val == -1:\n indices.append(i)\n \n a.sort()\n\n length = len(indices)\n a = (a[length:])\n for i in indices:\n a.insert(i, -1)\n \n \n return a"
},
{
"alpha_fraction": 0.48441246151924133,
"alphanum_fraction": 0.486810564994812,
"avg_line_length": 28.785715103149414,
"blob_id": "de24141353367127768789dccd93cf9a75131879",
"content_id": "87d17663a338d70092d560b167aefb9866e9b20e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 14,
"path": "/leetcode/1108.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "\n# Given a valid (IPv4) IP address, return a defanged version of that IP address.\n\nclass Solution:\n def defangIPaddr(self, address: str) -> str:\n return address.replace('.', '[.]')\n \n # if built in string function is not allowed\n #\n # defang = ''\n # for i in address:\n # if i == '.':\n # i = '[.]'\n # defang += i\n # return defang"
},
{
"alpha_fraction": 0.5868725776672363,
"alphanum_fraction": 0.5907335877418518,
"avg_line_length": 26.3157901763916,
"blob_id": "98c306c3532ce6a49a836cad6c79b3b5cfedb636",
"content_id": "b3c7b83aac50d9b71bf580c2c054883d4aa9782f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 19,
"path": "/leetcode/1207.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given an array of integers arr\n# write a function that returns true if and only if the number of occurrences of each value in the array is unique.\ndef uniqueOccurrences(self, arr: List[int]) -> bool:\n numdict = dict()\n\n for i in arr:\n if i in numdict:\n numdict[i] += 1\n else:\n numdict[i] = 1\n\n countList = []\n\n for count in numdict.values():\n if count not in countList:\n countList.append(count)\n else: \n return False\n return True"
},
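The same check can be written more compactly with `Counter` and a set; a sketch under the same contract (not the repo's code):

    from collections import Counter

    def unique_occurrences(arr):
        counts = list(Counter(arr).values())
        # All occurrence counts are distinct exactly when the set loses nothing.
        return len(counts) == len(set(counts))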
{
"alpha_fraction": 0.6012269854545593,
"alphanum_fraction": 0.6104294657707214,
"avg_line_length": 31.700000762939453,
"blob_id": "12438ea8ede395f7a61f7a97e5c6f4c40ed685cf",
"content_id": "2891ee9bf332cfd02c6b9f70bec74d762b9b59f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 10,
"path": "/codesignal/add_border.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Given a rectangular matrix of characters, add a border of asterisks(*) to it.\n\ndef addBorder(picture):\n row = len(picture)\n column = len(picture[0]) + 2\n for i in range(len(picture)):\n picture[i] = \"*\" + picture[i] + \"*\"\n picture.insert(0, column * '*')\n picture.append(column * '*')\n return picture"
},
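An illustrative call (input invented):

    assert addBorder(["abc", "ded"]) == ["*****", "*abc*", "*ded*", "*****"]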
{
"alpha_fraction": 0.6342412233352661,
"alphanum_fraction": 0.6498054265975952,
"avg_line_length": 38.46154022216797,
"blob_id": "f686708701cadcefc1b47c1b55e71ac3f610541b",
"content_id": "556f04251f72756a4e3eab49a433af657e020d53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 13,
"path": "/leetcode/1313.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "\n# We are given a list nums of integers representing a list compressed with run-length encoding.\n\n# Consider each adjacent pair of elements [a, b] = [nums[2*i], nums[2*i+1]] (with i >= 0).\n# For each such pair, there are a elements with value b in the decompressed list.\n\n# Return the decompressed list.class Solution:\n\ndef decompressRLElist(self, nums: List[int]) -> List[int]:\n output = []\n for i in range(len(nums)-1):\n if i%2 == 0:\n output += ([nums[i+1]] * nums[i])\n return(output)\n"
},
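A worked example of the decoding loop above (input invented; `self` is unused, so `None` is passed purely for illustration):

    # [1, 2, 3, 4] encodes one 2 followed by three 4s.
    assert decompressRLElist(None, [1, 2, 3, 4]) == [2, 4, 4, 4]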
{
"alpha_fraction": 0.6051282286643982,
"alphanum_fraction": 0.6102564334869385,
"avg_line_length": 24.399999618530273,
"blob_id": "07d9bb8c501270ea93a950cd7fdc9a54b01be99d",
"content_id": "251fe37731d7df793da199a01e9fdd8c2d20e3ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 15,
"path": "/leetcode/14.py",
"repo_name": "timkaing/spd-2-4",
"src_encoding": "UTF-8",
"text": "# Write a function to find the longest common prefix string amongst an array of strings.\n\n# If there is no common prefix, return an empty string \"\".\n\n\ndef longestCommonPrefix(self, strs):\n if not strs:\n return \"\"\n\n for index, letter in enumerate(zip(*strs)):\n if len(set(letter)) > 1:\n return(strs[0][:index])\n \n else:\n return min(strs)\n \n "
}
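Note the `for ... else` above: the `else` branch runs only when the loop finishes without returning, meaning the shortest string is itself a common prefix of all. Illustrative calls (inputs invented; `None` stands in for the unused `self`):

    assert longestCommonPrefix(None, ["flower", "flow", "flight"]) == "fl"
    assert longestCommonPrefix(None, ["ab", "abc"]) == "ab"  # exercises the for-else path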
] | 39 | fredsherbet/language-logs | https://github.com/fredsherbet/language-logs | ed530444256ef21a40215e2a9f465a4c3ec5710f | a2f1cb6bc63f644b452dcff5299a9bf6b800d4a2 | b559a9633a097ed98940c17ee87c9a957ad04e8f | refs/heads/master | 2021-09-02T04:53:14.500575 | 2017-12-30T14:15:15 | 2017-12-30T14:15:15 | 115,188,140 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7238928079605103,
"alphanum_fraction": 0.7288135886192322,
"avg_line_length": 41.53488540649414,
"blob_id": "03ea92fb00c8202ca50e8992b82fdd88c108e2f7",
"content_id": "df56eaf3c078c7f538ba4cde1430b59e65304a80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1829,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 43,
"path": "/README.md",
"repo_name": "fredsherbet/language-logs",
"src_encoding": "UTF-8",
"text": "# Language log parser\n\n(Python coding exercise solution, written by Matthew Russell, in December 2017.)\n\nParses Apache HTTP logs, and produces a monthly report, with the following detail.\n\n1. A sorted list of the top 5 languages, measured by GB of data served from requests for files of that language for that month, including how many GB were served for each language\n2. The percentage of requests that were successful (2xx return code) that month\n3. A list of all requested filenames that contained non-Ascii characters that month\n\n## Usage\n\nBy default, `report.py` reads from stdin, and prints to stdout. You can specify a specific HTTP log file or folder containing HTTP logs, using the options, as shown below.\n\n usage: report.py [-h] [-i INPUT] [-f FOLDER]\n\n Produce monthly reports for language server from its Apache HTTP logs\n\n optional arguments:\n -h, --help show this help message and exit\n -i INPUT, --input INPUT\n Apache HTTP log to parse\n -f FOLDER, --folder FOLDER\n Folder of Apache HTTP log to parse\n\nFor example, to produce a report for the logs in `/var/log/apache/access/`, and save the report to `languge-report.txt`, run the following.\n\n ./report.py -f /var/log/apache/access | tee language-report.txt\n\n## Debugging\n\nThe script produces a debugging log, `engineering.log`, which contains some\nlogging, including details for any exceptions hit or raised.\n\nThe exception `LogInputError` indicates a formatting error in the HTTP log.\n\n## Assumptions\n\nThe following are assumptions made, for future reference.\n\n* The logs are in chronological order - that allows us to free the details\n about the month when we roll around to the next one, rather than storing all\n details in memory until we've completed parsing all the logs.\n"
},
{
"alpha_fraction": 0.5334513187408447,
"alphanum_fraction": 0.5392330288887024,
"avg_line_length": 31.471263885498047,
"blob_id": "dd7797022a6728b667c56f976922295532cb207f",
"content_id": "c2e32ee70ecc7100db6031511f5e09f2a98ed405",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8475,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 261,
"path": "/report.py",
"repo_name": "fredsherbet/language-logs",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport logging\nimport sys\nimport os\nimport argparse\nimport datetime\nimport shlex\nfrom collections import Counter\n\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\nclass ReportException(Exception):\n \"\"\"Execution exception\"\"\"\n def __init__(self, message, log_line=None, exc=None):\n Exception.__init__(self, message)\n self.log_line = log_line\n self.exc = exc\n\n\nclass LogInputError(ReportException):\n \"\"\"Error parsing the log; the log is malformatted\"\"\"\n\n\ndef main():\n \"\"\"Handle command line interface - arguments, logging, and error\n reporting\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Produce monthly reports for language server from its \"\n \"Apache HTTP logs\")\n parser.add_argument(\"-i\", \"--input\", help=\"Apache HTTP log to parse\")\n parser.add_argument(\"-f\", \"--folder\", help=\"Folder of Apache HTTP log to parse\")\n args = parser.parse_args()\n\n LOG_FILE = \"engineering.log\"\n logging.basicConfig(level=logging.INFO,\n filename=LOG_FILE,\n filemode=\"w\")\n\n try:\n if args.folder:\n print get_full_report(\n file_paths=sorted(os.path.join(args.folder, f)\n for f in os.listdir(args.folder)))\n elif args.input:\n with open(args.input) as f:\n print get_full_report(f)\n else:\n print get_full_report(sys.stdin)\n except LogInputError as exc:\n logging.exception(\"Failed to parse HTTP log; failed to produce a report.\")\n sys.stderr.write(\"Failed to parse HTTP log; failed to produce a report.\")\n sys.stderr.write(exc.message)\n if exc.log_line:\n sys.stderr.write('Error handling log line:\\n ' + exc.log_line)\n sys.stderr.write(\"If needed, see further debugging information in \" + LOG_FILE)\n return 1\n except:\n logging.exception(\"Error halted execution; failed to produce a report.\")\n sys.stderr.write(\"Error halted execution; failed to produce a report.\")\n sys.stderr.write(\"See debugging information in \" + LOG_FILE)\n return 2\n return 0\n\n\ndef get_full_report(log_file=None, file_paths=None):\n def get_files():\n if log_file:\n yield log_file\n if file_paths:\n for p in file_paths:\n with open(p) as f:\n yield f\n\n report_str = ''\n report = None\n for f in get_files():\n for log_line in f.readlines():\n log.debug(\"Handling line \" + log_line)\n apache_log = ApacheLogLine(log_line)\n if report is None:\n report = Report(apache_log.month_year)\n if apache_log.month_year != report.title:\n log.info(\"Formatting report \" + report.title)\n report_str += '\\n' + str(report)\n report = Report(apache_log.month_year)\n try:\n report.add_log(apache_log)\n except ReportException as exc:\n exc.log_line = log_line\n raise\n except Exception:\n logging.exception(\"Exception handling line: \" + log_line)\n raise\n\n if report is not None:\n report_str += '\\n' + str(report)\n return report_str.strip()\n\n\nclass Report():\n \"\"\"Stores data for a report, and formats it\"\"\"\n def __init__(self, title):\n log.info(\"Start new report \" + title)\n self.title = title\n self.lang_amounts = {}\n self.non_ascii_names = []\n self.total_request_count = 0\n self.successful_request_count = 0\n\n def __str__(self):\n return \"\"\"\n{title} Report\nTop 5 Languages:\n{lang}\n\nRequest Success:\n {succ}\n\nNon-ascii filenames:\n {non-ascii}\"\"\".format(**{'title': self.title,\n 'lang': self.format_lang_table(),\n 'succ': self.format_success(),\n 'non-ascii': \"\\n \".join(self.non_ascii_names)})\n\n def format_lang_table(self):\n return \" \\n\".join(\n \"{:>7} {}\".format(self.format_bytes(s), 
l)\n for (l, s) in Counter(self.lang_amounts).most_common(5))\n\n @staticmethod\n def format_bytes(num_bytes):\n res = float(num_bytes)\n suffix = ['TB', 'GB', 'MB', 'KB', 'B']\n while res >= 1000:\n suffix.pop()\n res = res / 1024\n if res >= 100:\n fmt = \"{:.0f}{:>3}\"\n else:\n fmt = \"{:.1f}{:>3}\"\n return fmt.format(res, suffix.pop())\n\n def format_success(self):\n return \"{:.1f}% ({:d} of {:d})\".format(\n float(self.successful_request_count) * 100 / self.total_request_count,\n self.successful_request_count, self.total_request_count)\n\n def add_log(self, apache_log):\n non_ascii_file = self.get_non_ascii_file(apache_log.path)\n self.total_request_count += 1\n if non_ascii_file:\n self.non_ascii_names.append(non_ascii_file)\n if not apache_log.is_successful_request:\n return\n self.successful_request_count += 1\n try:\n try:\n self.lang_amounts[self.get_lang(apache_log.path)] += int(apache_log.bytes)\n except KeyError:\n self.lang_amounts[self.get_lang(apache_log.path)] = int(apache_log.bytes)\n except ValueError as exc:\n raise LogInputError(\"Bytes count in HTTP log is not an integer.\",\n log_line=self.log_line,\n exc=exc)\n\n @staticmethod\n def get_non_ascii_file(path):\n filename = path.split('/')[-1]\n try:\n filename.decode('ascii')\n except UnicodeDecodeError:\n return filename\n\n @staticmethod\n def get_lang(path):\n # Path is expected to be of the following format.\n # /<language>/<filename>\n # e.g. /English/some_audio_file.wav\n path_parts = path.split('/')\n if len(path_parts) != 3:\n # The path isn't the expected format, so we don't know what the\n # language is.\n return ''\n return path_parts[1]\n\n\nclass ApacheLogLine():\n NAME_MAP = {\n 'ip': 0,\n 'ruser': 1,\n 'luser': 2,\n 'time': 3,\n 'request': 4,\n 'status': 5,\n 'bytes': 6,\n }\n\n def __init__(self, log_line):\n # The apache log is made up of parts, separated by spaces.\n # Some parts can be multi-word, and so wrapped in quotes.\n # The shlex.split handles that for us.\n # There's one wrinkle; the timestamp has spaces, and isn't\n # wrapped in quotes (it's wrapped in square brackets instead),\n # so we'll normalise that, here.\n self.log_line = log_line\n self.parts = shlex.split(log_line)\n self.parts[3] += ' ' + self.parts[4]\n del self.parts[4]\n self.parts[3] = self.parts[3].strip('[]')\n\n def __str__(self):\n return \"AccessLogLine: \" + \" \".join(self.parts)\n\n def __getattr__(self, name):\n try:\n return self.parts[self.NAME_MAP[name]]\n except KeyError:\n raise AttributeError(\"{} has no {}\".format(self, name))\n except IndexError as exc:\n raise LogInputError(\"Log line is malformatted; does not have the \"\n \"expected number of parts\",\n log_line=self.log_line,\n exc=exc)\n\n @property\n def path(self):\n try:\n return self.request.split()[1].split('?', 1)[0]\n except:\n log.warning(\"Log line has no path. \" + self.log_line,\n exc_info=True)\n return \"\"\n\n @property\n def is_successful_request(self):\n return self.status.startswith('2')\n\n @property\n def datetime(self):\n formats = ['%d/%m/%Y:%H:%M:%S',\n '%d/%b/%Y:%H:%M:%S',\n ]\n for fmt in formats:\n try:\n return datetime.datetime.strptime(self.time.split()[0], fmt)\n except ValueError as exc:\n pass\n raise LogInputError(\"Could not parse date \" + self.time,\n log_line=self.log_line)\n\n @property\n def month_year(self):\n return self.datetime.strftime('%B %Y')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n"
}
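The timestamp normalisation that `ApacheLogLine` performs above is easy to miss; a minimal standalone sketch of the same `shlex` trick (the sample log line is invented):

    import shlex

    line = '1.2.3.4 - - [01/Dec/2017:10:00:00 +0000] "GET /English/file.wav HTTP/1.1" 200 1024'
    parts = shlex.split(line)
    parts[3] += ' ' + parts[4]   # re-join the two halves of the bracketed timestamp
    del parts[4]
    parts[3] = parts[3].strip('[]')
    print(parts[3])              # 01/Dec/2017:10:00:00 +0000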
] | 2 | syadav214/skMachineLearning | https://github.com/syadav214/skMachineLearning | 61f991308773bc17494a6a8993edb32c27f0f3e4 | f4f33b7eed36e5d0f618fe3735c349ce957d7c4b | 224bf38c7d944c249e354ca0ead6bd13c940248c | refs/heads/master | 2021-04-15T10:16:57.994296 | 2019-01-03T09:43:31 | 2019-01-03T09:43:31 | 126,816,067 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7060301303863525,
"alphanum_fraction": 0.733668327331543,
"avg_line_length": 25.53333282470703,
"blob_id": "d430f097f0758cdb8e7db463753b66b629b258b2",
"content_id": "19ae78b66c421bd8145ab8b0f93e19b84c1e080a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 15,
"path": "/mlSpark/README.md",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "# SparkML\n\nLearning Spark ML Libraries\n\nPrerequisites on Windows:\n\n1. Eclipse\n2. Java Runtime 1.8\n\nGetting Started:\n\n1. download the winutils.exe file & set your hadoop home pointing to it.\n2. paste winutils.exe in bin ex: C:\\hadoop\\bin\\winuitls.exe\n3. Create a maven project and Include spark-core_2.11 in pom file.\n4. Put this in code => System.setProperty(\"hadoop.home.dir\",\"C:\\hadoop\" );\n"
},
{
"alpha_fraction": 0.5542339086532593,
"alphanum_fraction": 0.571792721748352,
"avg_line_length": 35.402061462402344,
"blob_id": "018b3403efc7567a4a72f6924b51dd0b8a8e36fa",
"content_id": "3f533020057dc9581d074a42d61f3acc728c2f0b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 7062,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 194,
"path": "/mlDotNetCore/recommendEngineConsole/Program.cs",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\n\n\nclass Program\n{\n static Dictionary<string, List<Recommendation>> productRecommendations = new Dictionary<string, List<Recommendation>>();\n\n static void Main(string[] args)\n {\n Init();\n\n Console.Write(\"Enter your preference: \");\n var displayMode = Console.ReadLine();\n\n if (displayMode.ToLower().Equals(\"top\"))\n Console.WriteLine(\"\\nBest matches\");\n\n foreach (var datakey in productRecommendations)\n {\n string person = datakey.Key;\n\n var matches = TopMatches(person);\n\n if (displayMode.ToLower().Equals(\"all\"))\n {\n Console.WriteLine(\"\\nBest match for: {0}\", person);\n Console.WriteLine(\"\\nPerson Pearson Score\");//16 is lenght from Person to Pearson\n }\n\n foreach (var item in matches)\n {\n if (displayMode.ToLower().Equals(\"top\"))\n {\n Console.WriteLine(\"\\n{0} to {1} at {2} \", person, item.Name, item.Rating.ToString(\"#0.00000\"));\n break;\n }\n else\n {\n var spaceLen = 16 - item.Name.Length;\n var spaceChar = \"\";\n for (int i = 1; i <= spaceLen; i++)\n {\n spaceChar += \" \";\n }\n\n Console.WriteLine(\"{0}{1}{2}\", item.Name, spaceChar, item.Rating.ToString(\"#0.00000\"));\n }\n }\n }\n\n Console.WriteLine(\"\\nPress any key\");\n Console.ReadKey();\n }\n\n static void Init()\n {\n List<Recommendation> list = new List<Recommendation>();\n list.Add(new Recommendation() { Name = \"Wile E Coyote\", Rating = 4.5 });\n list.Add(new Recommendation() { Name = \"Bugs Bunny\", Rating = 2.5 });\n list.Add(new Recommendation() { Name = \"Elmer Fudd\", Rating = 5.0 });\n list.Add(new Recommendation() { Name = \"Foghorn Leghorn\", Rating = 2.0 });\n productRecommendations.Add(\"Rohan\", list);\n\n\n list = new List<Recommendation>();\n list.Add(new Recommendation() { Name = \"Wile E Coyote\", Rating = 5.0 });\n list.Add(new Recommendation() { Name = \"Bugs Bunny\", Rating = 3.5 });\n list.Add(new Recommendation() { Name = \"Elmer Fudd\", Rating = 1.0 });\n list.Add(new Recommendation() { Name = \"Foghorn Leghorn\", Rating = 3.5 });\n list.Add(new Recommendation() { Name = \"Daffy Duck\", Rating = 1.0 });\n productRecommendations.Add(\"Rahul\", list);\n\n list = new List<Recommendation>();\n list.Add(new Recommendation() { Name = \"Wile E Coyote\", Rating = 1.0 });\n list.Add(new Recommendation() { Name = \"Bugs Bunny\", Rating = 3.5 });\n list.Add(new Recommendation() { Name = \"Elmer Fudd\", Rating = 5.0 });\n list.Add(new Recommendation() { Name = \"Foghorn Leghorn\", Rating = 4.0 });\n list.Add(new Recommendation() { Name = \"Daffy Duck\", Rating = 4.0 });\n productRecommendations.Add(\"Adam\", list);\n\n list = new List<Recommendation>();\n list.Add(new Recommendation() { Name = \"Bugs Bunny\", Rating = 3.5 });\n list.Add(new Recommendation() { Name = \"Elmer Fudd\", Rating = 4.0 });\n list.Add(new Recommendation() { Name = \"Foghorn Leghorn\", Rating = 5.0 });\n list.Add(new Recommendation() { Name = \"Daffy Duck\", Rating = 2.5 });\n productRecommendations.Add(\"Katy\", list);\n\n list = new List<Recommendation>();\n list.Add(new Recommendation() { Name = \"Wile E Coyote\", Rating = 4.5 });\n list.Add(new Recommendation() { Name = \"Bugs Bunny\", Rating = 5.0 });\n list.Add(new Recommendation() { Name = \"Foghorn Leghorn\", Rating = 3.0 });\n productRecommendations.Add(\"Jessica\", list);\n\n\n }\n\n static IList<Recommendation> TopMatches(string name)\n {\n // grab of list of products that *excludes* the item we're searching for\n var sortedList = 
productRecommendations.Where(x => x.Key != name);\n\n sortedList.OrderByDescending(x => x.Key);\n\n List<Recommendation> recommendations = new List<Recommendation>();\n\n // go through the list and calculate the Pearson score for each product\n foreach (var entry in sortedList)\n {\n recommendations.Add(new Recommendation() { Name = entry.Key, Rating = CalculatePearsonCorrelation(name, entry.Key) });\n }\n\n recommendations = recommendations.OrderByDescending(x => x.Rating).ToList();\n\n return recommendations;\n }\n\n static double CalculatePearsonCorrelation(string product1, string product2)\n {\n List<Recommendation> shared_items = new List<Recommendation>();\n\n // collect a list of products have have reviews in common\n foreach (var item in productRecommendations[product1])\n {\n if (productRecommendations[product2].Where(x => x.Name == item.Name).Count() != 0)\n {\n shared_items.Add(item);\n }\n }\n\n if (shared_items.Count == 0)\n {\n // they have nothing in common exit with a zero\n return 0;\n }\n\n // sum up all the preferences\n double product1_review_sum = 0.00f;\n double product2_review_sum = 0.00f;\n\n // sum up the squares\n double product1_rating_square = 0f;\n double product2_rating_square = 0f;\n\n //sum up the products\n double critics_product_sum = 0f;\n\n //temp ratings\n double temp1_rating = 0f;\n double temp2_rating = 0f;\n\n foreach (Recommendation item in shared_items)\n {\n temp1_rating = productRecommendations[product1].Where(x => x.Name == item.Name).FirstOrDefault().Rating;\n temp2_rating = productRecommendations[product2].Where(x => x.Name == item.Name).FirstOrDefault().Rating;\n\n product1_review_sum += temp1_rating;\n product2_review_sum += temp2_rating;\n\n product1_rating_square += Math.Pow(temp1_rating, 2);\n product2_rating_square += Math.Pow(temp2_rating, 2);\n\n\n critics_product_sum += temp1_rating * temp2_rating;\n }\n\n\n //calculate pearson score\n double pearson_relative_sum = critics_product_sum - (product1_review_sum * product2_review_sum / shared_items.Count);\n\n //square of sum\n double product1_review_sum_square = Math.Pow(product1_review_sum, 2);\n double product2_review_sum_square = Math.Pow(product2_review_sum, 2);\n\n double product1_finalVal = product1_rating_square - product1_review_sum_square / shared_items.Count;\n double product2_finalVal = product2_rating_square - product2_review_sum_square / shared_items.Count;\n\n //density\n double density = (double)Math.Sqrt(product1_finalVal * product2_finalVal);\n\n if (density == 0)\n return 0;\n\n var pearson_score = pearson_relative_sum / density;\n return pearson_score;\n }\n}\n\npublic class Recommendation\n{\n public string Name { get; set; }\n public double Rating { get; set; }\n}\n"
},
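A compact Python sketch of the Pearson score that `CalculatePearsonCorrelation` computes, for readers more comfortable outside C# (dictionaries map item name to rating; this mirrors the C# math but is not the repo's code):

    from math import sqrt

    def pearson(r1, r2):
        shared = [k for k in r1 if k in r2]
        n = len(shared)
        if n == 0:
            return 0.0
        s1 = sum(r1[k] for k in shared)
        s2 = sum(r2[k] for k in shared)
        sq1 = sum(r1[k] ** 2 for k in shared)
        sq2 = sum(r2[k] ** 2 for k in shared)
        prod = sum(r1[k] * r2[k] for k in shared)
        den = sqrt((sq1 - s1 ** 2 / n) * (sq2 - s2 ** 2 / n))
        return (prod - s1 * s2 / n) / den if den else 0.0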
{
"alpha_fraction": 0.5590062141418457,
"alphanum_fraction": 0.6024844646453857,
"avg_line_length": 17.295454025268555,
"blob_id": "c0f7b9c7c7d0b6a6cca2ec7a69d3824b3032c1a1",
"content_id": "07e48fc182f29eb9bca3e51217d8959de6fee04a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 44,
"path": "/YouTubeTutorial_Eduonix/1.array_matrix_numpy.py",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "# runnable in jupyter labs and spyder (anaconda)\n\nimport sys\nimport numpy as np\nprint('Python: {}'.format(sys.version))\nprint('Numpy: {}'.format(np.__version__))\n\n# scalar == single value\nx = 6\nprint(x)\n\n# vector == array\nx = np.array((1, 2, 3))\nprint(x)\n\n# matrix == 2d array\nx = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nprint(x)\nprint('matrix dimenstions {}'.format(x.shape))\nprint('matrix size {}'.format(x.size))\n\nx = np.zeros((5, 5))\nprint(x)\n\nx = np.ones((3, 3))\nprint('matrix dimenstions {}'.format(x.shape))\n\n# tensor == more than 2d array\nx = np.ones((3, 3, 3))\nprint(x)\nprint('tensor dimenstions {}'.format(x.shape))\n\nA = np.ones((5, 5), dtype=np.int)\nA[0, 1] = 2\nprint(A)\n\nA[:, 1] = 3 # works on all columns\nprint(A)\n\nA = np.ones((5, 5, 5), dtype=np.int)\nprint(A)\n\nA[:, 0, 0] = 6\nprint(A)\n"
},
{
"alpha_fraction": 0.7355769276618958,
"alphanum_fraction": 0.754807710647583,
"avg_line_length": 16.33333396911621,
"blob_id": "5e009c6d5f031c403c9d53dae7b6657a93531732",
"content_id": "f93b00acffc25818a90c6f4e2dda41af180262d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 208,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 12,
"path": "/README.md",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "# skMachineLearning\n\nPrograms in Python, R and few in Java/C# to practice ML Algorithms\n\nTrying out ML\n\nPrerequisites on Windows:\n\n1. Anaconda with sypder\n2. Python supported by Anaconda\n3. R\n4. R Studio\n"
},
{
"alpha_fraction": 0.3782649338245392,
"alphanum_fraction": 0.39832088351249695,
"avg_line_length": 21.808509826660156,
"blob_id": "8becce9388be58084754179abadd68c35035b077",
"content_id": "3aaeed4019480672354310b81819d1f36fbb7789",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2144,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 94,
"path": "/mlDotNetCore/sortAlgo/Program.cs",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "using System;\n\nclass Program\n{\n static void Main(string[] args)\n {\n\n Program o = new Program();\n o.QuickSortTest();\n Console.Read();\n }\n\n void QuickSortTest()\n {\n int[] number = { 89, 76, 45, 92, 67, 12, 99 };\n QuickSort(number, 0, number.Length - 1);\n //Sorted array\n foreach (int num in number)\n {\n Console.WriteLine(\"{0}\", num);\n }\n }\n void QuickSort(int[] arr, int left, int right)\n {\n // For Recusrion \n if (left < right)\n {\n int pivot = Partition(arr, left, right);\n\n //left numbers\n if (pivot > 1)\n QuickSort(arr, left, pivot - 1);\n\n //right numbers\n if (pivot + 1 < right)\n QuickSort(arr, pivot + 1, right);\n }\n }\n\n static int Partition(int[] numbers, int left, int right)\n {\n int pivot = numbers[left];\n\n while (true)\n {\n\n while (numbers[left] < pivot)\n left++;\n while (numbers[right] > pivot)\n right--;\n if (left < right)\n {\n int temp = numbers[right];\n numbers[right] = numbers[left];\n numbers[left] = temp;\n }\n else\n {\n return right;\n }\n }\n }\n\n void BubbleSort()\n {\n int[] number = { 89, 76, 45, 92, 67, 12, 99 };\n bool flag = true;\n int temp;\n int numLength = number.Length;\n\n //sorting an array\n for (int i = 1; (i <= (numLength - 1)) && flag; i++)\n {\n flag = false;\n for (int j = 0; j < (numLength - 1); j++)\n {\n if (number[j + 1] < number[j])\n {\n temp = number[j];\n number[j] = number[j + 1];\n number[j + 1] = temp;\n flag = true;\n }\n }\n }\n\n //Sorted array\n foreach (int num in number)\n {\n Console.WriteLine(\"{0}\", num);\n }\n\n }\n}\n"
},
{
"alpha_fraction": 0.5065404772758484,
"alphanum_fraction": 0.5216006636619568,
"avg_line_length": 37.22697448730469,
"blob_id": "f90ce4ca2b8ed5cc3eeea00e0ee03df02405eb2d",
"content_id": "2673f28de30c40544dc7927cbbbcfb430566c5f5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 11620,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 304,
"path": "/mlDotNetCore/forecastEngineConsole/Program.cs",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\n\nclass Program\n{\n static void Main(string[] args)\n {\n //Console.Write(\"Enter First Number:\");\n //string a = Console.ReadLine();\n //Console.Write(\"Enter Second Number:\");\n //string b = Console.ReadLine();\n //int c = Convert.ToInt32(a) + Convert.ToInt32(b);\n // Console.WriteLine(\"Sum:\"+c.ToString());\n\n Program o = new Program();\n o.Start();\n Console.Read();\n\n }\n\n void Start()\n {\n List<Person> lstPredictPerson = new List<Person>();\n double outVal = 0;\n int outShortVal = 0;\n Console.Write(\"Enter Height:\");\n var height = double.TryParse(Console.ReadLine(), out outVal) == true ? outVal : 0;\n Console.Write(\"Enter Weight:\");\n var weight = double.TryParse(Console.ReadLine(), out outVal) == true ? outVal : 0;\n Console.Write(\"Enter FootSize:\");\n var footSize = double.TryParse(Console.ReadLine(), out outVal) == true ? outVal : 0;\n Console.Write(\"Enter Age:\");\n int age = int.TryParse(Console.ReadLine(), out outShortVal) == true ? outShortVal : 0;\n\n lstPredictPerson.Add(new Person\n {\n Height = height,\n Weight = weight,\n FootSize = footSize,\n Age = age\n });\n\n\n List<Person> lstPerson = new List<Person>();\n\n //training data. \n lstPerson.Add(new Person { Sex = \"male\", Height = 6, Weight = 90, FootSize = 12, Age = 25 });\n lstPerson.Add(new Person { Sex = \"male\", Height = 5.92, Weight = 95, FootSize = 11, Age = 30 });\n lstPerson.Add(new Person { Sex = \"male\", Height = 5.58, Weight = 85, FootSize = 12, Age = 40 });\n lstPerson.Add(new Person { Sex = \"male\", Height = 5.92, Weight = 82, FootSize = 10, Age = 20 });\n lstPerson.Add(new Person { Sex = \"male\", Height = 5, Weight = 50, FootSize = 6, Age = 15 });\n lstPerson.Add(new Person { Sex = \"male\", Height = 5, Weight = 40, FootSize = 5, Age = 10 });\n lstPerson.Add(new Person { Sex = \"male\", Height = 2, Weight = 20, FootSize = 3, Age = 4 });\n\n lstPerson.Add(new Person { Sex = \"female\", Height = 5.5, Weight = 75, FootSize = 8, Age = 40 });\n lstPerson.Add(new Person { Sex = \"female\", Height = 5.42, Weight = 65, FootSize = 7, Age = 30 });\n lstPerson.Add(new Person { Sex = \"female\", Height = 5.75, Weight = 75, FootSize = 9, Age = 20 });\n lstPerson.Add(new Person { Sex = \"female\", Height = 2, Weight = 15, FootSize = 2, Age = 3 });\n\n lstPerson.Add(new Person { Sex = \"transgender\", Height = 4, Weight = 100, FootSize = 5, Age = 35 });\n lstPerson.Add(new Person { Sex = \"transgender\", Height = 4.10, Weight = 75, FootSize = 8, Age = 30 });\n lstPerson.Add(new Person { Sex = \"transgender\", Height = 5.42, Weight = 95, FootSize = 7, Age = 25 });\n lstPerson.Add(new Person { Sex = \"transgender\", Height = 5.50, Weight = 75, FootSize = 9, Age = 20 });\n\n TrainClassifier(lstPerson, lstPredictPerson);\n //output would be transgender.\n //Console.WriteLine(classifier.Classify(new double[] { 4, 150, 12 }));\n }\n\n\n\n void TrainClassifier(List<Person> lstPerson, List<Person> lstPredictPerson)\n {\n try\n {\n List<MeanPerson> lstMeanPerson = new List<MeanPerson>();\n\n //calc data\n var results = (from singlePerson in lstPerson\n group singlePerson by singlePerson.Sex into g\n select new { Sex = g.Key, Count = g.Count() }).ToList();\n\n\n for (int j = 0; j < results.Count; j++)\n {\n var selectedSex = results[j].Sex;\n\n var commonSetData = from commonSet in lstPerson\n where commonSet.Sex == selectedSex\n select commonSet;\n\n\n if (commonSetData.Any())\n {\n var sumHeight = commonSetData.Sum(a => 
a.Height);\n var sumWeight = commonSetData.Sum(a => a.Weight);\n var sumFootSize = commonSetData.Sum(a => a.FootSize);\n var sumAge = commonSetData.Sum(a => a.Age);\n var noOfRecords = commonSetData.Count();\n\n lstMeanPerson.Add(new MeanPerson\n {\n Sex = selectedSex,\n HeightMean = sumHeight > 0 ? sumHeight / noOfRecords : 0,\n WeightMean = sumWeight > 0 ? sumWeight / noOfRecords : 0,\n FootSizeMean = sumFootSize > 0 ? sumFootSize / noOfRecords : 0,\n AgeMean = sumAge > 0 ? sumAge / noOfRecords : 0\n });\n\n }\n }\n\n\n\n //int a = 1;\n //for (int i = 1; i < 4; i++)\n //{\n // row[a] = Helper.Mean(SelectRows(table, i, string.Format(\"{0} = '{1}'\",\n // table.Columns[0].ColumnName, results[j].Name)));\n // // row[++a] = Helper.Variance(SelectRows(table, i,\n // // string.Format(\"{0} = '{1}'\",\n // // table.Columns[0].ColumnName, results[j].Name)));\n // a++;\n //}\n\n var height = lstPredictPerson[0].Height;\n var weight = lstPredictPerson[0].Weight;\n var footSize = lstPredictPerson[0].FootSize;\n var age = lstPredictPerson[0].Age;\n\n List<Height> llstHeight = new List<Height>();\n List<Weight> llstWeight = new List<Weight>();\n List<FootSize> llstFootSize = new List<FootSize>();\n List<Age> llstAge = new List<Age>();\n\n foreach (var x in lstMeanPerson)\n {\n var diffH = x.HeightMean - height;\n var diffW = x.WeightMean - weight;\n var diffFS = x.FootSizeMean - footSize;\n var diffA = x.AgeMean - age;\n\n diffH = diffH > 0 ? diffH : diffH * -1;\n diffW = diffW > 0 ? diffW : diffW * -1;\n diffFS = diffFS > 0 ? diffFS : diffFS * -1;\n diffA = diffA > 0 ? diffA : diffA * -1;\n\n llstHeight.Add(new Height { Sex = x.Sex, diff = diffH });\n llstWeight.Add(new Weight { Sex = x.Sex, diff = diffW });\n llstFootSize.Add(new FootSize { Sex = x.Sex, diff = diffFS });\n llstAge.Add(new Age { Sex = x.Sex, diff = diffA });\n\n //Console.WriteLine(\"Sex:{0}, H:{1}, W:{2}, F:{3}, A:{4}\", x.Sex, x.HeightMean, x.WeightMean, x.FootSizeMean,x.AgeMean);\n }\n\n var minH = (from singleVal in llstHeight\n where singleVal.diff == llstHeight.Min(a => a.diff)\n select singleVal.Sex).FirstOrDefault().ToString();\n\n var minW = (from singleVal in llstWeight\n where singleVal.diff == llstWeight.Min(a => a.diff)\n select singleVal.Sex).FirstOrDefault().ToString();\n\n var minFS = (from singleVal in llstFootSize\n where singleVal.diff == llstFootSize.Min(a => a.diff)\n select singleVal.Sex).FirstOrDefault().ToString();\n\n var minA = (from singleVal in llstAge\n where singleVal.diff == llstAge.Min(a => a.diff)\n select singleVal.Sex).FirstOrDefault().ToString();\n\n List<PredictSex> llstPredictSex = new List<PredictSex>();\n llstPredictSex.Add(new PredictSex { Sex = minH, diff = llstHeight.Min(a => a.diff) });\n llstPredictSex.Add(new PredictSex { Sex = minW, diff = llstWeight.Min(a => a.diff) });\n llstPredictSex.Add(new PredictSex { Sex = minFS, diff = llstFootSize.Min(a => a.diff) });\n llstPredictSex.Add(new PredictSex { Sex = minA, diff = llstAge.Min(a => a.diff) });\n\n var predict = (from singleSex in llstPredictSex\n group singleSex by singleSex.Sex into g\n select new { Name = g.Key, Count = g.Count() }).ToList();\n\n //Console.WriteLine(minH);\n //Console.WriteLine(minW);\n //Console.WriteLine(minFS);\n\n if (predict.Any())\n {\n var bestOfAll = (from singlePredict in predict\n where singlePredict.Count == predict.Max(a => a.Count)\n select singlePredict.Name).FirstOrDefault().ToString();\n Console.WriteLine(bestOfAll);\n }\n else\n {\n\n var minDiffAll = (from singleVal in llstPredictSex\n where 
singleVal.diff == llstPredictSex.Min(a => a.diff)\n select singleVal.Sex).FirstOrDefault().ToString();\n Console.WriteLine(minDiffAll);\n }\n }\n catch(Exception ex)\n {\n Console.WriteLine(ex.ToString());\n }\n }\n}\n\n\n//static string Classify(double[] obj)\n//{\n// Dictionary<string,double> score = new Dictionary<string,double>();\n\n// var results = (from myRow in dataSet.Tables[0].AsEnumerable()\n// group myRow by myRow.Field<string>(\n// dataSet.Tables[0].Columns[0].ColumnName) into g\n// select new { Name = g.Key, Count = g.Count() }).ToList();\n\n// for (int i = 0; i < results.Count; i++)\n// {\n// List<double> subScoreList = new List<double>();\n// int a = 1, b = 1;\n// for (int k = 1; k < dataSet.Tables[\"Gaussian\"].Columns.Count; k = k + 2)\n// {\n// double mean = Convert.ToDouble(dataSet.Tables[\"Gaussian\"].Rows[i][a]);\n// double variance = Convert.ToDouble(dataSet.Tables[\"Gaussian\"].Rows[i][++a]);\n// double result = Helper.NormalDist(obj[b - 1], mean, Helper.SquareRoot(variance));\n// subScoreList.Add(result);\n// a++; b++;\n// }\n\n// double finalScore = 0;\n// for (int z = 0; z < subScoreList.Count; z++)\n// {\n// if (finalScore == 0)\n// {\n// finalScore = subScoreList[z];\n// continue;\n// }\n\n// finalScore = finalScore * subScoreList[z];\n// }\n\n// score.Add(results[i].Name, finalScore * 0.5);\n// }\n\n// double maxOne = score.Max(c => c.Value);\n// var name = (from c in score\n// where c.Value == maxOne\n// select c.Key).First();\n\n// return name;\n//}\n\n\n\npublic class Person\n{\n public string Sex { get; set; }\n public double Height { get; set; }\n public double Weight { get; set; }\n public double FootSize { get; set; }\n public int Age { get; set; }\n}\n\npublic class MeanPerson\n{\n public string Sex { get; set; }\n public double HeightMean { get; set; }\n public double WeightMean { get; set; }\n public double FootSizeMean { get; set; }\n public int AgeMean { get; set; }\n}\n\npublic class Height\n{\n public string Sex { get; set; }\n public double diff { get; set; }\n}\n\npublic class Weight\n{\n public string Sex { get; set; }\n public double diff { get; set; }\n}\n\npublic class FootSize\n{\n public string Sex { get; set; }\n public double diff { get; set; }\n}\n\npublic class Age\n{\n public string Sex { get; set; }\n public double diff { get; set; }\n}\n\npublic class PredictSex\n{\n public string Sex { get; set; }\n public double diff { get; set; }\n}"
},
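Despite the Naive-Bayes remnants in the commented-out code, `TrainClassifier` above really implements a per-feature nearest-mean vote; a simplified Python sketch of that idea (names invented; the C# also breaks ties by smallest difference, omitted here):

    def predict(means, sample):
        # means: {label: {feature: mean_value}}, sample: {feature: value}
        votes = {}
        for feature, value in sample.items():
            best = min(means, key=lambda lbl: abs(means[lbl][feature] - value))
            votes[best] = votes.get(best, 0) + 1
        return max(votes, key=votes.get)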
{
"alpha_fraction": 0.7229190468788147,
"alphanum_fraction": 0.7434435486793518,
"avg_line_length": 27.29032325744629,
"blob_id": "b55f5f055ac3fafa6679fe973b1f79292dcc77f7",
"content_id": "db110179f102f2002b9ace72b19ca6d1e4f59f5b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 877,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 31,
"path": "/mlDotNetCore/README.md",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "# mlDotNetCore\n\nMachine Learning Engine in DotNet Core\n\n#Commands for Ubuntu 16.04\nsudo sh -c 'echo \"deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/dotnet-release/ xenial main\" > /etc/apt/sources.list.d/dotnetdev.list'\nsudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 417A0893\nsudo apt-get update\n\n#install core\nsudo apt-get install dotnet-dev-1.0.1\n\n#create console project\ndotnet new <template>\n\n## Templates Short Name Language Tags\n\nConsole Application console [C#], F# Common/Console\nClass library classlib [C#], F# Common/Library\nUnit Test Project mstest [C#], F# Test/MSTest \nxUnit Test Project xunit [C#], F# Test/xUnit \nASP.NET Core Empty web [C#] Web/Empty \nASP.NET Core Web App mvc [C#], F# Web/MVC \nASP.NET Core Web API webapi [C#] Web/WebAPI \nSolution File sln Solution\n\n#get dependancy\ndotnet restore\n\n#run the app\ndotnet run\n"
},
{
"alpha_fraction": 0.7890625,
"alphanum_fraction": 0.8203125,
"avg_line_length": 41.66666793823242,
"blob_id": "b49fc99c4aaaf8b7be09bb1d53b1d2651e0ef5a2",
"content_id": "377f2e5f45cb24dd9b9ec1257a5b43bcae260224",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 3,
"path": "/YouTubeTutorial_Eduonix/README.md",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "- Doing example from below YouTube video\n\n https://www.youtube.com/watch?v=T3TpdPmTLso&list=PLmAuaUS7wSOP-iTNDivR0ANKuTUhEzMe4\n"
},
{
"alpha_fraction": 0.5169628262519836,
"alphanum_fraction": 0.5735056400299072,
"avg_line_length": 12.170212745666504,
"blob_id": "78ae245fb494003d47cebb29a18724728de4740f",
"content_id": "75855b6da08ac41d031caf320c507ffaf47baa37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 47,
"path": "/YouTubeTutorial_Eduonix/2.matrix_operations.py",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "import sys\nimport numpy as np\n\nA = np.matrix([[1, 2], [3, 4]])\nB = np.ones((2, 2), dtype=np.int)\nprint(A)\nprint(B)\n\n# Addition\nC = A + B\nprint(C)\n\n# Subtraction\nC = A - B\nprint(C)\n\n# Multiplication\nC = A * B\nprint(C)\n\n# Transpose - Way 1\nA = np.array(range(9))\nA = A.reshape(3, 3)\nprint(A)\nB = A.T\nprint(B)\n\n# Transpose - Way 2\nA = np.matrix([[1, 2, 4], [2, 3, 6], [4, 7, 9]])\nprint(A)\nB = A.T\nprint(B)\n\n# Transpose - Way 3\nA = np.array(range(10))\nA = A.reshape(2, 5)\nB = A.T\nprint(B)\nprint(A.shape)\nprint(B.shape)\n\n\n# Tensor\nA = np.ones((3, 3, 3, 3, 3, 3, 3, 3, 3, 3))\nprint(A.shape)\nprint(len(A.shape))\nprint(A.size)\n"
},
{
"alpha_fraction": 0.6464937329292297,
"alphanum_fraction": 0.6810758709907532,
"avg_line_length": 22.930233001708984,
"blob_id": "0b22d8c7b89e5ac169cf04edf67beee11c33b60c",
"content_id": "2c58971dc6c4e973ee4d16d4110352e91c20f184",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 43,
"path": "/UdemyTraining/1.Regression/SBI_16Nov17_15Feb18_Predict.py",
"repo_name": "syadav214/skMachineLearning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 16 14:44:33 2018\n\n@author: santosh.yadav\n\"\"\"\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('PNB_01DecNov17_28Feb18_Stockdata.csv')\nX = dataset.iloc[:, 0:1].values\ny = dataset.iloc[:, 1:2].values\n\nfrom sklearn.svm import SVR\nsvr_lin = SVR(kernel = 'linear', C= 1e3)\n#svr_poly = SVR(kernel ='poly', C=1e3, degree=2)\nsvr_rbf = SVR(kernel='rbf',C=1e3,gamma=0.1)\n\n\nsvr_lin.fit(X, y)\n#svr_poly.fit(X, y) #this function takes time\nsvr_rbf.fit(X, y)\n\nY_predict_lin = svr_lin.predict(X)\n#Y_predict_poly = svr_poly.predict(X)\nY_predict_rbf = svr_rbf.predict(X)\n\n\nplt.scatter(X, y, color = 'black', label='Data')\nplt.plot(X,Y_predict_lin,color='green',label ='Model I')\n#plt.plot(X,Y_predict_poly,color='blue',label ='Polynomial Model')\nplt.plot(X,Y_predict_rbf,color='red',label ='Model II')\nplt.xlabel('Days')\nplt.ylabel('Price')\nplt.title('PNB')\nplt.legend()\nplt.show()\n\nsvr_rbf.predict(62)\n\n\n\n\n\n\n\n\n\n\n\n\n"
}
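One caveat: recent scikit-learn releases require a 2-D array for `predict`, so the script's final call would need to be written as below (62 is the day index used above):

    svr_rbf.predict([[62]])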
] | 10 | renjw234/django | https://github.com/renjw234/django | ac8d6cdb7fe54bb9625ae5cdd07cd93e13a62135 | 5b25b9e754001697f6bc5043bbcf80c1c3201f66 | bdee85d47d4fcdca9fa70a75de6f4317a015c1f8 | refs/heads/master | 2020-05-18T00:38:57.267327 | 2019-04-30T01:24:56 | 2019-04-30T01:24:56 | 184,068,295 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5807622671127319,
"alphanum_fraction": 0.6005747318267822,
"avg_line_length": 33.6230354309082,
"blob_id": "60ff9c40ada4e69fa50d7595a929f5329b3afd23",
"content_id": "9240536b643edcb47f1bcb450014ea286df1f7ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6858,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 191,
"path": "/dailyfresh/df_goods/views.py",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nfrom django.shortcuts import render\nfrom models import *\nfrom .models import GoodsInfo, TypeInfo\nfrom df_cart.models import CartInfo\nfrom df_user.models import UserInfo\nfrom df_user.models import GoodsBrowser\nfrom django.core.paginator import Paginator,Page\n\ndef index(request):\n #查出typeinfo中所有的商品类型\n typelist=TypeInfo.objects.all()\n #第一个商品类型按照新品id倒序查出四个\n type0=typelist[0].goodsinfo_set.order_by('-id')[0:4]\n # 第一个商品类型按照热度倒序查出四个\n type01=typelist[0].goodsinfo_set.order_by('-gclick')[0:4]\n type1=typelist[1].goodsinfo_set.order_by('-id')[0:4]\n type11=typelist[1].goodsinfo_set.order_by('-gclick')[0:4]\n type2 = typelist[2].goodsinfo_set.order_by('-id')[0:4]\n type21 = typelist[2].goodsinfo_set.order_by('-gclick')[0:4]\n type3 = typelist[0].goodsinfo_set.order_by('-id')[0:4]\n type31 = typelist[0].goodsinfo_set.order_by('-gclick')[0:4]\n type4 = typelist[1].goodsinfo_set.order_by('-id')[0:4]\n type41 = typelist[1].goodsinfo_set.order_by('-gclick')[0:4]\n type5 = typelist[2].goodsinfo_set.order_by('-id')[0:4]\n type51 = typelist[2].goodsinfo_set.order_by('-gclick')[0:4]\n #构造上下文\n # //判断是否登录\n\n # if 'user_id' in request.seesion:\n # user_id = request.seesion['user_id']\n # cart_num = CartInfo.objects.filter(user_id=int(user_id)).count()\n cart_num = 0\n try:\n user_id = request.session['user_id']\n except:\n user_id = None\n\n if user_id:\n guest_cart = 1\n cart_num = CartInfo.objects.filter(user_id=int(user_id)).count()\n context={'title':'首页','guest_cart':1,\n 'type0':type0,'type01':type01,\n 'type1': type1, 'type11': type11,\n 'type2': type2, 'type21': type21,\n 'type3': type3, 'type31': type31,\n 'type4': type4, 'type41': type41,\n 'type5': type5, 'type51': type51,\n 'cart_num':cart_num,\n }\n return render(request, 'df_goods/index.html', context)\n\n#tid为typeinfo的id,pindex为分页数,sort按什么排序\ndef list(request, tid, pindex, sort):\n #根据tid查typeinfo的id对应的数据\n typeinfo = TypeInfo.objects.get(pk=int(tid))\n #按照id倒序从typeinfo列表中排列为最新的\n news = typeinfo.goodsinfo_set.order_by('-id')[0:4]\n # goods_list=[]\n # cart_num,guest_cart = 0,0\n # user_id = request.seesion['user_id']\n # cart_num = CartInfo.objects.filter(user_id=int(user_id)).count()\n # if user_id:\n # guest_cart =1\n # cart_num = CartInfo.objects.filter(user_id=int(user_id)).count()\n #按照id倒序查\n if sort == '1':\n goods_list=GoodsInfo.objects.filter(gtype_id=int(tid)).order_by('-id')\n #按照价格倒序查\n elif sort == '2':\n goods_list=GoodsInfo.objects.filter(gtype_id=int(tid)).order_by('gprice')\n #按照热度倒序查\n elif sort == '3':\n goods_list=GoodsInfo.objects.filter(gtype_id=int(tid)).order_by('-gclick')\n #分页数\n paginator = Paginator(goods_list, 30)\n page=paginator.page(int(pindex))\n cart_num = 0\n try:\n user_id = request.session['user_id']\n except:\n user_id = None\n\n if user_id:\n guest_cart = 1\n cart_num = CartInfo.objects.filter(user_id=int(user_id)).count()\n #构造上下文\n context={'title':typeinfo.ttitle,'guest_cart':1,\n 'page':page,\n 'paginator':paginator,\n 'typeinfo':typeinfo,\n 'sort':sort,\n 'news':news,\n 'cart_num':cart_num,}\n return render(request,'df_goods/list.html',context)\n\ndef detail(request,id):\n good_id = id\n goods=GoodsInfo.objects.get(pk=int(id))\n goods.gclick=goods.gclick+1\n goods.save()\n news=goods.gtype.goodsinfo_set.order_by('-id')[0:3]\n context={\n 'title':goods.gtype.ttitle,'guest_cart':1,\n 'cart_num':cart_count(request),\n 'g':goods,'news':news,'id':id,\n }\n response=render(request,'df_goods/detail.html',context)\n\n # if 'user_id' in request.session:\n # user_id = 
request.session[\"user_id\"]\n # try:\n # browsed_good = GoodsBrowser.objects.get(user_id=int(user_id), good_id=int(good_id))\n # except Exception:\n # browsed_good = None\n # if browsed_good:\n # from datetime import datetime\n # browsed_good.browser_time = datetime.now()\n # browsed_good.save()\n # else:\n # GoodsBrowser.objects.create(user_id=int(user_id), good_id=int(good_id))\n # browsed_good = GoodsBrowser.objects.filter(user_id=int(user_id))\n # browsed_good_count = browsed_good.count()\n # if browsed_good_count > 5:\n # ordered_goods = browsed_good.count()\n # for _ in ordered_goods[5:]:\n # _.delete()\n # return response\n\n goods_ids=request.COOKIES.get('goods_ids','')\n print(goods_ids)\n goods_id='%d'%goods.id\n if goods_ids != '':\n goods_ids1=goods_ids.split(',')\n if goods_ids1.count(goods_id)>=1:\n goods_ids1.remove(goods_id)\n goods_ids1.insert(0,goods_id)\n if len(goods_ids1)>=6:\n del goods_ids1[5]\n goods_ids=','.join(goods_ids1)\n else:\n goods_ids=goods_id\n response.set_cookie('goods_ids',goods_ids)\n return response\n\ndef cart_count(request):\n if 'user_id' in request.session:\n return CartInfo.objects.filter(user_id=request.session['user_id']).count\n # return render(request,'df_cart/cart.html')\n else:\n return 0\ndef ordinary_search(request):\n\n from django.db.models import Q\n\n search_keywords = request.GET.get('q', '')\n pindex = request.GET.get('pindex', 1)\n search_status = True\n cart_num, guest_cart = 0, 0\n\n try:\n user_id = request.session['user_id']\n except:\n user_id = None\n\n if user_id:\n guest_cart = 1\n cart_num = CartInfo.objects.filter(user_id=int(user_id)).count()\n\n if search_keywords:\n goods_list = GoodsInfo.objects.filter(\n Q(gtitle__icontains=search_keywords) |\n Q(gcontent__icontains=search_keywords) |\n Q(gjianjie__icontains=search_keywords)).order_by(\"gclick\")\n else:\n search_status = False\n goods_list = GoodsInfo.objects.all().order_by(\"gclick\")\n\n paginator = Paginator(goods_list, 26)\n page = paginator.page(int(pindex))\n\n context = {\n 'title': '搜索列表',\n 'search_status': search_status,\n 'guest_cart': guest_cart,\n 'cart_num': cart_num,\n 'page': page,\n 'paginator': paginator,\n }\n return render(request, 'df_goods/ordinary_search.html', context)\n # return render(request, 'search/indexes/search.html', context)"
},
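The `detail` view above keeps a most-recently-viewed list in a comma-joined cookie; the core list manipulation, isolated as a sketch (function name and cap are illustrative, not the repo's):

    def push_recent(goods_ids, goods_id, limit=5):
        ids = goods_ids.split(',') if goods_ids else []
        if goods_id in ids:
            ids.remove(goods_id)
        ids.insert(0, goods_id)   # most recent first
        return ','.join(ids[:limit])

    assert push_recent('3,7,9', '7') == '7,3,9'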
{
"alpha_fraction": 0.7411003112792969,
"alphanum_fraction": 0.7518877983093262,
"avg_line_length": 33.296295166015625,
"blob_id": "85fc2b3fd9f449923d0665c1f77fae20fcb89e53",
"content_id": "e2775211d0596925bbeb1b61bad776905c0ff18b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1043,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 27,
"path": "/dailyfresh/df_order/models.py",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\n\nclass OrderInfo(models.Model):\n oid=models.CharField(max_length=20,primary_key=True)\n #on_delete=models.CASCADE)为级联删除\n user=models.ForeignKey('df_user.UserInfo',on_delete=models.CASCADE)\n odate=models.DateTimeField(auto_now=True)\n oIsPay=models.BooleanField(default=False)\n # DecimalField\n # max_digits\n # 数中允许的最大数目的数字。请注意此电话号码必须是大于decimal_places的,如果存在的话。\n # decimal_places\n # 存储的小数位数的号码。\n ototal=models.DecimalField(max_digits=8,decimal_places=2)\n oaddress=models.CharField(max_length=150)\n\nclass OrderDetailInfo(models.Model):\n goods=models.ForeignKey('df_goods.GoodsInfo',on_delete=models.CASCADE)\n order=models.ForeignKey(OrderInfo,on_delete=models.CASCADE)\n price=models.DecimalField(max_digits=6,decimal_places=2)\n count=models.IntegerField()\n #迁移前记得添加app\n\n"
},
{
"alpha_fraction": 0.6212766170501709,
"alphanum_fraction": 0.6212766170501709,
"avg_line_length": 22.600000381469727,
"blob_id": "3183daa2c8e78b0d10db72a623b881a3ba510031",
"content_id": "b855eff67a83238669cf844b421ad8730422eecb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 10,
"path": "/dailyfresh/df_order/urls.py",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nimport views\n\napp_name = 'df_order'\n\nurlpatterns=[\n url(r'^order/$',views.order, name=\"order\"),\n url(r'^push/$', views.order_handle, name=\"push\"),\n url(r'^pay&(\\d+)/$', views.pay, name=\"pay\")\n]"
},
{
"alpha_fraction": 0.5436893105506897,
"alphanum_fraction": 0.5436893105506897,
"avg_line_length": 33.41666793823242,
"blob_id": "4ddd02c6355cbca15bd98946edcc3b49a1d0da54",
"content_id": "15c3c6b430b5b3c0126cc4d87b0fd725a11a225a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 12,
"path": "/dailyfresh/df_cart/urls.py",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nimport views\n#\nurlpatterns=[\n url(r'^cart/$', views.cart, name=\"cart\"),\n url(r'add(\\d+)_(\\d+)/$',views.add, name=\"add\"),\n # url(r'^demo/$',views.demo),\n url(r'^cart/edit(\\d+)_(\\d+)/$', views.edit, name=\"edit\"),\n url(r'^cart/delete(\\d+)/',views.delete, name=\"delete\"),\n # url(r'^edit(\\d+)_(\\d+)/$',views.edit),\n # url(r'^delete(\\d+)_(\\d+)/$',views.delete),\n ]"
},
{
"alpha_fraction": 0.6216216087341309,
"alphanum_fraction": 0.7882882952690125,
"avg_line_length": 19.18181800842285,
"blob_id": "787f326905654078d87e28875e192ec210497a66",
"content_id": "86598e15d03b0ad51930da54364ba42c2470acef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 11,
"path": "/dailyfresh/uwsgi.ini",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "[uwsgi]\nsocket=192.168.85.128:8000\n#socket=127.0.0.1:8000\n#http=127.0.0.1:8000\nchdir=/home/ady/Desktop/pytest/dailyfresh\nwsgi-file=dailyfresh/wsgi.py\nprocesses=4\nthreads=2\nmaster=True\npidfile=uwsgi.pid\ndaemonize=uwsgi.log\n"
},
{
"alpha_fraction": 0.6231401562690735,
"alphanum_fraction": 0.6302465200424194,
"avg_line_length": 31.39568328857422,
"blob_id": "487bee47dbdf3ad371a0c9ddf96addb8efb29a91",
"content_id": "dee0cd7194cb77f35315a9218a97a1acfe95a1ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4587,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 139,
"path": "/dailyfresh/df_user/views.py",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nfrom django.shortcuts import render, redirect\nfrom models import *\nfrom hashlib import sha1\nfrom django.http import JsonResponse,HttpResponseRedirect,HttpResponse\nfrom hashlib import sha1\nfrom . import user_decorator\nfrom df_goods.models import *\nfrom df_order.models import OrderInfo,OrderDetailInfo\nfrom django.core.paginator import Paginator\n# from ..df_goods.models import *\n\n\n# Create your views here.\ndef register(request):\n return render(request,'df_user/register.html')\n\ndef register_handle(request):\n response=HttpResponse()\n post=request.POST\n uname=post.get('user_name')\n upwd=post.get('pwd')\n upwd2=post.get('cpwd')\n uemail=post.get('email')\n # upwd=post.get('pwd')\n if upwd != upwd2:\n return redirect('/user/register/')\n #密码加密\n s1=sha1()\n s1.update(upwd)\n upwd3=s1.hexdigest()\n user=UserInfo()\n user.uname=uname\n user.upwd=upwd3\n user.uemail=uemail\n user.save()\n return redirect('/user/login/')\n\ndef register_exist(request):\n uname=request.GET.get('uname','aa')\n count=UserInfo.objects.filter(uname=uname).count()\n return JsonResponse({'count':count})\n\n\ndef login(request):\n uname=request.COOKIES.get('uname', '')\n # {{alert(uname)}}\n # print(uname)\n context={'title': '用户登录', 'error_name': 0, 'error_pwd': 0, \"uname\": uname}\n return render(request, 'df_user/login.html')\n\n\ndef login_handle(request):\n post=request.POST\n uname=post.get('username')\n upwd=post.get('pwd')\n jizhu=post.get('jizhu',0)\n users=UserInfo.objects.filter(uname=uname)\n # print(uname)\n # context={'list':users}\n # return render(request,'df_user/demo.html',context)\n if len(users)==1:\n s1=sha1()\n s1.update(upwd)\n if s1.hexdigest()==users[0].upwd:\n red = HttpResponseRedirect('/')#登录后到首页\n # red = HttpResponseRedirect('/user/info/') 登录后到用户中心\n if jizhu !=0:\n red.set_cookie('uname',uname)\n else:\n red.set_cookie('uanme','',max_age=-1)\n request.session['user_id']=users[0].id\n request.session['user_name']=uname\n return red\n else:\n context={'title':'用户登录','error_name':0,'error_pwd':1,'uname':uname,'upwd':upwd}\n return render(request,'df_user/login.html',context)\n else:\n context = {'title':'用户登录','error_name':1,'error_pwd':0,'uname':uname,'upwd':upwd}\n return render(request,'df_user/login.html',context)\ndef logout(request):\n request.session.flush()\n return redirect('/')\n\n@user_decorator.login\ndef info(request):\n user_email=UserInfo.objects.get(id=request.session['user_id']).uemail\n user_address=UserInfo.objects.get(id=request.session['user_id']).uaddress\n\n goods_ids=request.COOKIES.get('goods_ids','')\n goods_ids1=goods_ids.split(',')\n # print goods_ids\n goods_list=[]\n if len(goods_ids):\n for goods_id in goods_ids1:\n goods_list.append(GoodsInfo.objects.get(id=int(goods_id)))\n context={'title':'用户中心',\n 'user_email':user_email,\n 'user_name':request.session['user_name'],\n 'page_name':1,\n 'user_address':user_address,\n 'goods_list':goods_list,\n }\n\n return render(request, 'df_user/user_center_info.html',context)\n\n@user_decorator.login\ndef order(request,index):\n user_id = request.session['user_id']\n orders_list = OrderInfo.objects.filter(user_id=int(user_id)).order_by('-odate')\n paginator = Paginator(orders_list,2)\n page = paginator.page(int(index))\n # price = OrderDetailInfo.objects.filter()\n # dcount = int(OrderDetailInfo.count)\n # detailtotal = float(price)*float(dcount)\n # print(len(page))\n context={'title':'用户中心',\n 'paginator':paginator,\n 'page':page,\n 'page_name':1,\n # 'detailtotal':detailtotal\n\n 
}\n return render(request,'df_user/user_center_order.html', context)\n\n@user_decorator.login\ndef site(request):\n user=UserInfo.objects.get(id=request.session['user_id'])\n if request.method == 'POST':\n post = request.POST\n user.ushou=post.get('ushou')\n user.uaddress=post.get('uaddress')\n user.uyoubian=post.get('uyoubian')\n user.uphone=post.get('uphone')\n user.save()\n\n context={'title':'用户中心','user':user,'page_name':1}\n return render(request,'df_user/user_center_site.html',context)\n # return HttpResponse(user.ushou)\n"
},
{
"alpha_fraction": 0.6085083484649658,
"alphanum_fraction": 0.6130856275558472,
"avg_line_length": 28.46825408935547,
"blob_id": "fb505977af8e5492fdd1a953984409822e2960e2",
"content_id": "3aba6fa18289b05adb069ac193c7872a4bba9708",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3842,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 126,
"path": "/dailyfresh/df_cart/views.py",
"repo_name": "renjw234/django",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nfrom django.shortcuts import render,redirect,reverse\nfrom django.http import JsonResponse,HttpResponseRedirect\nfrom df_cart.models import *\nfrom df_user import user_decorator\nfrom django.http import HttpResponse\n# from models import *\nfrom .models import *\nfrom df_goods.models import *\n\n# @user_decorator.login\n# def demo(request):\n# carts=GoodsInfo.objects.get(pk=2)\n# context={'gtitle':carts.gtitle}\n# return render(request,'df_cart/demo.html',context)\n# # return HttpResponse({'gtitle':carts.gtitle})\n\n@user_decorator.login\ndef cart(request):\n uid=request.session['user_id']\n carts=CartInfo.objects.filter(user_id=uid)\n lenn=len(carts)\n context={\n 'title':'购物车',\n 'page_name':1,\n 'carts':carts,\n 'lenn':lenn,\n }\n if request.is_ajax():\n count = CartInfo.objects.filter(user_id=request.session['user_id']).count()\n return JsonResponse({'count':count})\n else:\n return render(request,'df_cart/cart.html',context)\n # cartlist=CartInfor.objects.all()\n # user=cartlist.user\n # goods=cartlist.goods\n # count=cartlist.count\n # context={'user':user,'goods':goods,'count':count}\n # print (carts)\n # print(carts.gtitle)\n # print(carts.user.uname)\n\n # print(carts.goods.gtitle)\n # print(carts.gprice)\n # return render(request,'df_cart/cart.html',context)\n # print cartlist\n # return HttpResponse(cartlist)\n\n@user_decorator.login\ndef add(request,gid,count):\n uid=request.session['user_id']\n gid, count = int(gid), int(count)\n carts=CartInfo.objects.filter(user_id=uid,goods_id=gid)\n # carts=CartInfo.objects.get(user_id=7)\n # for a in carts.user.uname:\n # print a\n\n if len(carts) >= 1:\n cart=carts[0]\n cart.count=cart.count+count\n else:\n cart=CartInfo()\n cart.user_id=uid\n cart.goods_id=gid\n cart.count=count\n cart.save()\n if request.is_ajax():\n count=CartInfo.objects.filter(user_id=request.session['user_id']).count()\n return JsonResponse({'count':count})\n else:\n return redirect('/cart/')\n # return redirect(reverse(\"df_cart:cart\"))\n # return render(request,'/cart/')\n# def add(request,gid,count):\n#\n# #用户uid购买了gid商品,数量为count\n# uid=request.session['user_id']\n# gid = int(gid)\n# count = int(count)\n# #查询购物车是否已经有此商品,有则增加\n# carts = CartInfo.objects.filter(user_id=uid, goods_id=gid)\n# if len(carts)>=1:\n# cart=carts[0]\n# # print '*'*10\n# # print cart -> 购物车商品数量\n# cart.count=cart.count+count\n# else:#不存在则直接加\n# cart=CartInfo()\n# cart.user_id=uid\n# cart.goods_id=gid\n# cart.count=count\n# cart.save()\n# count_s = CartInfo.objects.filter(user_id=uid).count()\n# request.session['count'] = count_s\n# #如果是ajax请求则返回json,否则转向购物车\n# if request.is_ajax():\n# # count=CartInfo.objects.filter(user_id=request.session['user_id']).count()\n#\n# print '*'*10\n# print 'ajax'\n# #--------------未使用\n# return JsonResponse({'count':count_s})\n# else:\n# return redirect('/cart/')\n@user_decorator.login\ndef edit(request,cart_id,count):\n data = {}\n try:\n cart=CartInfo.objects.get(pk=int(cart_id))\n count1=cart.count=int(count)\n cart.save()\n data={'ok':0}\n except Exception as e:\n data={'ok':count1}\n return JsonResponse(data)\n\n@user_decorator.login\ndef delete(request,cart_id):\n try:\n cart=CartInfo.objects.get(pk=int(cart_id))\n cart.delete()\n # cart.save()\n data={'ok':1}\n except Exception as e:\n data={'ok':0}\n return JsonResponse(data)\n\n"
}
] | 7 | DraftToday/docassemble | https://github.com/DraftToday/docassemble | f661b539611a827417faeb7023cce8f070349c28 | 1a34b6e2b64790a525418edef4be9899ec816b1f | 0ebf9a06d2d7818b3a4c55acb27f3cd88514cd31 | refs/heads/master | 2020-05-01T19:13:26.406157 | 2019-03-23T02:01:54 | 2019-03-23T02:01:54 | 177,543,978 | 1 | 0 | MIT | 2019-03-25T08:22:23 | 2019-03-25T08:22:36 | 2019-03-25T18:07:17 | Python |
[
{
"alpha_fraction": 0.5919250249862671,
"alphanum_fraction": 0.6000360250473022,
"avg_line_length": 38.90647506713867,
"blob_id": "d2fe7a41ef37ebac91ce1e7dc3247c0a04e63e7d",
"content_id": "d8cb809461108c8dbe83c8ba632bbd81621abb5c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5548,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 139,
"path": "/docassemble_base/docassemble/base/rtfng/Elements.py",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "from types import IntType, FloatType, LongType, StringTypes\nfrom copy import deepcopy\nfrom binascii import hexlify\n\nfrom Renderer import Renderer\nfrom Constants import *\nfrom Styles import *\nfrom PropertySets import StandardColours, StandardFonts, StandardPaper\n\nfrom docassemble.base.rtfng.document.base import TAB, LINE, RawCode\nfrom docassemble.base.rtfng.document.section import Section\nfrom docassemble.base.rtfng.document.character import Text, Inline\n\n#class UnhandledParamError( Exception ) : # Currently unused.\n# def __init__( self, param ) :\n# Exception.__init__( self, \"Don't know what to do with param %s\" % param )\n\n#\n# Finally a StyleSheet in which all of this stuff is put together\n#\nclass StyleSheet :\n def __init__( self, colours=None, fonts=None ) :\n\n self.Colours = colours or deepcopy( StandardColours )\n self.Fonts = fonts or deepcopy( StandardFonts )\n\n self.TextStyles = AttributedList()\n self.ParagraphStyles = AttributedList()\n\n def Copy(self):\n return deepcopy(self)\n\ndef MakeDefaultStyleSheet( ) :\n result = StyleSheet()\n\n NormalText = TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) )\n\n ps = ParagraphStyle( 'Normal',\n NormalText.Copy(),\n ParagraphPropertySet( space_before = 60,\n space_after = 60 ) )\n result.ParagraphStyles.append( ps )\n\n ps = ParagraphStyle( 'Normal Short',\n NormalText.Copy() )\n result.ParagraphStyles.append( ps )\n\n NormalText.textProps.size = 32\n ps = ParagraphStyle( 'Heading 1',\n NormalText.Copy(),\n ParagraphPropertySet( space_before = 240,\n space_after = 60 ) )\n result.ParagraphStyles.append( ps )\n\n NormalText.textProps.size = 24\n NormalText.textProps.bold = True\n ps = ParagraphStyle( 'Heading 2',\n NormalText.Copy(),\n ParagraphPropertySet( space_before = 240,\n space_after = 60 ) )\n result.ParagraphStyles.append( ps )\n\n # Add some more in that are based on the normal template but that\n # have some indenting set that makes them suitable for doing numbered\n normal_numbered = result.ParagraphStyles.Normal.Copy()\n normal_numbered.name = 'Normal Numbered'\n normal_numbered.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 )\n normal_numbered.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH )\n\n result.ParagraphStyles.append( normal_numbered )\n\n normal_numbered2 = result.ParagraphStyles.Normal.Copy()\n normal_numbered2.name = 'Normal Numbered 2'\n normal_numbered2.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 )\n normal_numbered2.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH * 2 )\n\n result.ParagraphStyles.append( normal_numbered2 )\n\n ## LIST STYLES\n for idx, indent in [ (1, TabPropertySet.DEFAULT_WIDTH ),\n (2, TabPropertySet.DEFAULT_WIDTH * 2),\n (3, TabPropertySet.DEFAULT_WIDTH * 3) ] :\n indent = TabPropertySet.DEFAULT_WIDTH\n ps = ParagraphStyle( 'List %s' % idx,\n TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) ),\n ParagraphPropertySet( space_before = 60,\n space_after = 60,\n first_line_indent = -indent,\n left_indent = indent) )\n result.ParagraphStyles.append( ps )\n\n return result\n\nPAGE_NUMBER = RawCode( r'{\\field{\\fldinst page}}' )\nTOTAL_PAGES = RawCode( r'{\\field{\\fldinst numpages}}' )\nSECTION_PAGES = RawCode( r'{\\field{\\fldinst sectionpages}}' )\nARIAL_BULLET = RawCode( r'{\\f2\\'95}' )\n\nclass Document :\n def __init__( self, style_sheet=None, default_language=None, view_kind=None, view_zoom_kind=None, view_scale=None ) :\n self.StyleSheet = style_sheet or 
MakeDefaultStyleSheet()\n self.Sections = AttributedList( Section )\n\n self.SetTitle( None )\n\n self.DefaultLanguage = default_language or Languages.DEFAULT\n self.ViewKind = view_kind or ViewKind.DEFAULT\n self.ViewZoomKind = view_zoom_kind\n self.ViewScale = view_scale\n\n def NewSection( self, *params, **kwargs ) :\n result = Section( *params, **kwargs )\n self.Sections.append( result )\n return result\n\n def SetTitle( self, value ) :\n self.Title = value\n return self\n\n def Copy( self ) :\n result = Document( style_sheet = self.StyleSheet.Copy(),\n default_language = self.DefaultLanguage,\n view_kind = self.ViewKind,\n view_zoom_kind = self.ViewZoomKind,\n view_scale = self.ViewScale )\n result.SetTitle( self.Title )\n result.Sections = self.Sections.Copy()\n\n return result\n\n # XXX this is a temporary fix until I figure out the best way to refactor\n # the renderer\n def write(self, fhOrFilename):\n if isinstance(fhOrFilename, str):\n fh = open(fhOrFilename, 'w+')\n else:\n fh = fhOrFilename\n r = Renderer()\n r.Write(self, fh)\n\n"
},
{
"alpha_fraction": 0.6405228972434998,
"alphanum_fraction": 0.6405228972434998,
"avg_line_length": 11.75,
"blob_id": "333a70479d021992c8aa4cdb8a7d0faf3edb1983",
"content_id": "848ce1334fc63df1de6c30f5dcd1f0a397f8083e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 12,
"path": "/docassemble_base/docassemble/base/rtfng/exceptions.py",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "\"\"\"\npyrtf-ng Errors and Exceptions\n\"\"\"\n\nclass RTFError(Exception):\n pass\n\n\nclass ParseError(RTFError):\n \"\"\"\n Unable to parse the RTF data.\n \"\"\"\n"
},
{
"alpha_fraction": 0.726166307926178,
"alphanum_fraction": 0.726166307926178,
"avg_line_length": 35.51852035522461,
"blob_id": "570ca3d926a934998a10adda978db55058606e15",
"content_id": "0613f0af8a0d05b2abd30b39e1fc961000e6adf3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 986,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 27,
"path": "/docassemble_webapp/docassemble/webapp/app_object.py",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "#import sys\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom flask import Flask\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_babel import Babel\n\ndef create_app():\n app = Flask(__name__)\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n from docassemble.base.config import daconfig\n import docassemble.webapp.database\n import docassemble.webapp.db_object\n connect_string = docassemble.webapp.database.connection_string()\n alchemy_connect_string = docassemble.webapp.database.alchemy_connection_string()\n app.config['SQLALCHEMY_DATABASE_URI'] = alchemy_connect_string\n app.secret_key = daconfig.get('secretkey', '38ihfiFehfoU34mcq_4clirglw3g4o87')\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db = docassemble.webapp.db_object.init_flask()\n db.init_app(app)\n csrf = CSRFProtect()\n csrf.init_app(app)\n babel = Babel()\n babel.init_app(app)\n app.wsgi_app = ProxyFix(app.wsgi_app)\n return app, csrf, babel\n\napp, csrf, flaskbabel = create_app()\n"
},
{
"alpha_fraction": 0.6366906762123108,
"alphanum_fraction": 0.6726618409156799,
"avg_line_length": 32.095237731933594,
"blob_id": "5ec292f279c6dbf852e14a2556063733e09eef62",
"content_id": "45062b49da6570fd5269dd0142397ce68b6e3029",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1390,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 42,
"path": "/Docker/reset.sh",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n\nexport DA_ROOT=\"${DA_ROOT:-/usr/share/docassemble}\"\nexport DAPYTHONVERSION=\"${DAPYTHONVERSION:-2}\"\nif [ \"${DAPYTHONVERSION}\" == \"2\" ]; then\n export DA_DEFAULT_LOCAL=\"local\"\nelse\n export DA_DEFAULT_LOCAL=\"local3.5\"\nfi\nexport DA_ACTIVATE=\"${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}/bin/activate\"\nsource ${DA_ACTIVATE}\n\nexport CONTAINERROLE=\":${CONTAINERROLE:-all}:\"\nexport HOME=/var/www\n\npython -m docassemble.webapp.restart\n\n#if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then\n# supervisorctl --serverurl http://localhost:9001 stop apache2 || exit 1\n# sleep 1\n# supervisorctl --serverurl http://localhost:9001 start apache2 || exit 1\n#fi\n\nif [[ $CONTAINERROLE =~ .*:(all|celery):.* ]]; then\n supervisorctl --serverurl http://localhost:9001 stop celery || exit 1\n if [[ $CONTAINERROLE =~ .*:(all|rabbitmq):.* ]]; then\n\tsupervisorctl --serverurl http://localhost:9001 stop rabbitmq || exit 1\n fi\n sleep 1\n if [[ $CONTAINERROLE =~ .*:(all|rabbitmq):.* ]]; then\n\tsupervisorctl --serverurl http://localhost:9001 start rabbitmq || exit 1\n fi\n supervisorctl --serverurl http://localhost:9001 start celery || exit 1\nfi\n\nif [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then\n supervisorctl --serverurl http://localhost:9001 stop websockets || exit 1\n sleep 1\n supervisorctl --serverurl http://localhost:9001 start websockets || exit 1\nfi\n\nexit 0\n"
},
{
"alpha_fraction": 0.5989430546760559,
"alphanum_fraction": 0.6118614077568054,
"avg_line_length": 29.945453643798828,
"blob_id": "4fd993ae5b353a91b295d961252ea0a8a738b659",
"content_id": "168283fd852c3bbf4722e813a533c7ec3defbc46",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1703,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 55,
"path": "/docassemble_base/docassemble/base/rtfng/document/section.py",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "from docassemble.base.rtfng.PropertySets import StandardPaper, MarginsPropertySet\n \nclass Section( list ) :\n NONE = 1\n COLUMN = 2\n PAGE = 3\n EVEN = 4\n ODD = 5\n BREAK_TYPES = [ NONE, COLUMN, PAGE, EVEN, ODD ]\n\n def __init__( self, paper=None, margins=None, break_type=None, headery=None, footery=None, landscape=None, first_page_number=None ) :\n super( Section, self ).__init__()\n\n self.Paper = paper or StandardPaper.A4\n self.SetMargins( margins )\n\n self.Header = []\n self.Footer = []\n self.FirstHeader = []\n self.FirstFooter = []\n\n self.SetBreakType( break_type or self.NONE )\n self.SetHeaderY( headery )\n self.SetFooterY( footery )\n self.SetLandscape( landscape )\n self.SetFirstPageNumber( first_page_number )\n\n def TwipsToRightMargin( self ) :\n return self.Paper.Width - ( self.Margins.Left + self.Margins.Right )\n\n def SetMargins( self, value ) :\n self.Margins = value or MarginsPropertySet( top=1000, left=1200, bottom=1000, right=1200 )\n self.Width = self.Paper.Width - ( self.Margins.Left + self.Margins.Right )\n\n def SetBreakType( self, value ) :\n assert value in self.BREAK_TYPES\n self.BreakType = value\n return self\n\n def SetHeaderY( self, value ) :\n self.HeaderY = value\n return self\n\n def SetFooterY( self, value ) :\n self.FooterY = value\n return self\n\n def SetLandscape( self, value ) :\n self.Landscape = False\n if value : self.Landscape = True\n return self\n\n def SetFirstPageNumber( self, value ) :\n self.FirstPageNumber = value\n return self\n\n"
},
{
"alpha_fraction": 0.6979866027832031,
"alphanum_fraction": 0.7069351077079773,
"avg_line_length": 25.294116973876953,
"blob_id": "fcc2b45e04271fb0f88e353ef005497de0b4eb48",
"content_id": "377d0cb5311ed1fac350717f9212890ccdc775a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 447,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 17,
"path": "/Docker/process-email.sh",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nexport DA_ROOT=\"${DA_ROOT:-/usr/share/docassemble}\"\nexport DAPYTHONVERSION=\"${DAPYTHONVERSION:-2}\"\nif [ \"${DAPYTHONVERSION}\" == \"2\" ]; then\n export DA_DEFAULT_LOCAL=\"local\"\nelse\n export DA_DEFAULT_LOCAL=\"local3.5\"\nfi\nexport DA_ACTIVATE=\"${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}/bin/activate\"\nsource ${DA_ACTIVATE}\n\nemailfile=$(mktemp)\n\ncat > $emailfile\npython -m docassemble.webapp.process_email $emailfile\nrm -f $emailfile\n"
},
{
"alpha_fraction": 0.69947350025177,
"alphanum_fraction": 0.7178007364273071,
"avg_line_length": 27.21714210510254,
"blob_id": "bc60e64f5f3dcc9745bf7fc65d9a3dbf3a7d2c11",
"content_id": "e8c8f98613fe73ede4dfa186f6e262a5484569e9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 9876,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 350,
"path": "/Dockerfile",
"repo_name": "DraftToday/docassemble",
"src_encoding": "UTF-8",
"text": "FROM debian:stretch\nRUN DEBIAN_FRONTEND=noninteractive \\\nbash -c \\\n'echo -e \"deb http://deb.debian.org/debian stretch main contrib\\n\\\ndeb http://deb.debian.org/debian stretch-updates main\\n\\\ndeb http://security.debian.org/debian-security stretch/updates main\\n\\\ndeb http://ftp.debian.org/debian stretch-backports main\" > /etc/apt/sources.list\\\n&& apt-get -y update'\nRUN DEBIAN_FRONTEND=noninteractive \\\nbash -c \\\n\"until apt-get -q -y install \\\napt-utils \\\ntzdata \\\npython \\\npython-dev \\\nwget \\\nunzip \\\ngit \\\nlocales \\\napache2 \\\npostgresql \\\nlibapache2-mod-xsendfile \\\nlibffi-dev \\\nlibffi6 \\\ngcc \\\nsupervisor \\\nmake \\\nperl \\\nlibinline-perl \\\nlibparallel-forkmanager-perl \\\nautoconf \\\nautomake \\\nlibjpeg-dev \\\nzlib1g-dev \\\nlibpq-dev \\\nlogrotate \\\nnodejs \\\ncron \\\nlibxml2 \\\nlibxslt1.1 \\\nlibxml2-dev \\\nlibxslt1-dev \\\nlibcurl4-openssl-dev \\\nlibssl-dev \\\nredis-server \\\nrabbitmq-server \\\nlibtool \\\nlibtool-bin \\\nsyslog-ng \\\nrsync \\\ns3cmd \\\ncurl \\\nmktemp \\\ndnsutils \\\nbuild-essential \\\nlibsvm3 \\\nlibsvm-dev \\\nliblinear3 \\\nliblinear-dev \\\nlibzbar-dev \\\nlibzbar0 \\\nlibgs-dev \\\ndefault-libmysqlclient-dev \\\nlibgmp-dev \\\npython-passlib \\\nlibsasl2-dev \\\nlibldap2-dev \\\npython3 \\\nexim4-daemon-heavy \\\npython3-venv \\\npython3-dev \\\nimagemagick \\\npdftk \\\npacpl \\\npandoc \\\ntexlive \\\ntexlive-luatex \\\ntexlive-latex-recommended \\\ntexlive-latex-extra \\\ntexlive-font-utils \\\ntexlive-lang-cyrillic \\\ntexlive-lang-french \\\ntexlive-lang-italian \\\ntexlive-lang-portuguese \\\ntexlive-lang-german \\\ntexlive-lang-european \\\ntexlive-lang-spanish \\\ntexlive-extra-utils \\\npoppler-utils \\\nlibaudio-flac-header-perl \\\nlibaudio-musepack-perl \\\nlibmp3-tag-perl \\\nlibogg-vorbis-header-pureperl-perl \\\nlibvorbis-dev \\\nlibcddb-perl \\\nlibcddb-get-perl \\\nlibmp3-tag-perl \\\nlibaudio-scan-perl \\\nlibaudio-flac-header-perl \\\nlibav-tools \\\ntesseract-ocr \\\ntesseract-ocr-dev \\\ntesseract-ocr-afr \\\ntesseract-ocr-ara \\\ntesseract-ocr-aze \\\ntesseract-ocr-bel \\\ntesseract-ocr-ben \\\ntesseract-ocr-bul \\\ntesseract-ocr-cat \\\ntesseract-ocr-ces \\\ntesseract-ocr-chi-sim \\\ntesseract-ocr-chi-tra \\\ntesseract-ocr-chr \\\ntesseract-ocr-dan \\\ntesseract-ocr-deu \\\ntesseract-ocr-deu-frak \\\ntesseract-ocr-ell \\\ntesseract-ocr-eng \\\ntesseract-ocr-enm \\\ntesseract-ocr-epo \\\ntesseract-ocr-equ \\\ntesseract-ocr-est \\\ntesseract-ocr-eus \\\ntesseract-ocr-fin \\\ntesseract-ocr-fra \\\ntesseract-ocr-frk \\\ntesseract-ocr-frm \\\ntesseract-ocr-glg \\\ntesseract-ocr-grc \\\ntesseract-ocr-heb \\\ntesseract-ocr-hin \\\ntesseract-ocr-hrv \\\ntesseract-ocr-hun \\\ntesseract-ocr-ind \\\ntesseract-ocr-isl \\\ntesseract-ocr-ita \\\ntesseract-ocr-ita-old \\\ntesseract-ocr-jpn \\\ntesseract-ocr-kan \\\ntesseract-ocr-kor \\\ntesseract-ocr-lav \\\ntesseract-ocr-lit \\\ntesseract-ocr-mal \\\ntesseract-ocr-mkd \\\ntesseract-ocr-mlt \\\ntesseract-ocr-msa \\\ntesseract-ocr-nld \\\ntesseract-ocr-nor \\\ntesseract-ocr-osd \\\ntesseract-ocr-pol \\\ntesseract-ocr-por \\\ntesseract-ocr-ron \\\ntesseract-ocr-rus \\\ntesseract-ocr-slk \\\ntesseract-ocr-slk-frak \\\ntesseract-ocr-slv \\\ntesseract-ocr-spa \\\ntesseract-ocr-spa-old \\\ntesseract-ocr-sqi \\\ntesseract-ocr-srp \\\ntesseract-ocr-swa \\\ntesseract-ocr-swe \\\ntesseract-ocr-tam \\\ntesseract-ocr-tel \\\ntesseract-ocr-tgl \\\ntesseract-ocr-tha \\\ntesseract-ocr-tur \\\ntesseract-ocr-ukr \\\ntesseract-ocr-vie 
\\\nttf-mscorefonts-installer \\\nfonts-ebgaramond-extra \\\nghostscript \\\nttf-liberation \\\nfonts-liberation \\\ncm-super \\\nqpdf; \\\ndo sleep 5; \\\ndone; \\\napt-get -q -y install -t stretch-backports libreoffice &> /dev/null\"\nRUN DEBIAN_FRONTEND=noninteractive TERM=xterm \\\ncd /tmp \\\n&& mkdir -p /etc/ssl/docassemble \\\n /usr/share/docassemble/local \\\n /usr/share/docassemble/local3.5 \\\n /usr/share/docassemble/certs \\\n /usr/share/docassemble/backup \\\n /usr/share/docassemble/config \\\n /usr/share/docassemble/webapp \\\n /usr/share/docassemble/files \\\n /var/www/.pip \\\n /var/www/.cache \\\n /usr/share/docassemble/log \\\n /tmp/docassemble \\\n /var/www/html/log \\\n&& echo '{ \"args\": [\"--no-sandbox\"] }' > /var/www/puppeteer-config.json \\\n&& chown -R www-data.www-data /var/www \\\n&& chsh -s /bin/bash www-data \\\n&& update-alternatives --install /usr/bin/node node /usr/bin/nodejs 10 \\\n&& wget -qO- https://deb.nodesource.com/setup_6.x | bash - \\\n&& apt-get -y install nodejs \\\n&& npm install -g azure-storage-cmd \\\n&& npm install -g mermaid.cli\nRUN DEBIAN_FRONTEND=noninteractive TERM=xterm \\\ncd /usr/share/docassemble \\\n&& git clone https://github.com/letsencrypt/letsencrypt \\\n&& cd letsencrypt \\\n&& ./letsencrypt-auto --help \\\n&& echo \"host all all 0.0.0.0/0 md5\" >> /etc/postgresql/9.6/main/pg_hba.conf \\\n&& echo \"listen_addresses = '*'\" >> /etc/postgresql/9.6/main/postgresql.conf\nCOPY . /tmp/docassemble/\nRUN DEBIAN_FRONTEND=noninteractive TERM=xterm \\\nln -s /var/mail/mail /var/mail/root \\\n&& cp /tmp/docassemble/docassemble_webapp/docassemble.wsgi /usr/share/docassemble/webapp/ \\\n&& cp /tmp/docassemble/Docker/*.sh /usr/share/docassemble/webapp/ \\\n&& cp /tmp/docassemble/Docker/VERSION /usr/share/docassemble/webapp/ \\\n&& cp /tmp/docassemble/Docker/pip.conf /usr/share/docassemble/local/ \\\n&& cp /tmp/docassemble/Docker/pip.conf /usr/share/docassemble/local3.5/ \\\n&& cp /tmp/docassemble/Docker/config/* /usr/share/docassemble/config/ \\\n&& cp /tmp/docassemble/Docker/cgi-bin/index.sh /usr/lib/cgi-bin/ \\\n&& cp /tmp/docassemble/Docker/syslog-ng.conf /usr/share/docassemble/webapp/syslog-ng.conf \\\n&& cp /tmp/docassemble/Docker/syslog-ng-docker.conf /usr/share/docassemble/webapp/syslog-ng-docker.conf \\\n&& cp /tmp/docassemble/Docker/docassemble-syslog-ng.conf /usr/share/docassemble/webapp/docassemble-syslog-ng.conf \\\n&& cp /tmp/docassemble/Docker/apache.logrotate /etc/logrotate.d/apache2 \\\n&& cp /tmp/docassemble/Docker/docassemble.logrotate /etc/logrotate.d/docassemble \\\n&& cp /tmp/docassemble/Docker/cron/docassemble-cron-monthly.sh /etc/cron.monthly/docassemble \\\n&& cp /tmp/docassemble/Docker/cron/docassemble-cron-weekly.sh /etc/cron.weekly/docassemble \\\n&& cp /tmp/docassemble/Docker/cron/docassemble-cron-daily.sh /etc/cron.daily/docassemble \\\n&& cp /tmp/docassemble/Docker/cron/docassemble-cron-hourly.sh /etc/cron.hourly/docassemble \\\n&& cp /tmp/docassemble/Docker/docassemble.conf /etc/apache2/conf-available/ \\\n&& cp /tmp/docassemble/Docker/docassemble-behindlb.conf /etc/apache2/conf-available/ \\\n&& cp /tmp/docassemble/Docker/docassemble-supervisor.conf /etc/supervisor/conf.d/docassemble.conf \\\n&& cp /tmp/docassemble/Docker/ssl/* /usr/share/docassemble/certs/ \\\n&& cp /tmp/docassemble/Docker/rabbitmq.config /etc/rabbitmq/ \\\n&& cp /tmp/docassemble/Docker/config/exim4-router /etc/exim4/conf.d/router/101_docassemble \\\n&& cp /tmp/docassemble/Docker/config/exim4-filter /etc/exim4/docassemble-filter \\\n&& cp 
/tmp/docassemble/Docker/config/exim4-main /etc/exim4/conf.d/main/01_docassemble \\\n&& cp /tmp/docassemble/Docker/config/exim4-acl /etc/exim4/conf.d/acl/29_docassemble \\\n&& cp /tmp/docassemble/Docker/config/exim4-update /etc/exim4/update-exim4.conf.conf \\\n&& update-exim4.conf \\\n&& bash -c \\\n\"chown www-data.www-data /usr/share/docassemble/config \\\n&& chown www-data.www-data \\\n /usr/share/docassemble/config/config.yml.dist \\\n /usr/share/docassemble/webapp/docassemble.wsgi \\\n&& chown -R www-data.www-data \\\n /tmp/docassemble \\\n /usr/share/docassemble/local \\\n /usr/share/docassemble/local3.5 \\\n /usr/share/docassemble/log \\\n /usr/share/docassemble/files \\\n&& chmod ogu+r /usr/share/docassemble/config/config.yml.dist \\\n&& chmod 755 /etc/ssl/docassemble \\\n&& cd /tmp \\\n&& wget https://bootstrap.pypa.io/get-pip.py \\\n&& python get-pip.py \\\n&& rm -f get-pip.py \\\n&& pip install --upgrade virtualenv\" \\\n&& echo \"en_US.UTF-8 UTF-8\" >> /etc/locale.gen \\\n&& locale-gen \\\n&& update-locale\n\nUSER www-data\nRUN LC_CTYPE=C.UTF-8 LANG=C.UTF-8 \\\nbash -c \\\n\"cd /tmp \\\n&& virtualenv /usr/share/docassemble/local \\\n&& source /usr/share/docassemble/local/bin/activate \\\n&& pip install --upgrade pip \\\n&& pip install \\\n 3to2 \\\n bcrypt \\\n flask \\\n flask-login \\\n flask-mail \\\n flask-sqlalchemy \\\n flask-wtf \\\n distutils2 \\\n passlib \\\n pycrypto \\\n six \\\n&& pip install --upgrade \\\n 'git+https://github.com/euske/pdfminer.git' \\\n simplekv==0.10.0 \\\n /tmp/docassemble/docassemble \\\n /tmp/docassemble/docassemble_base \\\n /tmp/docassemble/docassemble_demo \\\n /tmp/docassemble/docassemble_webapp\"\n\nUSER www-data\nRUN LC_CTYPE=C.UTF-8 LANG=C.UTF-8 \\\nbash -c \\\n\"cd /tmp \\\n&& python3 -m venv --copies /usr/share/docassemble/local3.5 \\\n&& source /usr/share/docassemble/local3.5/bin/activate \\\n&& pip3 install --upgrade pip \\\n&& pip3 install --upgrade \\\n 3to2 \\\n bcrypt \\\n flask \\\n flask-login \\\n flask-mail \\\n flask-sqlalchemy \\\n flask-wtf \\\n passlib \\\n pycryptodome \\\n pycryptodomex \\\n six \\\n setuptools \\\n&& pip3 install --upgrade \\\n simplekv==0.10.0 \\\n /tmp/docassemble/docassemble \\\n /tmp/docassemble/docassemble_base \\\n /tmp/docassemble/docassemble_demo \\\n /tmp/docassemble/docassemble_webapp\"\n\nUSER root\nRUN rm -rf /tmp/docassemble \\\n&& rm -f /etc/cron.daily/apt-compat \\\n&& sed -i -e 's/^\\(daemonize\\s*\\)yes\\s*$/\\1no/g' -e 's/^bind 127.0.0.1/bind 0.0.0.0/g' /etc/redis/redis.conf \\\n&& sed -i -e 's/#APACHE_ULIMIT_MAX_FILES/APACHE_ULIMIT_MAX_FILES/' -e 's/ulimit -n 65536/ulimit -n 8192/' /etc/apache2/envvars \\\n&& LANG=en_US.UTF-8 \\\n&& a2dismod ssl; \\\na2enmod rewrite; \\\na2enmod xsendfile; \\\na2enmod proxy; \\\na2enmod proxy_http; \\\na2enmod proxy_wstunnel; \\\na2enmod headers; \\\na2enconf docassemble; \\\necho 'export TERM=xterm' >> /etc/bash.bashrc\nEXPOSE 80 443 9001 514 25 465 8080 8081 5432 6379 4369 5671 5672 25672\nENV \\\nCONTAINERROLE=\"all\" \\\nLOCALE=\"en_US.UTF-8 UTF-8\" \\\nTIMEZONE=\"America/New_York\" \\\nEC2=\"\" \\\nS3ENABLE=\"\" \\\nS3BUCKET=\"\" \\\nS3ACCESSKEY=\"\" \\\nS3SECRETACCESSKEY=\"\" \\\nDAHOSTNAME=\"\" \\\nUSEHTTPS=\"\" \\\nUSELETSENCRYPT=\"\" \\\nLETSENCRYPTEMAIL=\"\" \\\nDBHOST=\"\" \\\nLOGSERVER=\"\" \\\nREDIS=\"\" \\\nRABBITMQ=\"\"\n\nCMD [\"/usr/bin/supervisord\", \"-n\", \"-c\", \"/etc/supervisor/supervisord.conf\"]\n"
}
] | 7 | Kevinhj/pytest-api-testing | https://github.com/Kevinhj/pytest-api-testing | d5095da1c474efb2d8e2cb660c41959127402884 | 642b0e12d5ad7df618d398c522b726326519d8fd | 9bfa18c035253313b226997ab7c36164647c9957 | refs/heads/master | 2023-06-24T23:42:46.597757 | 2021-05-12T04:34:29 | 2021-05-12T04:34:29 | 355,407,879 | 0 | 0 | null | 2021-04-07T04:06:18 | 2021-05-12T04:34:31 | 2021-05-13T03:40:42 | Python |
[
{
"alpha_fraction": 0.6391096711158752,
"alphanum_fraction": 0.6454690098762512,
"avg_line_length": 22.296297073364258,
"blob_id": "ed7d890ffd7104162e5c050f40a5f844769d1686",
"content_id": "0cebff8e057ba0b5a195349fea0ca061f6fd5c35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 629,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 27,
"path": "/pytest_tutorial/my_tests/test_suite_1_with_setup.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "import pytest\nimport pdb\n\n\n# the set up, the scope mean that setup is for the module\[email protected](scope='module')\ndef my_setup():\n print(\"\")\n print(\">>>> MY SETUP <<<<\")\n\n return {'id': 20, 'name': 'Kevin'}\n\n\n# tag single tests\[email protected]\ndef test_login_page_valid_user(my_setup):\n print(\"Login with valid user\")\n print(\"function: aaaaa\")\n print(\"Name: {}\".format(my_setup.get('name')))\n # pdb.set_trace() # this is a breakpoint\n\n\[email protected]\ndef test_login_page_wrong_password():\n print(\"Login with wrong password\")\n print(\"Function: bbbbb\")\n # assert 1==2, \"One is not two\"\n"
},
{
"alpha_fraction": 0.6990291476249695,
"alphanum_fraction": 0.7184466123580933,
"avg_line_length": 19.600000381469727,
"blob_id": "da41b754e7783667ea4b3bff7783cdb75e2975af",
"content_id": "d0229985dae98439793167044c54f5bfb01f084a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 5,
"path": "/wooapitest/tests/test_healthcheck.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "import logging as logger\n\n\ndef test_healthcheck_1():\n logger.info(\"Just running a health check 1.\")\n"
},
{
"alpha_fraction": 0.5851938724517822,
"alphanum_fraction": 0.5934194922447205,
"avg_line_length": 23.342857360839844,
"blob_id": "8920127497b04012453e6c2ba3fdbc2e402c40dd",
"content_id": "f53aa3fcedef88f46d44c5fc875f138b95e3bc13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 851,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 35,
"path": "/wooapitest/src/dao/customers_dao.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "from wooapitest.src.utilities.dbUtility import DBUtility\nimport random\n\nclass CustomersDAO(object):\n\n def __init__(self):\n self.db_helper = DBUtility()\n\n def get_customer_by_email(self, email):\n \"\"\"\n\n Args:\n\n Returns:\n\n \"\"\"\n\n sql = f\"SELECT * FROM wp_users WHERE user_email = '{email}';\"\n rs_sql = self.db_helper.execute_select(sql)\n\n return rs_sql\n\n def get_random_customer_from_db(self, qty=1):\n \"\"\"\n Pull from the DB existing users by a limit of 100\n Args:\n qty is the quantity of users the func will return\n Returns:\n Return one or more existing users randomly\n \"\"\"\n\n sql = \"SELECT * FROM wp_users ORDER BY id DESC LIMIT 100;\"\n rs_sql = self.db_helper.execute_select(sql)\n\n return random.sample(rs_sql, int(qty))"
},
{
"alpha_fraction": 0.6202898621559143,
"alphanum_fraction": 0.6637681126594543,
"avg_line_length": 22,
"blob_id": "59d9f6db38434e56671f963dea77ebe2f2d733a4",
"content_id": "2e00e3042e109a6c6365d8d6150fa7e376ca5753",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 345,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 15,
"path": "/pytest_tutorial/my_tests/test_suite_2_with_class.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "import pytest\n\n# tag the whole module\npytest = [pytest.mark.e2e, pytest.mark.slow]\n\n\nclass TestCheckout(object):\n\n def test_checkout_as_guest(self):\n print(\"Checkout as guest\")\n print(\"Class: 1111111\")\n\n def test_checkout_with_existing_user(self):\n print(\"Checkout with existing user\")\n print(\"Class: 2222222\")\n"
},
{
"alpha_fraction": 0.737864077091217,
"alphanum_fraction": 0.7427184581756592,
"avg_line_length": 30.615385055541992,
"blob_id": "3f64493cffa954fbee0f27a0fb5a3790a77d2dcd",
"content_id": "f7180a62060ab2beca39ae05dadab14860ea1190",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 13,
"path": "/wooapitest/tests/customer/test_get_customers_smoke.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "\n\nimport pytest\nimport logging as logger\nfrom wooapitest.src.utilities.requestsUtility import RequestUtility\n\[email protected]\[email protected]\ndef test_get_all_customers():\n req_helper = RequestUtility()\n rs_api = req_helper.get('customers')\n logger.debug(f'Response of list all: {rs_api}')\n\n # Assert the response is not empty\n assert rs_api, f\"Response of list all customers is empty.\""
},
{
"alpha_fraction": 0.6898733973503113,
"alphanum_fraction": 0.6962025165557861,
"avg_line_length": 23.384614944458008,
"blob_id": "e86346d7a277b10b66125b978981b03f95b1e2e7",
"content_id": "29093c8284ee610197a2b9e82a6f4d3838745a4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 13,
"path": "/pytest_tutorial/my_tests/test_suite_1.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "import pytest\n\n# tag single tests\[email protected]\ndef test_login_page_valid_user():\n print(\"Login with valid user\")\n print(\"function: aaaaa\")\n\[email protected]\ndef test_login_page_wrong_password():\n print(\"Login with wrong password\")\n print(\"Function: bbbbb\")\n #assert 1==2, \"One is not two\""
},
{
"alpha_fraction": 0.7232142686843872,
"alphanum_fraction": 0.7232142686843872,
"avg_line_length": 13.125,
"blob_id": "35b6cd6c84df7006e3982ab0503c1100386aec8c",
"content_id": "19a0b21c595f144cf8961eebc970a67dada7ee0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 8,
"path": "/pytest_tutorial/pytest.ini",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "# pytest.ini\n[pytest]\nfilterwarnings =\n ignore::pytest.PytestUnknownMarkWarning\n\nmarkers =\n slow\n smoke"
},
{
"alpha_fraction": 0.6133333444595337,
"alphanum_fraction": 0.6222222447395325,
"avg_line_length": 24,
"blob_id": "4047926e00f31b7f75a5daf576065ed163268c07",
"content_id": "9bd4d5f7eaede3115f0b4b73a10a94b6db98c589",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/setup.py",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\nsetup(name='wooapitest',\n version='1.0',\n description='Practice API testing',\n author='Kevin Hernandez',\n author_email='[email protected]',\n packages=find_packages()\n )\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.7458193898200989,
"avg_line_length": 14,
"blob_id": "9d66f94fc1e3a26d1eaecdfcf343e9569f5a825f",
"content_id": "cc567fa5563570590ed6bf2a002b0eb03c7392ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 20,
"path": "/README.md",
"repo_name": "Kevinhj/pytest-api-testing",
"src_encoding": "UTF-8",
"text": "# pytest-api-testing\n\nTo run all tests\n\n`pytest`\n\nTo run test cases by mark\n\n`pytest -m tcid29`\n\nTo run the env file on Windows\n\n`env.bat`\n\nTo generate the test run report first install pytest-html\n\n`pip install pytest-html`\n\nThen run the command \n`pytest -m customers --html reports/customers.html`"
}
] | 9 | caternuson/Python_LS7366R | https://github.com/caternuson/Python_LS7366R | 63de5769b6e6d350117fcf8eb87b56318829138f | 82047cf89d05bdb3916ee7093e12f4968cc7b3d5 | ac48df0e17e52993d0fffb24105b2faaaef30643 | refs/heads/master | 2020-03-22T18:32:04.295054 | 2018-07-19T04:35:21 | 2018-07-19T04:35:21 | 140,465,787 | 2 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.8285714387893677,
"avg_line_length": 34,
"blob_id": "c017cf8badcda7ae0f8463fc16dc2144088b7ba4",
"content_id": "3f8fc4c7ac6522d179bc107e249c8a0f325858a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 70,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 2,
"path": "/README.md",
"repo_name": "caternuson/Python_LS7366R",
"src_encoding": "UTF-8",
"text": "# Python_LS7366R\nPython driver for LSI/CSI LS7366R quadrature counter\n"
},
{
"alpha_fraction": 0.5707663893699646,
"alphanum_fraction": 0.6100123524665833,
"avg_line_length": 29.966506958007812,
"blob_id": "f01214d149cb5ea0420d1fdb64419d1530fd80c5",
"content_id": "e7701999427ab355f73906b386a9917e24393557",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6472,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 209,
"path": "/ls7366r.py",
"repo_name": "caternuson/Python_LS7366R",
"src_encoding": "UTF-8",
"text": "# The MIT License (MIT)\n#\n# Copyright (c) 2018 Carter Nelson\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n# MDR0 configuration data - the configuration byte is formed with\n# single segments taken from each group and ORing all together.\n\nCOUNTER_BITS = (32, 24, 16, 8)\nQUADRATURE_MODES = (0, 1, 2, 4)\n\n# Count modes\nNQUAD = 0x00 # non-quadrature mode\nQUADRX1 = 0x01 # X1 quadrature mode\nQUADRX2 = 0x02 # X2 quadrature mode\nQUADRX4 = 0x03 # X4 quadrature mode\n\n# Running modes\nFREE_RUN = 0x00\nSINGE_CYCLE = 0x04\nRANGE_LIMIT = 0x08\nMODULO_N = 0x0C\n\n# Index modes\nDISABLE_INDX = 0x00 # index_disabled\nINDX_LOADC = 0x10 # index_load_CNTR\nINDX_RESETC = 0x20 # index_rest_CNTR\nINDX_LOADO = 0x30 # index_load_OL\nASYNCH_INDX = 0x00 # asynchronous index\nSYNCH_INDX = 0x80 # synchronous index\n\n# Clock filter modes\nFILTER_1 = 0x00 # filter clock frequncy division factor 1\nFILTER_2 = 0x80 # filter clock frequncy division factor 2\n\n# MDR1 configuration data; any of these\n# data segments can be ORed together\n\n# Flag modes\nNO_FLAGS = 0x00 # all flags disabled\nIDX_FLAG = 0x10 # IDX flag\nCMP_FLAG = 0x20 # CMP flag\nBW_FLAG = 0x40 # BW flag\nCY_FLAG = 0x80 # CY flag\n\n# 1 to 4 bytes data-width\nBYTE_4 = 0x00 # four byte mode\nBYTE_3 = 0x01 # three byte mode\nBYTE_2 = 0x02 # two byte mode\nBYTE_1 = 0x03 # one byte mode\n\n# Enable/disable counter\nEN_CNTR = 0x00 # counting enabled\nDIS_CNTR = 0x04 # counting disabled\n\n# LS7366R op-code list\nCLR_MDR0 = 0x08\nCLR_MDR1 = 0x10\nCLR_CNTR = 0x20\nCLR_STR = 0x30\nREAD_MDR0 = 0x48\nREAD_MDR1 = 0x50\nREAD_CNTR = 0x60\nREAD_OTR = 0x68\nREAD_STR = 0x70\nWRITE_MDR1 = 0x90\nWRITE_MDR0 = 0x88\nWRITE_DTR = 0x98\nLOAD_CNTR = 0xE0\nLOAD_OTR = 0xE4\n\nclass LS7366R():\n \"\"\"LSI/CSI LS7366R quadrature counter.\"\"\"\n\n def __init__(self, spi):\n # This should be a SpiDev or compatible object.\n self._spi = spi\n\n # Default config\n self._write_mdr0(QUADRX4 | FREE_RUN | DISABLE_INDX | FILTER_1)\n self._write_mdr1(BYTE_4 | EN_CNTR)\n\n # Set to zero at start\n self.counts = 0\n\n @property\n def counts(self):\n \"\"\"Current counts as signed integer.\"\"\"\n return self._get_counts()\n\n @counts.setter\n def counts(self, value):\n self._set_counts(value)\n\n @property\n def bits(self):\n \"\"\"Counter bits.\"\"\"\n return COUNTER_BITS[self._read_mdr1()[0] & 0x03]\n\n @bits.setter\n def bits(self, value):\n if value not in COUNTER_BITS:\n raise ValueError(\"Bits must be one of \", *COUNTER_BITS)\n 
self._write_mdr1(self._read_mdr1()[0] &0xFC | COUNTER_BITS.index(value))\n\n @property\n def quadrature(self):\n \"\"\"Quadrature mode.\"\"\"\n return QUADRATURE_MODES[self._read_mdr0()[0] & 0x03]\n\n @quadrature.setter\n def quadrature(self, value):\n if value not in QUADRATURE_MODES:\n raise ValueError(\"Mode must be one of \", *QUADRATURE_MODES)\n self._write_mdr0((self._read_mdr0()[0] & 0xFC) | QUADRATURE_MODES.index(value))\n\n def _get_counts(self, ):\n \"\"\"Read the counter register value.\"\"\" \n bits = self.bits\n byte_values = self._read_cntr()\n counts = 0\n for b in byte_values:\n counts <<= 8\n counts |= b\n if counts >> (bits - 1):\n counts -= 1 << bits\n return counts\n\n def _set_counts(self, value):\n \"\"\"Set the counter register value.\"\"\"\n self._write_dtr(value)\n self._load_cntr()\n\n def _clear_mdr0(self):\n \"\"\"Clear MDR0.\"\"\"\n self._spi.writebytes([CLR_MDR0])\n\n def _clear_mdr1(self):\n \"\"\"Clear MDR1.\"\"\"\n self._spi.writebytes([CLR_MDR1])\n\n def _clear_cntr(self):\n \"\"\"Clear the counter.\"\"\"\n self._spi.writebytes([CLR_CNTR])\n\n def _clear_str(self):\n \"\"\"Clear the status register.\"\"\"\n self._spi.writebytes([CLR_STR])\n\n def _read_mdr0(self):\n \"\"\"Read the 8 bit MDR0 register.\"\"\"\n return self._spi.xfer2([READ_MDR0, 0x00])[1:]\n\n def _read_mdr1(self):\n \"\"\"Read the 8 bit MDR1 register.\"\"\"\n return self._spi.xfer2([READ_MDR1, 0x00])[1:]\n\n def _read_cntr(self):\n \"\"\"Transfer CNTR to OTR, then read OTR. Size of return depends\n on current bit setting.\"\"\"\n return self._spi.xfer2([READ_CNTR]+[0]*(self.bits//8))[1:]\n\n def _read_otr(self):\n \"\"\"Output OTR.\"\"\"\n return self._spi.xfer2([READ_OTR]+[0]*(self.bits//8))[1:]\n\n def _read_str(self):\n \"\"\"Read 8 bit STR register.\"\"\"\n return self._spi.xfer2([READ_STR,0x00])[1:]\n\n def _write_mdr0(self, mode):\n \"\"\"Write serial data at MOSI into MDR0.\"\"\"\n self._spi.writebytes([WRITE_MDR0, mode])\n\n def _write_mdr1(self, mode):\n \"\"\"Write serial data at MOSI into MDR1.\"\"\"\n self._spi.writebytes([WRITE_MDR1, mode])\n\n def _write_dtr(self, value):\n \"\"\"Write to 32 bit DTR register.\"\"\"\n self._spi.writebytes([WRITE_DTR, value >> 24 & 0xFF,\n value >> 16 & 0xFF,\n value >> 8 & 0xFF,\n value & 0xFF])\n\n def _load_cntr(self):\n \"\"\"Transfer DTR to CNTR.\"\"\"\n self._spi.writebytes([LOAD_CNTR])\n\n def _load_otr(self):\n \"\"\"Transfer CNTR to OTR.\"\"\"\n self._spi.writebytes([LOAD_OTR])\n"
}
] | 2 | ShilpiKiran/COMRED | https://github.com/ShilpiKiran/COMRED | 91f480f46fe9db415a6777600e44847a81c0063b | e7d280c1e8d8f6e0f3fb3518b689466238f0d3e0 | e606561e599fac17c6ec51808597496f108f2ff3 | refs/heads/master | 2023-04-28T16:03:59.213466 | 2021-05-24T07:20:08 | 2021-05-24T07:20:08 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6090534925460815,
"alphanum_fraction": 0.6316872239112854,
"avg_line_length": 27.58823585510254,
"blob_id": "eafd5338953171b029908af6d5fb1b893a5f8259",
"content_id": "033801e162ddb1acec4acf7fadbdf798f29a8b15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 17,
"path": "/model_cdss.py",
"repo_name": "ShilpiKiran/COMRED",
"src_encoding": "UTF-8",
"text": "import sys\nimport joblib\nsaved_model = joblib.load('model_cdss.pkl')\nb = sys.argv[1]\na = list(range(2, 134))\nb = [int(x) for x in b.split()]\ncount = 0\nwhile count < len(b):\n item_to_replace = b[count]\n replacement_value = 1\n indices_to_replace = [i for i, x in enumerate(a) if x == item_to_replace]\n count += 1\n for i in indices_to_replace:\n a[i] = replacement_value\na = [0 if x != 1 else x for x in a]\ny_diagnosis = saved_model.predict([a])\nprint(y_diagnosis[0])\n"
},
{
"alpha_fraction": 0.47066444158554077,
"alphanum_fraction": 0.4782528281211853,
"avg_line_length": 26.020000457763672,
"blob_id": "fad7b579b8eb694df0f80daa64ff3e6a65302c36",
"content_id": "e05b8b8304ebbebb6d1b536e7924af3ece406aca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5403,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 200,
"path": "/routes/prediction.js",
"repo_name": "ShilpiKiran/COMRED",
"src_encoding": "UTF-8",
"text": "const router = require('express').Router();\nconst fetch = require('node-fetch');\nconst User = require('../models/User');\nconst Logs = require('../models/Logs');\nconst { registerValidation, loginValidation } = require('../validation');\nconst bcrypt = require('bcryptjs')\nconst jwt = require('jsonwebtoken');\nvar passport = require(\"passport\");\nconst path = require('path');\n\n//handling heart disease request\nrouter.post(\"/hp\", function (req, res) {\n var URL = `${process.env.HEART_DIAGNOSIS_TUNNEL}api`\n console.log(req.body)\n var arr = [];\n var prediction;\n var name = req.body.name;\n var username = req.body.username;\n delete req.body.name;\n delete req.body.username;\n for (var key in req.body) {\n if (req.body.hasOwnProperty(key)) {\n arr.push(req.body[key]);\n }\n }\n var arrString = arr.join(\" \");\n \n console.log(\"arrString\",arrString)\n // [20, 225.0, 150, 95, 28.58, 103]\n fetch(URL, {\n method: \"post\",\n body: JSON.stringify({\n \"data\": [\n arr\n ]\n }),\n headers: { \"Content-Type\": \"application/json\" },\n })\n .then((res) => res.json())\n .then((json) => {\n console.log(json.prediction)\n prediction = json.prediction[0]\n }).then(() => {\n\n console.log(\"body\")\n console.log(arrString)\n console.log(\"body\")\n var log = new Logs({\n username: req.user.username,\n name: name,\n type: 1,\n input: arrString,\n output: prediction\n })\n\n log.save(function (err, result) {\n if (err) {\n console.log(err);\n }\n else {\n console.log(result)\n }\n })\n\n return res.render('heartPrognosisResult', { title: \"Your Prognosis Result\", prediction: prediction , username : username});\n\n })\n\n\n\n});\n\n\n\n\n//handling misc disease logic\nrouter.post(\"/md\", function (req, res) {\n\n // var URL = `${process.env.MISC_DISEASE_PROGNOSIS_TUNNEL}api`\n // console.log(URL)\n // var name = req.body.name;\n // var arrString = req.body.symptoms.join(\" \");\n // console.log(\"arrString\",arrString)\n // fetch(URL, {\n // method: \"post\",\n // body: JSON.stringify({\n // \"data\": [\n // [arrString]\n // ]\n // }),\n // headers: { \"Content-Type\": \"application/json\" },\n // })\n // .then((res) => res.json())\n // .then((json) => {\n // console.log(json)\n // prediction = json.prediction\n // }).then(() => {\n\n\n var spawn = require(\"child_process\").spawn;\n var arrString = req.body.symptoms.join(\" \");\n var name = req.body.name;\n \n console.log(arrString)\n \n var process = spawn('python', [\"./model_cdss.py\", arrString]);\n \n process.stdout.on('data', function (data) {\n var log = new Logs({ \n username: req.user.username,\n name:name,\n type: 2,\n input: arrString,\n output: data.toString()\n })\n\n log.save(function (err, result) {\n if (err) {\n console.log(err);\n }\n else {\n console.log(result)\n return res.render('miscDiseaseResult', { title: \"Your Diagnosis Result\", prediction: data.toString(),username: req.user.username });\n }\n })\n \n \n }); \n \n // })\n\n // return;\n\n});\n\n\n//handling diabetes diagnosis request\nrouter.post(\"/dd\", function (req, res) {\n var URL = `${process.env.DIABETESE_DIAGNOSIS_TUNNEL}api`\n console.log(req.body)\n var arr = [];\n var prediction;\n var name = req.body.name;\n var username = req.body.username;\n delete req.body.name;\n delete req.body.username;\n for (var key in req.body) {\n if (req.body.hasOwnProperty(key)) {\n arr.push(req.body[key]);\n }\n }\n var arrString = arr.join(\" \");\n \n console.log(\"arrString\",arrString)\n // [20, 225.0, 150, 95, 28.58, 103]\n fetch(URL, {\n method: 
\"post\",\n body: JSON.stringify({\n \"data\": [\n arr\n ]\n }),\n headers: { \"Content-Type\": \"application/json\" },\n })\n .then((res) => res.json())\n .then((json) => {\n console.log(json.prediction)\n prediction = json.prediction[0]\n }).then(() => {\n\n console.log(\"body\")\n console.log(arrString)\n console.log(\"body\")\n var log = new Logs({\n username: req.user.username,\n name: name,\n type: 3,\n input: arrString,\n output: prediction\n })\n\n log.save(function (err, result) {\n if (err) {\n console.log(err);\n }\n else {\n console.log(result)\n }\n })\n\n return res.render('diabetesDiagnosisResult', { title: \"Your Diagnosis Result\", prediction: prediction , username : username});\n\n })\n\n\n\n});\n\n\nmodule.exports = router;"
}
] | 2 | Unlogical/Modulo_calculator | https://github.com/Unlogical/Modulo_calculator | e26eba662724e7f95f8b3dc9ab8c3891e825c9d0 | a3f2dc997caa67695924a5dd7ddd4e781943c96c | bc334d6e196c6fde1834eeacd11cbb37dccfac9c | refs/heads/master | 2020-06-10T12:53:53.235388 | 2016-12-15T14:54:22 | 2016-12-15T14:54:22 | 75,958,982 | 0 | 1 | null | 2016-12-08T17:16:10 | 2016-12-08T17:16:51 | 2016-12-15T14:54:22 | Python |
[
{
"alpha_fraction": 0.2623574137687683,
"alphanum_fraction": 0.35361215472221375,
"avg_line_length": 19.30769157409668,
"blob_id": "6db629a5a6f0f98f3a1726a34bb10955e081883d",
"content_id": "d72b8d8ef932f52491395e1e1f5c2f7930d8cb83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 263,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 13,
"path": "/helpers.py",
"repo_name": "Unlogical/Modulo_calculator",
"src_encoding": "UTF-8",
"text": "def extended_euclid(a, b):\n if b == 0:\n return [a, 1, 0]\n x2 = 1\n x1 = 0\n y2 = 0\n y1 = 1\n while b > 0:\n q = a // b\n a, b = b, a - q * b\n x1, x2 = x2 - q * x1, x1\n y1, y2 = y2 - q * y1, y1\n return [a, x2, y2]"
},
{
"alpha_fraction": 0.5658536553382874,
"alphanum_fraction": 0.5719512104988098,
"avg_line_length": 16.446807861328125,
"blob_id": "7f20eeeba0930fecc0d467fc1152ad880e51f88c",
"content_id": "2473e1ea4492eedd8d9e906268cac4222e49ad46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 47,
"path": "/operations.py",
"repo_name": "Unlogical/Modulo_calculator",
"src_encoding": "UTF-8",
"text": "from helpers import *\n\n\ndef mod_sum(a, b, mod):\n return (abs(a + b)) % mod\n\n\ndef mod_subtr(a, b, mod):\n return (abs(a - b)) % mod\n\n\ndef mod_mult(a, b, mod):\n return (abs(a * b)) % mod\n\n\ndef mod_div(a, b, mod):\n inverse = mod_inverse(b, mod)\n if inverse.isnumeric():\n return mod_mult(a, inverse, mod)\n return 'Cannot divide'\n\n\ndef mod_pow(a, b, mod):\n return pow(a, b, mod)\n\n\ndef module(a, mod):\n return a % mod\n\n\ndef is_comparable(a, b, mod):\n return (a % mod) == (b % mod)\n\n\ndef is_quadratic_residue(a, mod):\n return len(modular_sqrt(a, mod)) > 0\n\n\ndef mod_inverse(a, mod):\n trinity = extended_euclid(a, mod)\n if trinity[0] > 1:\n return \"doesn't exist\"\n return trinity[1] % mod\n\n\ndef modular_sqrt(a, p):\n return [i for i in range(p) if mod_pow(i, 2, p) == a % p]\n"
},
{
"alpha_fraction": 0.61238694190979,
"alphanum_fraction": 0.61238694190979,
"avg_line_length": 21.809524536132812,
"blob_id": "59b33842e28db0371a19269e870cb2148e64281d",
"content_id": "0288b0b4e798579769ebcac9d2036266d12f5b2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1437,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 63,
"path": "/calculator.py",
"repo_name": "Unlogical/Modulo_calculator",
"src_encoding": "UTF-8",
"text": "from operations import *\n\nunary_operations = {\n \"?\": is_quadratic_residue,\n \"%\": module,\n \"~\": mod_inverse,\n \"sqrt\": modular_sqrt\n}\n\nbinary_operations = {\n \"+\": mod_sum,\n \"-\": mod_subtr,\n \"*\": mod_mult,\n \"/\": mod_div,\n \"^\": mod_pow,\n \"=\": is_comparable\n}\n\n\ndef calculate(operation, mod, a, b):\n if operation in binary_operations:\n return binary_operations[operation](a, b, mod)\n return unary_operations[operation](a, mod)\n\n\ndef input_operand(message):\n try:\n return int(input(message))\n except ValueError as ex:\n print(\"Operand must be integer\")\n return input_operand(message)\n\n\ndef input_operation(message):\n operation = input(message).strip()\n if operation in binary_operations or operation in unary_operations:\n return operation\n else:\n print(\"Invalid operation\")\n return input_operation(message)\n\n\nprint(\"It's a modulo calculator, all operations are already mod\")\nprint(\n \"\"\"\n Operations:\n + -> sum\n - -> subtraction\n * -> multiplication\n / -> division\n ^ -> power\n = -> compare\n ? -> is quadratic residue?\n % -> mod\n ~ -> inverse a\n sqrt -> square root\n \"\"\"\n)\na = input_operand(\"operand a: \")\noperation = input_operation(\"operation: \")\nb = input_operand(\"operand b: \") if operation in binary_operations else None\nmod = input_operand(\"mod: \")\nprint(\"result: \", calculate(operation, mod, a, b))\n"
}
] | 3 | kannangr21/SimpleDjangoLogin | https://github.com/kannangr21/SimpleDjangoLogin | 5e584c083f953cfe5ec0923a059257c187201064 | ed5fee7a60d543fe4c1b409324aefdef15d82873 | b3d9845d7f441e6daabfa0603982e2973a12f13d | refs/heads/main | 2023-05-03T00:25:48.675906 | 2021-05-25T14:29:20 | 2021-05-25T14:29:20 | 370,718,057 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5982275009155273,
"alphanum_fraction": 0.60118168592453,
"avg_line_length": 39.48979568481445,
"blob_id": "d98b3f61e2bc0142b8465e6a17b54a6c199db747",
"content_id": "596befc1249fa7302a37104f5a3837126a0e07d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2031,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 49,
"path": "/enter/views.py",
"repo_name": "kannangr21/SimpleDjangoLogin",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render,redirect\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.models import User, auth\r\nfrom django.contrib.auth import authenticate\r\nfrom django.db import IntegrityError\r\n\r\ndef login(request):\r\n if request.method == \"POST\":\r\n un = request.POST['username']\r\n pw = request.POST['password']\r\n user = auth.authenticate(username=un,password=pw) \r\n if user is not None:\r\n auth.login(request,user)\r\n return redirect(\"travel/\")\r\n else:\r\n messages.error(request,\"Authentication Failed! Please re-enter the credentials\")\r\n return render(request,\"index_login.html\")\r\n else:\r\n return render(request,\"index_login.html\")\r\ndef signup(request):\r\n if request.method == \"POST\":\r\n fname = request.POST['fname']\r\n lname = request.POST['lname']\r\n email = request.POST['email']\r\n username = request.POST['username']\r\n pass1 = request.POST['pass']\r\n pass2 = request.POST['re_pass']\r\n if(pass1!=pass2 or pass1 == None or pass2 == None):\r\n messages.error(request,\"Password Mismatch!\")\r\n return render(request,'index_signup.html')\r\n else:\r\n try:\r\n user = User.objects.create_user(username=username, first_name=fname, last_name=lname, password=pass1, email=email)\r\n user.save()\r\n messages.info(request,\"User created successfully!!\")\r\n return redirect('/')\r\n except(ValueError):\r\n messages.error(request,\"Invalid Data input!\")\r\n return render(request,'index_signup.html')\r\n except(IntegrityError):\r\n messages.error(request,\"User already exists! Please login to visit the page.\")\r\n return render(request,'index_signup.html')\r\n \r\n else:\r\n return render(request,\"index_signup.html\")\r\n\r\ndef logout(request):\r\n auth.logout(request)\r\n return redirect(\"/\")"
},
{
"alpha_fraction": 0.8240740895271301,
"alphanum_fraction": 0.8240740895271301,
"avg_line_length": 35,
"blob_id": "618d98759988110947f29b79b2bd9b132472c22e",
"content_id": "066940106139d06fb7ee19b9b8bebceea6e09788",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 3,
"path": "/README.md",
"repo_name": "kannangr21/SimpleDjangoLogin",
"src_encoding": "UTF-8",
"text": "# SimpleDjangoLogin\nThis is my first project using Python Django framework.\nStatic content is not uploaded.\n"
},
{
"alpha_fraction": 0.6770833134651184,
"alphanum_fraction": 0.6927083134651184,
"avg_line_length": 25.428571701049805,
"blob_id": "4bb653b39635f49f110abf53166c006f4b6c39a4",
"content_id": "0453a1fd93c899654cfb0f8129da5f74b1b9478f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 7,
"path": "/travel/models.py",
"repo_name": "kannangr21/SimpleDjangoLogin",
"src_encoding": "UTF-8",
"text": "from django.db import models\r\n\r\n\r\nclass Destination(models.Model):\r\n place = models.CharField(max_length=100)\r\n desc = models.TextField()\r\n img = models.ImageField(upload_to='pics')\r\n"
},
{
"alpha_fraction": 0.626334547996521,
"alphanum_fraction": 0.6441280841827393,
"avg_line_length": 30.423076629638672,
"blob_id": "3217cd16f4db98402664e5ab3b05210b14901fc1",
"content_id": "650cdd032f6dad706035e177133f3090cc2fb65a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 843,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 26,
"path": "/travel/views.py",
"repo_name": "kannangr21/SimpleDjangoLogin",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render,redirect\r\nfrom .models import Destination\r\nfrom django.contrib import messages\r\n# Create your views here.\r\n\r\n\r\ndef errorlog(request):\r\n messages.info(request,\"Please login to view the content\")\r\n return redirect(\"/\")\r\n \r\ndef index(request):\r\n de1 = Destination()\r\n de1.place = \"Trichy, Tamil Nadu\"\r\n de1.desc = \"Best part of life!!\"\r\n de1.img = \"tpj.jpg\"\r\n de2 = Destination()\r\n de2.place = \"Chennai, Tamil Nadu\"\r\n de2.desc = \"The Capital\"\r\n de2.img = \"chn.jpg\"\r\n de3 = Destination()\r\n de3.place = \"Salem, Tamil Nadu\"\r\n de3.desc = \"City of Mangoes\"\r\n de3.img = \"slm.jpg\"\r\n objs = [de1,de2,de3]\r\n d=\"Thanks for visiting this Project! This is my first project using the framework 'DJANGO'\"\r\n return render(request,\"index.html\",{'objs':objs,'descrip':d})\r\n"
},
{
"alpha_fraction": 0.6679389476776123,
"alphanum_fraction": 0.6679389476776123,
"avg_line_length": 31,
"blob_id": "4ed72c532139722ee4d80ce76f5f5f5be401b1fc",
"content_id": "83973a68761c0a49e9e903eeaa68778d5fd378ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 8,
"path": "/enter/urls.py",
"repo_name": "kannangr21/SimpleDjangoLogin",
"src_encoding": "UTF-8",
"text": "from django.urls import path,include\r\nfrom . import views\r\nurlpatterns=[\r\n path(\"\",views.login,name=\"home\"),\r\n path(\"login\",views.login,name=\"login\"),\r\n path(\"signup\",views.signup,name=\"signup\"),\r\n path(\"travel/logout\",views.logout,name=\"logout\"),\r\n]"
}
] | 5 |
Mezzomaniac/goodlibrary
|
https://github.com/Mezzomaniac/goodlibrary
|
5c8149947c0e6ad432f8d35dd3fd2df3f227d819
|
c0a26f377f82087a5adf76d503349d3e0e497b6f
|
820adef7299d566151b2659910859209a9351c61
|
refs/heads/master
| 2020-07-01T10:39:12.309231 | 2019-08-14T12:43:21 | 2019-08-14T12:43:21 | 201,149,883 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7043235898017883,
"alphanum_fraction": 0.7085076570510864,
"avg_line_length": 43.75,
"blob_id": "78cf06076c7c9cf6a8a772228d2c22553cd36d92",
"content_id": "ec3d94b56fd779cb75ce6ed4d4ce6e84a0aae3b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 717,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 16,
"path": "/goodlibrary/forms.py",
"repo_name": "Mezzomaniac/goodlibrary",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileRequired, FileAllowed\nfrom wtforms import SelectMultipleField, SubmitField\nfrom wtforms.validators import InputRequired\n\n\nclass ListSearchForm(FlaskForm):\n file = FileField(\n 'Step 2: Upload your Goodreads list csv file', \n [FileRequired('Please upload the list'), FileAllowed(['csv'], 'That appears to be the wrong file')])\n libraries = SelectMultipleField(\n 'Step 3: Choose libraries to search (select multiple by holding Ctrl)', \n [InputRequired('Please select at least 1 library')],\n choices=[\n ('perth', 'Australia/WA/Perth/City of Perth Library')])\n submit = SubmitField('Search')\n\n"
},
{
"alpha_fraction": 0.5965664982795715,
"alphanum_fraction": 0.6309012770652771,
"avg_line_length": 20.18181800842285,
"blob_id": "e9f36537842b6368da306e5c71bb21e4b4f7b448",
"content_id": "d27190e5805efeb6161c5ece997f28d5592cbb6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 11,
"path": "/goodlibrary/config.py",
"repo_name": "Mezzomaniac/goodlibrary",
"src_encoding": "UTF-8",
"text": "import os\n\nclass Config:\n SECRET_KEY = os.urandom(16)\n VERSION = '0.0.0'\n \n TESTING = True\n SEND_FILE_MAX_AGE_DEFAULT = 0 # For development only\n \n SESSION_PERMANENT = False\n #PERMANENT_SESSION_LIFETIME = 60\n"
},
{
"alpha_fraction": 0.45329248905181885,
"alphanum_fraction": 0.45482388138771057,
"avg_line_length": 30.095237731933594,
"blob_id": "12d39138228a02bc935fec4f729882cf64f8d671",
"content_id": "95326bd9761d5f116c24b9929a1e47ad8b501c86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1306,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 42,
"path": "/goodlibrary/templates/base.html",
"repo_name": "Mezzomaniac/goodlibrary",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n {% if title %}\n <title>{{ title }} - GoodLibrary</title>\n <meta name=\"description\" content=\"{{ title }}\">\n {% else %}\n <title>GoodLibrary</title>\n {% endif %}\n <meta name=\"author\" content=\"Jeremy Thomas London\">\n {% block keywords %}\n <meta name=\"keywords\" content=\"book, books, Goodreads, library\">\n {% endblock %}\n <!--link rel=\"stylesheet\" type=\"text/css\" href=\"{{ url_for('static', filename='css/style.css') }}\"-->\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, user-scalable=yes\">\n {% block scripts %}{% endblock %}\n </head>\n \n <body>\n <header>\n GoodLibrary\n <nav>\n <a href=\"{{ url_for('home') }}\">Home</a>\n <a href=\"{{ url_for('test') }}\">test</a>\n </nav>\n <hr>\n </header>\n {% with messages = get_flashed_messages() %}\n {% if messages %}\n <ul>\n {% for message in messages %}\n <li>{{ message }}</li>\n {% endfor %}\n </ul>\n {% endif %}\n {% endwith %}\n <main role=\"main\">\n {% block content %}{% endblock %}\n </main>\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.6814814805984497,
"alphanum_fraction": 0.6814814805984497,
"avg_line_length": 30.904762268066406,
"blob_id": "089f22a85da0b6e342399a55fc17884f0c00391f",
"content_id": "5d2916367fe908c45d1e72b7fe09bfbb2477ff7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 675,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 21,
"path": "/goodlibrary/routes.py",
"repo_name": "Mezzomaniac/goodlibrary",
"src_encoding": "UTF-8",
"text": "from flask import redirect, render_template, url_for#, flash, g, request, session\n\nfrom goodlibrary import app\nfrom goodlibrary.forms import ListSearchForm\nfrom goodlibrary.processing import search\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n form = ListSearchForm()\n if form.validate_on_submit():\n return 'file upload test ok'\n file = form.file.data\n libraries = form.libraries.data\n results = search(file, libraries)\n return render_template('results.html', title='Search results')\n return render_template('home.html', title='Home', form=form)\n\[email protected]('/test')\ndef test():\n return redirect(url_for('home'))\n \n"
},
{
"alpha_fraction": 0.7905405163764954,
"alphanum_fraction": 0.7905405163764954,
"avg_line_length": 20.14285659790039,
"blob_id": "0247822cdcff0fffd17296bc9575633214bf691c",
"content_id": "fc2dfffc069c9c9a74c1282d74a6716696d4a790",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/goodlibrary/__init__.py",
"repo_name": "Mezzomaniac/goodlibrary",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom goodlibrary.config import Config\n\napp = Flask(__name__)\napp.config.from_object(Config)\n\nfrom goodlibrary import routes\n"
},
{
"alpha_fraction": 0.6785404086112976,
"alphanum_fraction": 0.6820156574249268,
"avg_line_length": 33.878787994384766,
"blob_id": "785ec4e142a6862919c317b4acb7911536aaa617",
"content_id": "507e5fc6b859bedd860c61e768a1ab84d9fdecc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1151,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 33,
"path": "/goodlibrary/processing.py",
"repo_name": "Mezzomaniac/goodlibrary",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\nimport csv\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nBook = namedtuple('Book', 'author title libraries')\n# TODO: Differentiate between print and electronic editions\n\ndef search(file, libraries):\n books = process_file(file)\n for book in data:\n for library in libraries:\n # TODO: Search asynchronously\n book[library] = library(book)\n return books\n\ndef process_file(file):\n with open(file) as file:\n data = csv.reader(file)\n #return (Book(col[ ], col[ ]) for row in data for col in row)\n\ndef perth(book):\n URL = 'https://perth.ent.sirsidynix.net.au/client/en_AU/internal/search/results?qu=&qu=TITLE%3D{}+&qu=AUTHOR%3D{}+'\n title = '+'.join(book.title.split())\n author = '+'.join(book.author.split())\n response = requests.get(URL.format(title, author))\n soup = BS(r.text, 'html5lib')\n search_results = (result.parent for result in soup.find(id='searchViewDISCOVERY_ALL').find_all(text=book.title))\n # TODO: Extract and return availability status\n\ndef stirling(book):\n URL = 'https://libraries.stirling.wa.gov.au/client/en_GB/stirling'\n"
}
] | 6 |
i-aki-y/dend-data-warehouse
|
https://github.com/i-aki-y/dend-data-warehouse
|
55b550a24934f21aa838a991041c20bc91b8378a
|
8ef9eb7dd517ef8cd88e06deaaa52afdab8ef707
|
04d7b75100d24520bbc6c88154d578e8054f267f
|
refs/heads/master
| 2022-12-11T04:47:39.158144 | 2019-05-12T11:03:47 | 2019-05-12T11:03:47 | 186,199,611 | 0 | 0 | null | 2019-05-12T01:50:26 | 2019-05-12T11:04:00 | 2022-12-07T23:53:41 |
Python
|
[
{
"alpha_fraction": 0.5902712941169739,
"alphanum_fraction": 0.5916744470596313,
"avg_line_length": 30.91044807434082,
"blob_id": "8b0ed79bd90198a50597752a77c8e05660649d6d",
"content_id": "0133f07fe4ab7efe43541425b14d3f9f6ca0b1c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2138,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 67,
"path": "/setupcluster.py",
"repo_name": "i-aki-y/dend-data-warehouse",
"src_encoding": "UTF-8",
"text": "import os\nimport pandas as pd\nimport configparser\n\nimport boto3\n\n\ndef main():\n \"\"\"Setup redshift cluster by using the configuration file\"\"\"\n config = configparser.ConfigParser()\n config.read_file(open('dwh.cfg'))\n\n # Some parameters are defined as Environment Variable\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n IAM_ROLE = os.environ.get(\"DWH_AWS_ROLE_ARN\")\n\n # load parameters from config file\n DWH_CLUSTER_TYPE = config.get(\"CLUSTER\", \"CLUSTER_TYPE\")\n DWH_NUM_NODES = config.get(\"CLUSTER\", \"NUM_NODES\")\n DWH_NODE_TYPE = config.get(\"CLUSTER\", \"NODE_TYPE\")\n DWH_CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n DWH_DB = config.get(\"CLUSTER\", \"DB_NAME\")\n DWH_DB_USER = config.get(\"CLUSTER\", \"DB_USER\")\n DWH_DB_PASSWORD = config.get(\"CLUSTER\", \"DB_PASSWORD\")\n DWH_PORT = config.get(\"CLUSTER\", \"DB_PORT\")\n\n settings = pd.DataFrame({\n \"Param\": [\n \"DWH_CLUSTER_TYPE\", \"DWH_NUM_NODES\", \"DWH_NODE_TYPE\",\n \"DWH_CLUSTER_IDENTIFIER\", \"DWH_DB\", \"DWH_DB_USER\",\n \"DWH_DB_PASSWORD\", \"DWH_PORT\", \"IAM_ROLE\"],\n \"Value\": [\n DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE,\n DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER,\n DWH_DB_PASSWORD, DWH_PORT, IAM_ROLE]\n })\n\n # show configs\n for _, row in settings.iterrows():\n print(row[\"Param\"], row[\"Value\"])\n\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n\n # Create cluster\n try:\n response = redshift.create_cluster(\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n IamRoles=[IAM_ROLE]\n )\n except Exception as e:\n print(e)\n\n # put resoponse\n print(response)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.672263503074646,
"alphanum_fraction": 0.6958346962928772,
"avg_line_length": 25.69827651977539,
"blob_id": "f284b19190b5018e155825424f2f3181b9d3525b",
"content_id": "eae0b97b6402f8a303f8690355f04e42f2ec8875",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6398,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 232,
"path": "/README.md",
"repo_name": "i-aki-y/dend-data-warehouse",
"src_encoding": "UTF-8",
"text": "# Data Warehouse\n\nThis is a udacity's data engineer nano-degree project.\n\n## About the project\n\nIn this project, we setup the ETL pipeline which move raw json data stored in S3 to cloud distributed database (Redshift).\nIn this process, the raw data moved from S3 to the staging table and then transform the data of the staging table to target tables which are optimized for business requirement. \n\nThe pipeline contains the following processes:\n1. Setup distributed database cluster in AWS.\n2. Define database tables which include staging and target tables.\n3. Load json data stored in S3 into staging tables.\n4. Preprocess data in the staging tables to fit the target tables where the schema is optimized for business analysis.\n5. Insert preprocessed data into target tables.\n\n\n\n\n## Contents\n\n`dwh.cfg`: definitions of parameters such as S3 url and cluster settings. Note that some parameters are passed by environment variables instead of the configuration file.\n\n`etl.py`: loads `song_data` and `log_data` data from S3 to staging tables, and transform staging data into target tables.\n\n`sql_queries.py`: defines all sql queries used this project.\n\n`README.md`: this document.\n\n`setupcluster.py`: setup redshift cluster automatically by using aws sdk (`boto3`).\n\n`create_tables.py`: drops and creates tables. You run this file to reset your tables before each time you run your ETL scripts.\n\n`sql_queries.py`: contains all your sql queries, and is imported into the last three files above.\n\n`README.md`: this document.\n\n`requirements.txt`: dependencies are defined, which is created by `pip freeze` in the project workspace.\n\n`test_count_record.py`: run queries that count records of each table.\n\n`shutdowncluster.py`: remove \n\n## Dataset\nThe raw data for this project are not included in this repository. \nThey are stored in S3. The urls are defined in `dwh.cfg`.\n\n```\nLOG_DATA='s3://udacity-dend/log-data'\nLOG_JSONPATH='s3://udacity-dend/log_json_path.json'\nSONG_DATA='s3://udacity-dend/song-data'\n```\n\nThe `song-dataset` is a subset of the [Million Song Dataset](https://labrosa.ee.columbia.edu/millionsong/).\nThe `log-dataset` is generated by this [event simulator](https://github.com/Interana/eventsim) based on the songs.\nThe `log_json_path.json` is a definition file of log dataset. The [JSONPath](https://docs.aws.amazon.com/ja_jp/redshift/latest/dg/copy-usage_notes-copy-from-json.html) specifies mappings from json's item to staging table columns.\n\nThe data are stored in the multiple files in the following layout.\n\n```\n$ tree ./data\n\ndata\n├── log_data\n│ └── 2018\n│ └── 11\n│ ├── 2018-11-01-events.json\n...\n│ └── 2018-11-30-events.json\n└── song_data\n └── A\n ├── A\n │ ├── A\n │ │ ├── TRAAAAW128F429D538.json\n...\n │ │ └── TRAAAVO128F93133D4.json\n │ ├── B\n │ │ ├── TRAABCL128F4286650.json\n...\n │ │ └── TRAABYW128F4244559.json\n │ └── C\n │ ├── TRAACCG128F92E8A55.json\n...\n │ └── TRAACZK128F4243829.json\n └── B\n ├── A\n │ ├── TRABACN128F425B784.json\n...\n │ └── TRABAZH128F930419A.json\n ├── B\n │ ├── TRABBAM128F429D223.json\n...\n │ └── TRABBZN12903CD9297.json\n └── C\n ├── TRABCAJ12903CDFCC2.json\n...\n └── TRABCYE128F934CE1D.json\n```\n\n## Requirement\n\nThis project assumes that you have created IAM user that has `AmazonRedshiftFullAccess` and `AmazonS3ReadOnlyAccess`.\nThe dependencies of python packages are defined in `requirements.txt`. In the project's directory, the following command will install the required dependencies. 
\n\n```\npip3 install pip install -r requirements.txt\n```\n\nTo avoid some sensitive information comes into the script directory, I defined environment variables.\nTo run the scripts correctly, you should define the following variable your system in advance.\n\n```\nDWH_AWS_KEY=\"AWS API KEY Access key ID of IAM user with full access of Redshift\"\nDWH_AWS_SECRET=\"Secret access key\"\nDWH_AWS_ROLE_ARN=\"Corresponding role's ARN\"\n\nexport DWH_AWS_KEY\nexport DWH_AWS_SECRET\nexport DWH_AWS_ROLE_ARN\n```\n\n## Usage\nPrepare AWS account and create IAM user. Set appropriate value in `dwh.cfg` and define environment variables.\n\nSetup cluster by the following command.\nYou should confirm that redshift cluster appropriately setting up at your aws console.\nYou also setup cluster manually from aws console.\n```\npython setupcluster.py \n```\n\nCreate staging and target tables.\n```\npython create_tables.py\n```\n\nRun `etl.py` to import S3 data to tables. It takes about 1 hour.\n\n```\npython etl.py\n```\n\nYou can check how many records are imported by using the following command\n```\npython test_count_record.py\n```\n\nShutdown created the cluster. You also shutdown your cluster manually from aws console.\n```\npython shutdowncluster.py \n```\n\n## About Table Schema\n\n### Table Definition\n\nIn this project, the following tables are defined.\n\n#### Fact Table\n\n* songplays - records in log data associated with song plays i.e. records with page NextSong\n - songplay_id\n - start_time\n - user_id\n - level\n - song_id\n - artist_id\n - session_id\n - location\n - user_agent\n\n#### Dimension Tables\n* users - users in the app\n - user_id\n - first_name\n - last_name\n - gender\n - level\n\n* songs - songs in the music database\n - song_id\n - title\n - artist_id\n - year\n - duration\n\n* artists - artists in the music database\n - artist_id\n - name\n - location\n - lattitude\n - longitude\n\n* time - timestamps of records in songplays broken down into specific units\n - start_time\n - hour\n - day\n - week\n - month\n - year\n - weekday\n\n#### Staging Tables\n\n* staging_events\n - artist\n - gender\n - lastName\n - firstName\n - level\n - sessionId\n - song\n - ts\n - location\n - userAgent\n - userId\n\n* staging_songs\n - artist_id\n - artist_latitude\n - artist_longitude\n - artist_name\n - song_id\n - title\n - duration\n - year\n\n### ER diagram\n\nThe table schema can be visualized by using Entity Relational Diagram. The following figure describes the table schema of the project.\n\n\n"
},
{
"alpha_fraction": 0.5746508836746216,
"alphanum_fraction": 0.5800215005874634,
"avg_line_length": 25.600000381469727,
"blob_id": "21b7b92777e1a9a9dfad3a0a5b00ec153926f802",
"content_id": "f398431038d557cc5317fc83f5a083b0462852be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 931,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 35,
"path": "/test_count_records.py",
"repo_name": "i-aki-y/dend-data-warehouse",
"src_encoding": "UTF-8",
"text": "import configparser\nimport psycopg2\nfrom create_tables import get_hostname\n\n\ndef main():\n \"\"\"Confirm that the data are correctly imported\"\"\"\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n hostname = get_hostname(config)\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(\n hostname,\n config.get(\"CLUSTER\", \"DB_NAME\"),\n config.get(\"CLUSTER\", \"DB_USER\"),\n config.get(\"CLUSTER\", \"DB_PASSWORD\"),\n config.get(\"CLUSTER\", \"DB_PORT\"))\n )\n\n cur = conn.cursor()\n count_query = \"select count(*) from {}\"\n tables = [\"staging_songs\", \"staging_events\",\n \"songplays\", \"users\", \"artists\", \"songs\", \"time\"]\n\n for table in tables:\n cur.execute(count_query.format(table))\n for r in cur:\n print(\"{0}:{1}\".format(table, r[0]))\n\n conn.commit()\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.625798225402832,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 28,
"blob_id": "934cf3226058d35fe21380f64becfd5e0edfcb1b",
"content_id": "f1d6fbe5bf4ebbb26912204d70881fdcc83c035e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 783,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 27,
"path": "/shutdowncluster.py",
"repo_name": "i-aki-y/dend-data-warehouse",
"src_encoding": "UTF-8",
"text": "import os\nimport configparser\nimport boto3\n\n\ndef main():\n \"\"\"Shutdown created redshift cluster\"\"\"\n config = configparser.ConfigParser()\n config.read_file(open('dwh.cfg'))\n\n # Some parameters are defined as Environment Variable\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n\n # load parameters from config file\n DWH_CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n\n redshift.delete_cluster(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n SkipFinalClusterSnapshot=True)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6264916658401489,
"alphanum_fraction": 0.6300715804100037,
"avg_line_length": 28.40350914001465,
"blob_id": "4ba8142dc24a033adfbbbbfda016818434cd91c8",
"content_id": "77a40e7a812934e1bc210214d95b5cf85b33bcad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1676,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 57,
"path": "/create_tables.py",
"repo_name": "i-aki-y/dend-data-warehouse",
"src_encoding": "UTF-8",
"text": "import os\nimport configparser\nimport boto3\nimport psycopg2\nfrom sql_queries import create_table_queries, drop_table_queries\n\n\ndef get_hostname(config):\n \"\"\"Get hostname of redshift by using cluster identifier\"\"\"\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n cluster_props = redshift.describe_clusters(\n ClusterIdentifier=CLUSTER_IDENTIFIER)['Clusters'][0]\n endpoint = cluster_props[\"Endpoint\"][\"Address\"]\n return endpoint\n\n\ndef drop_tables(cur, conn):\n \"\"\"Drop tables created by create_tables method if they exist\"\"\"\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()\n\n\ndef create_tables(cur, conn):\n \"\"\"Create stable and target tables if they don't exists\"\"\"\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()\n\n\ndef main():\n \"\"\"Creat tables in the redshift cluster\"\"\"\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n hostname = get_hostname(config)\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(\n hostname,\n config.get(\"CLUSTER\", \"DB_NAME\"),\n config.get(\"CLUSTER\", \"DB_USER\"),\n config.get(\"CLUSTER\", \"DB_PASSWORD\"),\n config.get(\"CLUSTER\", \"DB_PORT\"))\n )\n\n cur = conn.cursor()\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6911259889602661,
"alphanum_fraction": 0.7081149220466614,
"avg_line_length": 23.161571502685547,
"blob_id": "aa388f0275bd9bb42843cf382b76bb7f57f78b45",
"content_id": "6307e9ebdf4bed63a8178da47e04b1e327382830",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5533,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 229,
"path": "/sql_queries.py",
"repo_name": "i-aki-y/dend-data-warehouse",
"src_encoding": "UTF-8",
"text": "import os\nimport configparser\n\n\n# CONFIG\nconfig = configparser.ConfigParser()\nconfig.read('dwh.cfg')\n\n# DROP TABLES\n\nstaging_events_table_drop = \"drop table if exists staging_events\"\nstaging_songs_table_drop = \"drop table if exists staging_songs\"\nsongplay_table_drop = \"drop table if exists songplays\"\nuser_table_drop = \"drop table if exists users\"\nsong_table_drop = \"drop table if exists songs\"\nartist_table_drop = \"drop table if exists artists\"\ntime_table_drop = \"drop table if exists time\"\n\n# CREATE TABLES\n\nstaging_events_table_create = (\"\"\"\ncreate table if not exists staging_events (\n artist varchar(max),\n auth varchar(50),\n firstName varchar(15),\n gender varchar(1),\n itemInSession int,\n lastName varchar(15),\n length numeric(10, 5),\n level varchar(5),\n location varchar(max),\n method varchar(15),\n page varchar(100),\n registration varchar(25),\n sessionId int,\n song varchar(max),\n status int,\n ts bigint,\n userAgent varchar(max),\n userId int\n)\n\"\"\")\n\nstaging_songs_table_create = (\"\"\"\ncreate table if not exists staging_songs (\n artist_id varchar(18) not null,\n artist_latitude numeric(8, 5),\n artist_longitude numeric(8, 5),\n artist_location varchar(max),\n artist_name varchar(max) not null sortkey,\n song_id varchar(18) not null,\n title varchar(max) not null,\n duration numeric(10, 5),\n year int\n)\n\"\"\")\n\nsongplay_table_create = (\"\"\"\ncreate table if not exists songplays (\nsongplay_id int identity(0, 1) not null primary key,\nstart_time bigint not null,\nuser_id int not null,\nlevel varchar(10),\nsong_id varchar(18) not null,\nartist_id varchar(18) not null,\nsession_id int not null,\nlocation varchar(max),\nuser_agent varchar(max)\n);\n\"\"\")\n\nuser_table_create = (\"\"\"\ncreate table if not exists users (\nuser_id int not null primary key,\nfirst_name varchar(15),\nlast_name varchar(15),\ngender varchar(1),\nlevel varchar(5)\n)\ndiststyle all;\n\"\"\")\n\nsong_table_create = (\"\"\"\ncreate table if not exists songs (\nsong_id varchar(18) not null primary key sortkey ,\ntitle varchar(max) not null,\nartist_id varchar(18) not null,\nyear int,\nduration numeric(10,5) not null\n)\ndiststyle all;\n;\n\"\"\")\n\nartist_table_create = (\"\"\"\ncreate table if not exists artists (\nartist_id varchar(18) not null primary key sortkey,\nname varchar(max) not null,\nlocation varchar(max),\nlatitude numeric(8, 5),\nlongitude numeric(8, 5)\n)\ndiststyle all;\n\"\"\")\n\ntime_table_create = (\"\"\"\ncreate table if not exists time (\nstart_time bigint not null primary key sortkey,\nhour int not null,\nday int not null,\nweek int not null,\nmonth int not null,\nyear int not null,\nweekday int not null\n);\n\"\"\")\n\n# STAGING TABLES\n\n# load ROLE_ARN from Environment Variable\nIAM_ROLE = os.environ.get(\"DWH_AWS_ROLE_ARN\")\n\nstaging_events_copy = (\"\"\"\ncopy staging_songs\nfrom {0}\niam_role '{1}'\njson 'auto'\nCOMPUPDATE OFF STATUPDATE OFF\n\"\"\").format(\n config.get('S3', 'SONG_DATA'),\n IAM_ROLE)\n\nstaging_songs_copy = (\"\"\"\ncopy staging_events\nfrom {0}\niam_role '{1}'\njson {2}\n\"\"\").format(\n config.get('S3', 'LOG_DATA'),\n IAM_ROLE,\n config.get('S3', 'LOG_JSONPATH')\n)\n\n# FINAL TABLES\n\nsongplay_table_insert = (\"\"\"\nINSERT INTO songplays (start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)\nselect\n\tcast(se.ts as bigint) as start_time ,\n\tse.userId as user_id ,\n\tse.level ,\n\tsa.song_id ,\n\tsa.artist_id ,\n\tse.sessionid as session_id ,\n\tse.location 
,\n\tse.useragent as user_agent\nfrom\n\tstaging_events as se\njoin (\n\tselect\n\t\ta.artist_id as artist_id,\n\t\ts.song_id as song_id,\n\t\ta.name as name,\n\t\ts.title as title,\n\t\ts.duration as duration\n\tfrom\n\t\tsongs as s\n\tjoin artists as a on\n\t\ta.artist_id = s.artist_id ) as sa on\n\tse.artist = sa.name\n\tand se.song = sa.title\n\tand se.length = sa.duration\n\"\"\")\n\nuser_table_insert = (\"\"\"\ninsert into users (user_id, first_name, last_name, gender, level)\nselect distinct\n userId\n , firstName\n , lastName\n , gender\n , level\nfrom staging_events\nwhere userId is not null\n\"\"\")\n\nsong_table_insert = (\"\"\"\ninsert into songs (song_id, title, artist_id, year, duration)\nselect\n song_id\n , title\n , artist_id\n , year\n , duration\nfrom staging_songs\nwhere song_id is not null\n\"\"\")\n\nartist_table_insert = (\"\"\"\ninsert into artists (artist_id, name, location, latitude, longitude)\nselect\n artist_id\n , artist_name\n , artist_location\n , artist_latitude\n , artist_longitude\nfrom staging_songs\nwhere artist_id is not null\n\"\"\")\n\ntime_table_insert = (\"\"\"\ninsert into time (start_time, hour, day, week, month, year, weekday)\nselect\n ts\n , EXTRACT(hour from timestamp 'epoch' + (ts/1000) * interval '1 second')\n , EXTRACT(day from timestamp 'epoch' + (ts/1000) * interval '1 second')\n , EXTRACT(week from timestamp 'epoch' + (ts/1000) * interval '1 second')\n , EXTRACT(month from timestamp 'epoch' + (ts/1000) * interval '1 second')\n , EXTRACT(year from timestamp 'epoch' + (ts/1000) * interval '1 second')\n , EXTRACT(weekday from timestamp 'epoch' + (ts/1000) * interval '1 second')\nfrom (select cast(ts as bigint) as ts from staging_events)\n\"\"\")\n\n# QUERY LISTS\n\ncreate_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]\ndrop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]\ncopy_table_queries = [staging_events_copy, staging_songs_copy]\ninsert_table_queries = [user_table_insert, song_table_insert, artist_table_insert, time_table_insert, songplay_table_insert]\n"
}
] | 6 |
chiphuyen/tvm
|
https://github.com/chiphuyen/tvm
|
feca87fe32be72eaff2a42a80544f30d5b5ef410
|
d0791d3db971a111826d96201bd1e4c9c0d531da
|
ae29067d6fc4016d355fb2f6d938a6706cfd057a
|
refs/heads/main
| 2023-06-01T11:22:17.543016 | 2021-06-23T15:15:09 | 2021-06-23T15:15:09 | 379,728,501 | 2 | 0 |
Apache-2.0
| 2021-06-23T21:02:58 | 2021-06-23T16:35:21 | 2021-06-23T20:06:37 | null |
[
{
"alpha_fraction": 0.6945736408233643,
"alphanum_fraction": 0.7003875970840454,
"avg_line_length": 32.07692337036133,
"blob_id": "ffe461f3acd14cc490d0db58024752a0ab63b368",
"content_id": "3ea15438fe8fa15032d440ece7f668accfef5e0b",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2580,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 78,
"path": "/src/relay/backend/utils.cc",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n/*!\n * \\file relay/backend/util.cc\n * \\brief Relay backend utilities.\n */\n\n#include \"utils.h\"\n\nnamespace tvm {\nnamespace relay {\nnamespace backend {\n\nTVM_REGISTER_NODE_TYPE(StorageInfoNode);\n\nStorageInfo::StorageInfo(std::vector<int64_t> storage_ids, std::vector<DLDeviceType> device_types,\n std::vector<int64_t> storage_sizes_in_bytes) {\n auto n = make_object<StorageInfoNode>();\n n->storage_ids = std::move(storage_ids);\n n->device_types = std::move(device_types);\n n->storage_sizes_in_bytes = std::move(storage_sizes_in_bytes);\n data_ = std::move(n);\n}\n\nTVM_REGISTER_NODE_TYPE(StaticMemoryPlanNode);\n\nStaticMemoryPlan::StaticMemoryPlan(Map<Expr, StorageInfo> expr_to_storage_info) {\n auto n = make_object<StaticMemoryPlanNode>();\n n->expr_to_storage_info = std::move(expr_to_storage_info);\n data_ = std::move(n);\n}\n\nint64_t CalculateRelayExprSizeBytes(const Type& expr_type) {\n if (expr_type->IsInstance<TupleTypeNode>()) {\n auto tuple_type = Downcast<TupleType>(expr_type);\n int64_t size = 0;\n for (const auto& field : tuple_type->fields) {\n size += CalculateRelayExprSizeBytes(field);\n }\n return size;\n }\n auto tensor_type = expr_type.as<TensorTypeNode>();\n auto shape = tensor_type->shape;\n int num_of_elements = 1;\n for (const auto& dim_index_expr : shape) {\n if (dim_index_expr->IsInstance<IntImmNode>()) {\n num_of_elements *= dim_index_expr.as<IntImmNode>()->value;\n } else {\n // If shape is dynamic, we cannot calculate workspace in compile time.\n num_of_elements = 0;\n }\n }\n auto element_size = tensor_type->dtype.bytes();\n return element_size * num_of_elements;\n}\n\nTVM_REGISTER_NODE_TYPE(FunctionInfoNode);\n\n} // namespace backend\n} // namespace relay\n} // namespace tvm\n"
},
{
"alpha_fraction": 0.627378523349762,
"alphanum_fraction": 0.6329207420349121,
"avg_line_length": 31.608434677124023,
"blob_id": "94b132cfad888dd2997ded667275e43adbc8e64a",
"content_id": "5f4c53772eecb35a7a74eccec7fe172f02ae12f0",
"detected_licenses": [
"Zlib",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5413,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 166,
"path": "/python/tvm/relay/transform/fake_quantization_to_integer.py",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Relay functions for rewriting fake quantized ops.\"\"\"\nimport tvm\nfrom tvm import relay\nfrom ..op import register_fake_quantization_to_integer\n\n\ndef fold_constant(expr):\n mod = tvm.IRModule.from_expr(expr)\n mod = relay.transform.FoldConstant()(mod)\n return mod[\"main\"].body\n\n\n@register_fake_quantization_to_integer(\"qnn.dequantize\")\ndef dequantize(expr, type_map):\n \"\"\"Remove dequantize op\"\"\"\n out = expr.args[0]\n t = type_map[expr]\n return [out, t.scale, t.zero_point, t.dtype]\n\n\n@register_fake_quantization_to_integer(\"qnn.quantize\")\ndef quantize(expr, type_map):\n \"\"\"Turn a quantize op into requantize or remove it\"\"\"\n out = expr.args[0]\n t = type_map[out]\n in_scale = fold_constant(t.scale)\n in_zero_point = fold_constant(t.zero_point)\n if not (\n tvm.ir.structural_equal(in_scale, expr.args[1])\n and tvm.ir.structural_equal(in_zero_point, expr.args[2])\n and tvm.ir.structural_equal(t.dtype, expr.attrs.out_dtype)\n ):\n out = relay.qnn.op.requantize(\n out,\n in_scale,\n in_zero_point,\n expr.args[1],\n expr.args[2],\n out_dtype=expr.attrs.out_dtype,\n )\n return [out, expr.args[1], expr.args[2], expr.attrs.out_dtype]\n\n\ndef register_unary_identity(op_name, op):\n def identity(expr, type_map):\n assert len(expr.args) == 1\n arg = expr.args[0]\n t = type_map[arg]\n out = op(arg, **expr.attrs)\n return [out, t.scale, t.zero_point, t.dtype]\n\n return register_fake_quantization_to_integer(op_name, identity)\n\n\nregister_unary_identity(\"reshape\", relay.op.reshape)\nregister_unary_identity(\"transpose\", relay.op.transpose)\nregister_unary_identity(\"nn.max_pool2d\", relay.op.nn.max_pool2d)\n\n\n@register_fake_quantization_to_integer(\"nn.avg_pool2d\")\ndef avgpool2d(expr, type_map):\n \"\"\"Rewrite a avgpool op\"\"\"\n arg = expr.args[0]\n t = type_map[arg]\n arg = relay.op.cast(arg, \"int32\")\n out = relay.op.nn.avg_pool2d(arg, **expr.attrs)\n out = relay.op.cast(out, t.dtype)\n return [out, t.scale, t.zero_point, t.dtype]\n\n\n@register_fake_quantization_to_integer(\"nn.bias_add\")\ndef bias_add(expr, type_map):\n \"\"\"Rewrite a bias_add op\"\"\"\n x, b = expr.args\n x_t = type_map[x]\n b_t = type_map[b]\n in_scale = fold_constant(x_t.scale)\n in_zero_point = fold_constant(x_t.zero_point)\n if not tvm.ir.structural_equal(x_t, b_t):\n b = relay.qnn.op.requantize(\n b,\n b_t.scale,\n b_t.zero_point,\n in_scale,\n in_zero_point,\n out_dtype=xt.dtype,\n )\n out = relay.op.nn.bias_add(x, b, **expr.attrs)\n return [out, x_t.scale, x_t.zero_point, x_t.dtype]\n\n\n@register_fake_quantization_to_integer(\"nn.conv2d\")\ndef conv2d(expr, type_map):\n \"\"\"Rewrite a conv2d op\"\"\"\n attrs = {**expr.attrs}\n attrs.pop(\"out_dtype\")\n x, weight = expr.args\n 
x_t = type_map[x]\n w_t = type_map[weight]\n conv_scale = fold_constant(x_t.scale * w_t.scale)\n conv_zp = relay.const(0)\n out = relay.qnn.op.conv2d(\n x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs\n )\n return [out, conv_scale, conv_zp, out.attrs.out_dtype]\n\n\n@register_fake_quantization_to_integer(\"concatenate\")\ndef concat(expr, type_map):\n \"\"\"Rewrite a concat op\"\"\"\n scales = []\n zps = []\n for arg in expr.args[0].fields:\n t = type_map[arg]\n scales.append(t.scale)\n zps.append(t.zero_point)\n\n out_type = type_map[expr]\n\n out = relay.qnn.op.concatenate(\n expr.args[0],\n relay.Tuple(scales),\n relay.Tuple(zps),\n out_type.scale,\n out_type.zero_point,\n **expr.attrs,\n )\n return [out, out_type.scale, out_type.zero_point, out_type.dtype]\n\n\n@register_fake_quantization_to_integer(\"clip\")\ndef clip(expr, type_map):\n \"\"\"Rewrite a clip op\"\"\"\n arg = expr.args[0]\n t = type_map[arg]\n amin = expr.attrs.a_min\n amax = expr.attrs.a_max\n scale = fold_constant(t.scale)\n z_p = fold_constant(t.zero_point)\n if isinstance(scale, relay.expr.Constant) and isinstance(z_p, relay.expr.Constant):\n scale = scale.data.numpy().item()\n z_p = z_p.data.numpy().item()\n new_min = int(amin / scale + z_p)\n new_max = int(amax / scale + z_p)\n out = relay.op.clip(arg, new_min, new_max)\n else:\n amin = relay.op.round(relay.op.const(amin) / scale + z_p)\n amax = relay.op.round(relay.op.const(amax) / scale + z_p)\n out = relay.op.minimum(relay.op.maximum(arg, amin), amax)\n return [out, t.scale, t.zero_point, t.dtype]\n"
},
{
"alpha_fraction": 0.7139163017272949,
"alphanum_fraction": 0.7150476574897766,
"avg_line_length": 40.52349090576172,
"blob_id": "0503f1c82c537d97ff7568e94f557c8e67b28bd1",
"content_id": "dd7fee37e2d134884563dc03eff2f4fa2221c201",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6187,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 149,
"path": "/src/tir/schedule/analysis.h",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n#ifndef TVM_TIR_SCHEDULE_ANALYSIS_H_\n#define TVM_TIR_SCHEDULE_ANALYSIS_H_\n\n#include <tvm/tir/schedule/state.h>\n\nnamespace tvm {\nnamespace tir {\n\n/******** Verification ********/\n/*!\n * \\brief Verifies the sref tree state is consistent with the IR\n * \\param self The schedule state containing the sref to be verified\n * \\throw An exception will be thrown if the sref tree is not valid\n */\nvoid VerifySRefTree(const ScheduleState& self);\n/*!\n * \\brief Verifies the cached flags in the schedule state, including:\n * - affine_binding\n * - region_cover\n * - stage_pipeline\n * \\param self The schedule state to be verified\n * \\throw An exception will be thrown if some srefs are not valid\n */\nvoid VerifyCachedFlags(const ScheduleState& self);\n\n/******** Scope ********/\n/*!\n * \\brief Gets the sref to the scope root block, exclusive\n * \\param sref The block or loop sref to be retrieved\n * \\return The sref to the scope root block. 
NullOpt if `sref` is the root block of the IR\n */\nOptional<StmtSRef> GetScopeRoot(const StmtSRef& sref);\n\n/*!\n * \\brief Checks if scope the specified sref is in is a stage-pipeline and return it\n * \\param prim The name of the schedule primitive\n * \\param self The schedule state\n * \\param sref The sref whose scope is to be checked\n * \\throw ScheduleError if the sref has been the root of the AST (so it has no scope root), or its\n * scope root is not a stage pipeline\n * \\return The block sref to the scope root\n */\nStmtSRef GetScopeRootAndCheckStagePipeline(const ScheduleState& self, const StmtSRef& sref);\n\n/*!\n * \\brief Checks whether the block is a complete block under the scope\n * \\param self The schedule state\n * \\param block_sref The block to be checked\n * \\param scope_root The sref to the root block of the scope that `block_sref` is in\n * \\return A boolean indicating if the block is a complete block\n * \\note Definition of a complete block:\n * 1) All block vars are data parallel\n * 2) Dominant: the block is the only writer of its output,\n * dominating the reader of its output buffers\n * 3) No overlap between the buffers the block reads and writes\n */\nbool IsCompleteBlock(const ScheduleState& self, const StmtSRef& block_sref,\n const StmtSRef& scope_root);\n\n/*!\n * \\brief Checks if the block is a complete block\n * \\param self The schedule state\n * \\param block_sref The sref to the block whose completeness is to be checked\n * \\param scope_root_sref The scope root of the block\n * \\throw ScheduleError If the block is not a complete block\n */\nvoid CheckCompleteBlock(const ScheduleState& self, const StmtSRef& block_sref,\n const StmtSRef& scope_root_sref);\n\n/******** Binding ********/\n/*!\n * \\brief Verifies if the block binding in a specific BlockRealize is an affine binding.\n * The binding can be represented as an injective affine map from the loop iterators.\n * \\param realize The BlockRealize to be analyzed\n * \\param loop_var_ranges The ranges of the loop variables\n * \\param analyzer The analyzer\n * \\return A boolean flag indicating if the binding is affine\n */\nbool IsAffineBinding(const BlockRealize& realize, const Map<Var, Range>& loop_var_ranges,\n arith::Analyzer* analyzer);\n\n/*!\n * \\brief Extracts the ranges of loop variables in a path of the sref tree\n * \\param low_inclusive The lowest node in the path\n * \\param high_exclusive The highest node in the path, defaults to the scope root if not specified\n * \\param extra_relax_scope If the scope is not global, the method will look beyond the limit and\n * retrieve extra domains. 
For example,\n * - if the storage scope is warp, it will look upwards for threadIdx.x\n * - if the storage scope is shared, it will look for threadIdx.x/y/z\n * \\return The loop domain\n */\nMap<Var, Range> LoopDomainOfSRefTreePath(const StmtSRef& low_inclusive,\n const Optional<StmtSRef>& high_exclusive = NullOpt,\n const runtime::StorageScope& extra_relax_scope = //\n runtime::StorageScope{runtime::StorageRank::kGlobal, \"\"});\n\n/*!\n * \\brief Returns the block var binding\n * \\param realize The BlockRealize to be analyzed\n * \\return The block var binding\n */\nMap<Var, PrimExpr> GetBindings(const BlockRealize& realize);\n\n/******** Block-loop relation ********/\n/*!\n * \\brief Retrieves blocks in a specific function with its name\n * \\param self The schedule state\n * \\param name The name of the blocks to be retrieved\n * \\param func_name The name of the function\n * \\return A list of blocks with the specific name\n */\nArray<StmtSRef> GetBlocks(const ScheduleState& self, const String& name, const String& func_name);\n/*!\n * \\brief Gets the parent loops of the block in its scope, from outer to inner\n * \\param self The schedule state\n * \\param block_sref The query block\n * \\return A list of loops above the given block in its scope, from outer to inner\n */\nArray<StmtSRef> GetLoops(const StmtSRef& block_sref);\n/*!\n * \\brief Gets the leaf blocks of a scope where a specific block/loop is in\n * \\param self The schedule state\n * \\param parent_sref The StmtSRef that points to the parent block/loop\n * \\return A list of leaf blocks\n */\nArray<StmtSRef> GetChildBlocks(const ScheduleState& self, const StmtSRef& parent_sref);\n\n} // namespace tir\n} // namespace tvm\n\n#endif // TVM_TIR_SCHEDULE_ANALYSIS_H_\n"
},
{
"alpha_fraction": 0.6710849404335022,
"alphanum_fraction": 0.6740304231643677,
"avg_line_length": 27.29166603088379,
"blob_id": "4b210e860f1fce4f84e6ee29cf3e90aed8ca4553",
"content_id": "2cf416c471ec4d7368778ec68b45647ba2dd241a",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2037,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 72,
"path": "/src/support/array.h",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n#ifndef TVM_SUPPORT_ARRAY_H_\n#define TVM_SUPPORT_ARRAY_H_\n#include <tvm/runtime/container/array.h>\n\n#include <vector>\n\nnamespace tvm {\nnamespace support {\n\n/*!\n * \\brief Checks if two arrays contain the same objects\n * \\tparam T The type of objects in the array\n * \\param a The first array\n * \\param b The second array\n * \\return A boolean indicating if they are the same\n */\ntemplate <class T>\ninline bool ArrayWithSameContent(const Array<T>& a, const Array<T>& b) {\n if (a.size() != b.size()) {\n return false;\n }\n int n = a.size();\n for (int i = 0; i < n; ++i) {\n if (!a[i].same_as(b[i])) {\n return false;\n }\n }\n return true;\n}\n\n/*!\n * \\brief Checks if two arrays contain the same objects\n * \\tparam T The type of objects in the array\n * \\param a The first array\n * \\param b The second array\n * \\return A boolean indicating if they are the same\n */\ntemplate <class T>\ninline bool ArrayWithSameContent(const std::vector<T*>& a, const std::vector<T*>& b) {\n if (a.size() != b.size()) {\n return false;\n }\n int n = a.size();\n for (int i = 0; i < n; ++i) {\n if (a[i] != b[i]) {\n return false;\n }\n }\n return true;\n}\n\n} // namespace support\n} // namespace tvm\n#endif // TVM_SUPPORT_ARRAY_H_\n"
},
{
"alpha_fraction": 0.5974314212799072,
"alphanum_fraction": 0.6391463875770569,
"avg_line_length": 36.11827850341797,
"blob_id": "e8d613d1c9e65b43017d4c6514b38197817b4a14",
"content_id": "3271379cf3ef19a5c24bc20b3730491cbf92eeb5",
"detected_licenses": [
"Zlib",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10356,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 279,
"path": "/tests/python/relay/test_pass_fake_quantization_to_integer.py",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=unused-wildcard-import\nimport numpy as np\nimport pytest\n\nimport tvm\nfrom tvm import relay\n\n\ndef test_fake_quantize_conv():\n for out_dtype in [\"int8\", \"uint8\"]:\n x = relay.var(\"x\", shape=[1, 3, 224, 224], dtype=\"int8\")\n w = relay.var(\"w\", shape=[16, 3, 5, 5], dtype=\"int8\")\n one = relay.const(1.0)\n zero = relay.const(0)\n\n op = relay.op.nn.conv2d(\n relay.qnn.op.dequantize(x, relay.const(2.0), zero),\n relay.qnn.op.dequantize(w, relay.const(0.5), zero),\n )\n op = relay.qnn.op.quantize(op, one, zero, out_dtype=out_dtype)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=\"int8\")\n w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype=\"int8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np, w_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np, w_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_transpose_quantize_conv():\n x = relay.var(\"x\", shape=[1, 224, 224, 3], dtype=\"int8\")\n w = relay.var(\"w\", shape=[16, 3, 5, 5], dtype=\"int8\")\n one = relay.const(1.0)\n zero = relay.const(0)\n\n x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)\n x = relay.transpose(x, [0, 3, 1, 2])\n op = relay.op.nn.conv2d(x, relay.qnn.op.dequantize(w, relay.const(0.5), zero))\n op = relay.qnn.op.quantize(op, one, zero)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype=\"int8\")\n w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype=\"int8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np, w_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np, w_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_transpose_quantize_conv_bias_add():\n x = relay.var(\"x\", shape=[1, 224, 224, 3], dtype=\"int8\")\n w = relay.var(\"w\", shape=[16, 3, 5, 5], dtype=\"int8\")\n bias = relay.var(\"bias\", shape=[16], dtype=\"int32\")\n one = relay.const(1.0)\n zero = relay.const(0)\n\n x = relay.qnn.op.dequantize(x, 
relay.const(2.0), zero)\n x = relay.transpose(x, [0, 3, 1, 2])\n op = relay.op.nn.conv2d(x, relay.qnn.op.dequantize(w, relay.const(0.5), zero))\n op = relay.op.nn.bias_add(op, relay.qnn.op.dequantize(bias, one, zero))\n op = relay.qnn.op.quantize(op, one, zero)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype=\"int8\")\n w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype=\"int8\")\n bias_np = np.random.randint(-32768, 32767, size=[16], dtype=\"int32\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np, w_np, bias_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np, w_np, bias_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_quantize_maxpool():\n x = relay.var(\"x\", shape=[1, 3, 224, 224], dtype=\"int8\")\n\n zero = relay.const(0)\n x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)\n op = relay.op.nn.max_pool2d(x, [3, 3])\n op = relay.qnn.op.quantize(op, relay.const(2.0), zero)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=\"int8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_quantize_avgpool():\n x = relay.var(\"x\", shape=[1, 3, 224, 224], dtype=\"int8\")\n\n zero = relay.const(0)\n x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)\n op = relay.op.nn.avg_pool2d(x, [3, 3])\n op = relay.qnn.op.quantize(op, relay.const(2.0), zero)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=\"int8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np).asnumpy()\n\n assert np.all(np.abs(result - result2) <= 1)\n\n\ndef test_fake_quantize_reshape():\n x = relay.var(\"x\", shape=[1, 3, 224, 224], dtype=\"int8\")\n\n zero = relay.const(0)\n x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)\n op = relay.op.reshape(x, [1, 3, -1])\n op = relay.qnn.op.quantize(op, relay.const(2.0), zero)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=\"int8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result 
= ex.evaluate()(x_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_quantize_transpose_reshape():\n x = relay.var(\"x\", shape=[1, 3, 224, 224], dtype=\"int8\")\n\n zero = relay.const(0)\n x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)\n op = relay.op.transpose(x, [1, 0, 2, 3])\n op = relay.op.reshape(op, [3, -1])\n op = relay.qnn.op.quantize(op, relay.const(2.0), zero)\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=\"int8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_quantize_concat():\n zero = relay.const(0)\n inputs = []\n for i in range(4):\n inputs.append(\n relay.qnn.op.dequantize(\n relay.var(\"x%d\" % i, shape=[1, 4], dtype=\"int8\"), relay.const(i + 0.5), zero\n )\n )\n concat = relay.op.concatenate(inputs, axis=1)\n out = relay.qnn.op.quantize(concat, relay.const(3.5), zero)\n\n mod = tvm.IRModule.from_expr(out)\n mod = tvm.relay.transform.InferType()(mod)\n\n inputs_np = []\n for i in range(4):\n inputs_np.append(np.random.randint(-128, 127, size=[1, 4], dtype=\"int8\"))\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(*inputs_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(*inputs_np).asnumpy()\n\n assert np.array_equal(result, result2)\n\n\ndef test_fake_quantize_clip():\n x = relay.var(\"x\", shape=[1, 3, 224, 224], dtype=\"uint8\")\n\n x = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(114))\n op = relay.op.clip(x, 0, 6)\n op = relay.qnn.op.quantize(op, relay.const(2.0), relay.const(114), out_dtype=\"uint8\")\n\n mod = tvm.IRModule.from_expr(op)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_np = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype=\"uint8\")\n\n mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)\n assert not tvm.ir.structural_equal(mod, mod2)\n mod2 = tvm.relay.transform.FoldConstant()(mod2)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=tvm.cpu(), target=\"llvm\")\n result = ex.evaluate()(x_np).asnumpy()\n\n ex = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\")\n result2 = ex.evaluate()(x_np).asnumpy()\n\n assert np.array_equal(result, result2)\n"
},
{
"alpha_fraction": 0.6652118563652039,
"alphanum_fraction": 0.6669394373893738,
"avg_line_length": 35.41721725463867,
"blob_id": "2fb2286091e510324a67044f1a84599c6010a946",
"content_id": "d58dece3c644718298483c75d6d19688f4675f08",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10998,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 302,
"path": "/src/tir/schedule/analysis/analysis.cc",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n#include \"../utils.h\"\n\nnamespace tvm {\nnamespace tir {\n\n/******** Scope ********/\n\nOptional<StmtSRef> GetScopeRoot(const StmtSRef& sref) {\n for (const StmtSRefNode* p = sref->parent; p != nullptr; p = p->parent) {\n if (p->stmt->IsInstance<BlockNode>()) {\n return GetRef<StmtSRef>(p);\n }\n }\n return NullOpt;\n}\n\nStmtSRef GetScopeRootAndCheckStagePipeline(const ScheduleState& self, const StmtSRef& sref) {\n class RootBlockError : public ScheduleError {\n public:\n explicit RootBlockError(IRModule mod) : mod_(mod) {}\n IRModule mod() const final { return mod_; }\n String FastErrorString() const final {\n return \"ScheduleError: The primitive does not operate on the root block\";\n }\n String DetailRenderTemplate() const final {\n return \"The primitive does not operate on the root block\";\n }\n Array<ObjectRef> LocationsOfInterest() const final { return {}; }\n IRModule mod_;\n };\n\n class NotStagePipelineError : public ScheduleError {\n public:\n explicit NotStagePipelineError(IRModule mod, Block block) : mod_(mod), block_(block) {}\n IRModule mod() const final { return mod_; }\n String FastErrorString() const final {\n return \"ScheduleError: The scope root is not a stage pipeline\";\n }\n String DetailRenderTemplate() const final {\n return R\"(The scope {0} is not a stage pipeline.\nDefinition of a scope that is a stage pipeline:\n- The region cover property holds for every of its child blocks\n- No write-after-read dependency or opaque dependency,\n- only read-after-write and write-after-write are allowed\n- All the statements in the scope are schedulable statements, i.e. 
Block and For\n)\";\n }\n Array<ObjectRef> LocationsOfInterest() const final { return {block_}; }\n IRModule mod_;\n Block block_;\n };\n\n StmtSRef scope_root_sref{nullptr};\n if (Optional<StmtSRef> opt_scope_root_sref = GetScopeRoot(sref)) {\n scope_root_sref = opt_scope_root_sref.value();\n } else {\n throw RootBlockError(self->mod);\n }\n bool stage_pipeline = self->GetBlockInfo(scope_root_sref).scope->stage_pipeline;\n if (stage_pipeline == false) {\n const BlockNode* block = TVM_SREF_TO_BLOCK(block, scope_root_sref);\n throw NotStagePipelineError(self->mod, GetRef<Block>(block));\n }\n return scope_root_sref;\n}\n\n/*!\n * \\brief Check the dominant property of a block:\n * the block is the only writer of its output, dominating the reader of its output buffers\n * \\param self The schedule state\n * \\param block_sref The block whose dominant property is to be checked\n * \\return A boolean indicating if the block is a dominant block\n */\nbool IsDominantBlock(const BlockScope& self, const StmtSRef& block_sref) {\n // Check whether the input block is the only writer of its outputs\n const BlockNode* block = TVM_SREF_TO_BLOCK(block, block_sref);\n const std::unordered_map<Buffer, Array<StmtSRef>, ObjectPtrHash, ObjectPtrEqual>& buffer_writers =\n self->buffer_writers;\n for (const BufferRegion& write_region : block->writes) {\n ICHECK(buffer_writers.count(write_region->buffer))\n << \"InternalError: buffer \\\"\" << write_region->buffer->name\n << \"\\\" does not exist in the current scope, when querying block:\\n\"\n << GetRef<Block>(block);\n if (buffer_writers.at(write_region->buffer).size() != 1) {\n return false;\n }\n }\n return true;\n}\n\nbool IsCompleteBlock(const ScheduleState& self, const StmtSRef& block_sref,\n const StmtSRef& scope_root) {\n BlockScope scope = self->GetBlockScope(scope_root);\n // Cond 1. All block vars are data parallel\n const auto* block = TVM_SREF_TO_BLOCK(block, block_sref);\n for (const IterVar& iter_var : block->iter_vars) {\n if (iter_var->iter_type != kDataPar) {\n return false;\n }\n }\n // Cond 2. Dominant: the block is the only writer of its output,\n // dominating the reader of its output buffers\n if (!IsDominantBlock(scope, block_sref)) {\n return false;\n }\n // Cond 3. 
No overlap between the buffers the block reads and writes\n std::unordered_set<const BufferNode*> written_buffers;\n written_buffers.reserve(block->writes.size());\n for (const BufferRegion& write : block->writes) {\n written_buffers.insert(write->buffer.get());\n }\n for (const BufferRegion& read : block->reads) {\n if (written_buffers.count(read->buffer.get())) {\n return false;\n }\n }\n return true;\n}\n\nvoid CheckCompleteBlock(const ScheduleState& self, const StmtSRef& block_sref,\n const StmtSRef& scope_root_sref) {\n class IncompleteBlockError : public ScheduleError {\n public:\n explicit IncompleteBlockError(IRModule mod, Block block) : mod_(mod), block_(block) {}\n String FastErrorString() const final { return \"ScheduleError: Incomplete block\"; }\n String DetailRenderTemplate() const final {\n return R\"(The block {0} is not a complete block.\nDefinition of a complete block:\n1) All block vars are data parallel\n2) Dominant: the block is the only writer of its output, dominating the reader of its output buffers\n3) No overlap between the buffers the block reads and writes)\";\n }\n IRModule mod() const final { return mod_; }\n Array<ObjectRef> LocationsOfInterest() const final { return {block_}; }\n IRModule mod_;\n Block block_;\n };\n\n bool result = IsCompleteBlock(self, block_sref, scope_root_sref);\n if (result == false) {\n const BlockNode* block = TVM_SREF_TO_BLOCK(block, scope_root_sref);\n throw IncompleteBlockError(self->mod, GetRef<Block>(block));\n }\n}\n\n/******** Binding ********/\n\nbool IsAffineBinding(const BlockRealize& realize, const Map<Var, Range>& loop_var_ranges,\n arith::Analyzer* analyzer) {\n if (loop_var_ranges.empty()) {\n return true;\n }\n Array<arith::IterSumExpr> results = arith::DetectIterMap(\n /*indices=*/realize->iter_values,\n /*input_iters=*/loop_var_ranges,\n /*predicate=*/realize->predicate,\n /*require_bijective=*/false,\n /*analyzer=*/analyzer);\n if (results.empty()) {\n return false;\n }\n for (const arith::IterSumExpr& sum_expr : results) {\n const Array<arith::IterSplitExpr>& args = sum_expr->args;\n if (!args.empty() && !is_one(args[0]->scale)) {\n return false;\n }\n }\n return true;\n}\n\nMap<Var, Range> LoopDomainOfSRefTreePath(const StmtSRef& low_inclusive,\n const Optional<StmtSRef>& high_exclusive,\n const runtime::StorageScope& extra_relax_scope) {\n Map<Var, Range> result;\n const StmtSRefNode* p = low_inclusive.get();\n const StmtSRefNode* limit = static_cast<const StmtSRefNode*>(high_exclusive.get());\n for (; p != limit; p = p->parent) {\n const ForNode* loop = p->StmtAs<ForNode>();\n if (loop == nullptr) {\n break;\n }\n result.Set(loop->loop_var, Range::FromMinExtent(loop->min, loop->extent));\n }\n if (extra_relax_scope.rank != runtime::StorageRank::kGlobal) {\n for (; p; p = p->parent) {\n if (const ForNode* loop = p->StmtAs<ForNode>()) {\n if (loop->kind == ForKind::kThreadBinding) {\n const String& thread_tag = loop->thread_binding.value()->thread_tag;\n if (CanRelaxStorageUndereThread(extra_relax_scope,\n runtime::ThreadScope::Create(thread_tag))) {\n result.Set(loop->loop_var, Range::FromMinExtent(loop->min, loop->extent));\n }\n }\n }\n }\n }\n return result;\n}\n\nMap<Var, PrimExpr> GetBindings(const BlockRealize& realize) {\n const BlockNode* block = realize->block.get();\n const Array<IterVar>& all_lhs = block->iter_vars;\n const Array<PrimExpr>& all_rhs = realize->iter_values;\n ICHECK_EQ(all_lhs.size(), all_rhs.size());\n Map<Var, PrimExpr> result;\n for (int i = 0, n = all_lhs.size(); i < n; ++i) {\n 
const IterVar& lhs = all_lhs[i];\n const PrimExpr& rhs = all_rhs[i];\n result.Set(lhs->var, rhs);\n }\n return result;\n}\n\n/******** Block-loop relation ********/\n\nArray<StmtSRef> GetBlocks(const ScheduleState& self, const String& name, const String& func_name) {\n struct Finder : public StmtVisitor {\n explicit Finder(const ScheduleState& self, const String& name) : self_(self), name_(name) {}\n\n void VisitStmt_(const BlockNode* block) override {\n if (block->name_hint == name_) {\n auto it = self_->stmt2ref.find(block);\n ICHECK(it != self_->stmt2ref.end());\n results_.push_back(it->second);\n }\n StmtVisitor::VisitStmt_(block);\n }\n\n const ScheduleState& self_;\n const String& name_;\n Array<StmtSRef> results_;\n };\n\n BaseFunc func = self->mod->Lookup(func_name);\n const auto* prim_func = TVM_TYPE_AS(prim_func, func, PrimFuncNode);\n Finder finder(self, name);\n finder(prim_func->body);\n return std::move(finder.results_);\n}\n\nArray<StmtSRef> GetLoops(const StmtSRef& block_sref) {\n std::vector<StmtSRef> result;\n for (StmtSRefNode* parent = block_sref->parent; parent && parent->stmt->IsInstance<ForNode>();\n parent = parent->parent) {\n result.push_back(GetRef<StmtSRef>(parent));\n }\n return {result.rbegin(), result.rend()};\n}\n\nArray<StmtSRef> GetChildBlocks(const ScheduleState& self, const StmtSRef& parent_sref) {\n struct Collector : public StmtVisitor {\n public:\n static Array<StmtSRef> Collect(const ScheduleState& self, const Stmt& stmt) {\n Collector collector(self);\n collector(stmt);\n return std::move(collector.result_);\n }\n\n private:\n explicit Collector(const ScheduleState& self) : self_(self) {}\n\n void VisitStmt_(const BlockNode* block) final {\n auto it = self_->stmt2ref.find(block);\n ICHECK(it != self_->stmt2ref.end());\n result_.push_back(it->second);\n }\n\n const ScheduleState& self_;\n Array<StmtSRef> result_;\n };\n\n if (parent_sref->stmt->IsInstance<ForNode>()) {\n const auto* loop = static_cast<const ForNode*>(parent_sref->stmt);\n return Collector::Collect(self, loop->body);\n } else if (parent_sref->stmt->IsInstance<BlockNode>()) {\n const auto* block = static_cast<const BlockNode*>(parent_sref->stmt);\n return Collector::Collect(self, block->body);\n }\n ICHECK(false) << \"Unreachable\";\n throw;\n}\n\n} // namespace tir\n} // namespace tvm\n"
},
{
"alpha_fraction": 0.6584841012954712,
"alphanum_fraction": 0.6605378985404968,
"avg_line_length": 33.08333206176758,
"blob_id": "30cb54fff5b809e10f8073b24ff3d00f8355d23e",
"content_id": "f883b4113656dbcb9047392b33f0f2f3e7e7d963",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10225,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 300,
"path": "/src/relay/transforms/fake_quantization_to_integer.cc",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n/*!\n * \\file src/relay/transforms/quantize_fake_quantization.cc\n * \\brief A pass for taking fake quantized graphs and converting them\n * to actual integer operations.\n */\n\n#include <tvm/relay/expr.h>\n#include <tvm/relay/expr_functor.h>\n#include <tvm/relay/transform.h>\n\n/* Description of FakeQuantizationToInteger\n *\n * The purpose of this pass is to find regions of the graph that follow\n * the general pattern:\n *\n * x w\n * | |\n * dq dq\n * \\ /\n * op1\n * |\n * op2\n * |\n * q\n *\n * and convert them into subgraphs with actual integer operations on x and w\n *\n * The pass does this via a multi-pass approach:\n *\n * The main pass is a MixedModeMutator that traverses the full graph searching for\n * quantize operations\n *\n * The second pass is an ExprVisitor that recursively searches for subgraphs leading to the\n * quantize for subtraphs bounded by dequantize operations. This pass extracts the affine\n * types of the inputs for later processing, where affine denotes the transformation\n * x_real = (x_affine - zero_point) * scale\n *\n * The third pass is an ExprMutator that recursively rewrites the subgraphs using packed funcs\n * registered with the FTVMFakeQuantizationToInteger attribute. These packed funcs rewrite\n * the ops based on the affine types of their inputs and then return the affine types of the\n * new rewriten ops to pass that information down the stack during rewrite.\n *\n * After the second and third passes run, the first pass replaces the quantize with the\n * rewritten subgraph and the processing continues\n */\n\nnamespace tvm {\nnamespace relay {\n\n/*!\n * \\brief AffineType representation\n * \\sa AffineType\n */\nclass AffineTypeNode : public Object {\n public:\n /*! \\brief The scale of this type */\n Expr scale;\n /*! \\brief The zero point of this type */\n Expr zero_point;\n /*! 
\\brief The data type of this type */\n DataType dtype;\n\n void VisitAttrs(tvm::AttrVisitor* v) {\n v->Visit(\"scale\", &scale);\n v->Visit(\"zero_point\", &zero_point);\n v->Visit(\"dtype\", &dtype);\n }\n\n bool SEqualReduce(const AffineTypeNode* other, SEqualReducer equal) const {\n equal->MarkGraphNode();\n return equal(scale, other->scale) && equal(zero_point, other->zero_point) &&\n equal(dtype, other->dtype);\n }\n\n void SHashReduce(SHashReducer hash_reduce) const {\n hash_reduce->MarkGraphNode();\n hash_reduce(scale);\n hash_reduce(zero_point);\n hash_reduce(dtype);\n }\n\n static constexpr const bool _type_has_method_sequal_reduce = true;\n static constexpr const bool _type_has_method_shash_reduce = true;\n static constexpr const char* _type_key = \"AffineTypeNode\";\n TVM_DECLARE_BASE_OBJECT_INFO(AffineTypeNode, Object);\n};\n\n/*!\n * \\brief Managed reference to AffineTypes.\n * \\sa AffineTypeNode\n */\nclass AffineType : public ObjectRef {\n public:\n TVM_DLL AffineType(Expr scale, Expr zero_point, DataType dtype) {\n ObjectPtr<AffineTypeNode> n = make_object<AffineTypeNode>();\n n->scale = std::move(scale);\n n->zero_point = std::move(zero_point);\n n->dtype = std::move(dtype);\n data_ = std::move(n);\n }\n TVM_DEFINE_OBJECT_REF_METHODS(AffineType, ObjectRef, AffineTypeNode);\n};\n\nTVM_REGISTER_NODE_TYPE(AffineTypeNode);\n\nusing ExprSet = std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual>;\nusing ExprMap = std::unordered_map<Expr, Expr, ObjectPtrHash, ObjectPtrEqual>;\nusing AffineTypeMap = Map<Expr, AffineType>;\n\nusing FTVMFakeQuantizationToInteger =\n runtime::TypedPackedFunc<Array<ObjectRef>(const Expr& expr, const AffineTypeMap& map)>;\n\nclass SubgraphExtractor : public ExprVisitor {\n public:\n const ExprSet GetSubgraph(const Expr& expr) {\n VisitExpr(expr);\n ExprSet subgraph;\n if (is_fake_quantized_) {\n for (auto kv : this->visit_counter_) {\n if (auto call_node = GetRef<ObjectRef>(kv.first).as<CallNode>()) {\n if (call_node->op != quantize_op_) {\n subgraph.insert(Downcast<Expr>(GetRef<ObjectRef>(kv.first)));\n }\n }\n }\n }\n return subgraph;\n }\n const AffineTypeMap GetAffineTypes() { return affine_types_; }\n void VisitExpr(const Expr& expr) override {\n if (expr.as<CallNode>() == nullptr && expr.as<OpNode>() == nullptr &&\n expr.as<TupleNode>() == nullptr) {\n is_fake_quantized_ = false;\n } else {\n ExprVisitor::VisitExpr(expr);\n }\n }\n\n protected:\n void VisitExpr_(const CallNode* call_node) override {\n if (call_node->op == quantize_op_) {\n // Only look at arg0 for quantize\n VisitExpr(call_node->args[0]);\n // Collect type of quantize ops\n affine_types_.Set(GetRef<Expr>(call_node),\n AffineType(call_node->args[1], call_node->args[2],\n call_node->checked_type().as<TensorTypeNode>()->dtype));\n } else if (call_node->op == dequantize_op_) {\n // Collect type of dequantize ops\n affine_types_.Set(GetRef<Expr>(call_node),\n AffineType(call_node->args[1], call_node->args[2],\n call_node->args[0]->checked_type().as<TensorTypeNode>()->dtype));\n } else {\n // run normally on everything else.\n ExprVisitor::VisitExpr_(call_node);\n }\n }\n\n const Op quantize_op_ = Op::Get(\"qnn.quantize\");\n const Op dequantize_op_ = Op::Get(\"qnn.dequantize\");\n bool is_fake_quantized_ = true;\n AffineTypeMap affine_types_;\n};\n\nclass SubgraphMutator : public ExprMutator {\n public:\n SubgraphMutator(ExprSet subgraph, AffineTypeMap affine_types)\n : subgraph_(subgraph), affine_types_(affine_types) {}\n\n Expr MutateSubgraph(const Expr& expr) {\n if 
(subgraph_.size() == 0) {\n return expr;\n }\n const CallNode* quantize_node = expr.as<CallNode>();\n ICHECK(quantize_node);\n ICHECK(quantize_node->op == quantize_op_);\n out_type_ = affine_types_[expr];\n static auto fqfq =\n Op::GetAttrMap<FTVMFakeQuantizationToInteger>(\"FTVMFakeQuantizationToInteger\");\n for (auto node : subgraph_) {\n if (!fqfq.count(Downcast<Op>(node.as<CallNode>()->op))) {\n // Only modify the subgraph if we have translation\n // rules for every op\n return expr;\n }\n }\n return Mutate(expr);\n }\n\n protected:\n Expr VisitExpr_(const CallNode* call_node) {\n Expr out;\n\n static auto fqfq =\n Op::GetAttrMap<FTVMFakeQuantizationToInteger>(\"FTVMFakeQuantizationToInteger\");\n Op op = Downcast<Op>(call_node->op);\n if (fqfq.count(op)) {\n Expr expr;\n if (op == dequantize_op_) {\n expr = GetRef<Expr>(call_node);\n } else {\n expr = ExprMutator::VisitExpr_(call_node);\n // Set the current op to the output type, useful if we can't deduce output parameters\n // from input parameters\n affine_types_.Set(expr, out_type_);\n }\n // Call the rewrite\n Array<ObjectRef> vals = fqfq[op](expr, affine_types_);\n // Save the outputs of the rewrite\n ICHECK(vals.size() == 4)\n << \"got the wrong number of returned arguments from FTVMFakeQuantizationToInteger for \"\n << AsText(op, false);\n out = Downcast<Expr>(vals[0]);\n affine_types_.Set(out, AffineType(Downcast<Expr>(vals[1]), Downcast<Expr>(vals[2]),\n DataType(String2DLDataType(Downcast<String>(vals[3])))));\n } else {\n ICHECK(false) << \"When rewriting a fake quantized graph, found an invalid node \"\n << AsText(GetRef<Expr>(call_node), false);\n }\n return out;\n }\n ExprSet subgraph_;\n AffineTypeMap affine_types_;\n AffineType out_type_;\n const Op quantize_op_ = Op::Get(\"qnn.quantize\");\n const Op dequantize_op_ = Op::Get(\"qnn.dequantize\");\n};\n\nclass FakeQuantizationRewriter : public MixedModeMutator {\n protected:\n Expr Rewrite_(const CallNode* pre, const Expr& post) override {\n if (const CallNode* call_node = post.as<CallNode>()) {\n if (call_node->op == quantize_op_) {\n SubgraphExtractor extractor;\n ExprSet subgraph = extractor.GetSubgraph(GetRef<Expr>(pre));\n AffineTypeMap affine_types = extractor.GetAffineTypes();\n\n ExprSet post_subgraph;\n AffineTypeMap post_affine_types;\n\n for (auto kv : affine_types) {\n if (pre == kv.first.as<CallNode>()) {\n // we haven't memoized the current op yet\n post_affine_types.Set(post, kv.second);\n } else {\n post_affine_types.Set(memo_.at(kv.first), kv.second);\n }\n }\n for (auto expr : subgraph) {\n post_subgraph.insert(memo_[expr]);\n }\n Expr out = SubgraphMutator(post_subgraph, post_affine_types).MutateSubgraph(post);\n return out;\n }\n }\n return post;\n }\n const Op quantize_op_ = Op::Get(\"qnn.quantize\");\n};\n\nExpr FakeQuantizationToInteger(const Expr& expr, const IRModule& mod) {\n return FakeQuantizationRewriter().Mutate(expr);\n}\n\nnamespace transform {\n\nPass FakeQuantizationToInteger() {\n runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)> pass_func =\n [=](Function f, IRModule m, PassContext pc) {\n return Downcast<Function>(FakeQuantizationToInteger(f, m));\n };\n return CreateFunctionPass(pass_func, 0, \"FakeQuantizationToInteger\", {\"InferType\"});\n}\n\nTVM_REGISTER_GLOBAL(\"relay._transform.FakeQuantizationToInteger\")\n .set_body_typed(FakeQuantizationToInteger);\n\n} // namespace transform\n\n} // namespace relay\n} // namespace tvm\n"
},
{
"alpha_fraction": 0.63304203748703,
"alphanum_fraction": 0.6434663534164429,
"avg_line_length": 35.81007766723633,
"blob_id": "690634d455284425ffe82787dea8c1e4a4768712",
"content_id": "afdbdc590de0e4ed1af49f73caef4a2770ecd509",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9497,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 258,
"path": "/tests/micro/zephyr/test_zephyr_aot.py",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport datetime\nfrom hashlib import new\nimport logging\nimport os\nimport sys\nimport logging\nimport pathlib\n\nimport pytest\nimport numpy as np\n\nimport tvm\nimport tvm.rpc\nimport tvm.micro\nimport tvm.relay as relay\n\nfrom tvm.micro.contrib import zephyr\nfrom tvm.contrib import utils\nfrom tvm.contrib.download import download_testdata\n\nimport conftest\n\n_LOG = logging.getLogger(__name__)\n\nPLATFORMS = conftest.PLATFORMS\n\n\ndef _build_session_kw(model, target, zephyr_board, west_cmd, mod, runtime_path, build_config):\n parent_dir = os.path.dirname(__file__)\n filename = os.path.splitext(os.path.basename(__file__))[0]\n prev_build = f\"{os.path.join(parent_dir, 'archive')}_{filename}_{zephyr_board}_last_build.micro\"\n workspace_root = os.path.join(\n f\"{os.path.join(parent_dir, 'workspace')}_{filename}_{zephyr_board}\",\n datetime.datetime.now().strftime(\"%Y-%m-%dT%H-%M-%S\"),\n )\n workspace_parent = os.path.dirname(workspace_root)\n if not os.path.exists(workspace_parent):\n os.makedirs(workspace_parent)\n workspace = tvm.micro.Workspace(debug=True, root=workspace_root)\n\n compiler = zephyr.ZephyrCompiler(\n project_dir=runtime_path,\n board=zephyr_board,\n zephyr_toolchain_variant=\"zephyr\",\n west_cmd=west_cmd,\n env_vars={\"ZEPHYR_RUNTIME\": \"ZEPHYR-AOT\"},\n )\n\n opts = tvm.micro.default_options(os.path.join(runtime_path, \"crt\"))\n opts[\"bin_opts\"][\"include_dirs\"].append(os.path.join(runtime_path, \"include\"))\n opts[\"lib_opts\"][\"include_dirs\"].append(os.path.join(runtime_path, \"include\"))\n\n flasher_kw = {}\n if build_config[\"debug\"]:\n flasher_kw[\"debug_rpc_session\"] = tvm.rpc.connect(\"127.0.0.1\", 9090)\n\n session_kw = {\n \"flasher\": compiler.flasher(**flasher_kw),\n }\n\n if not build_config[\"skip_build\"]:\n session_kw[\"binary\"] = tvm.micro.build_static_runtime(\n workspace,\n compiler,\n mod,\n opts,\n executor=\"aot\",\n extra_libs=[tvm.micro.get_standalone_crt_lib(\"memory\")],\n )\n if os.path.exists(prev_build):\n os.unlink(prev_build)\n session_kw[\"binary\"].archive(prev_build, metadata_only=True)\n else:\n unarchive_dir = utils.tempdir()\n session_kw[\"binary\"] = tvm.micro.MicroBinary.unarchive(\n prev_build, unarchive_dir.relpath(\"binary\")\n )\n\n return session_kw\n\n\ndef _create_header_file(tensor_name, npy_data, output_path):\n \"\"\"\n This method generates a header file containing the data contained in the numpy array provided.\n It is used to capture the tensor data (for both inputs and expected outputs).\n \"\"\"\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n 
header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n if npy_data.dtype == \"int8\":\n header_file.write(f\"int8_t {tensor_name}[] =\")\n elif npy_data.dtype == \"int32\":\n header_file.write(f\"int32_t {tensor_name}[] = \")\n elif npy_data.dtype == \"uint8\":\n header_file.write(f\"uint8_t {tensor_name}[] = \")\n elif npy_data.dtype == \"float32\":\n header_file.write(f\"float {tensor_name}[] = \")\n else:\n raise ValueError(\"Data type not expected.\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")\n\n\ndef _read_line(fd):\n data = \"\"\n new_line = False\n while True:\n if new_line:\n break\n new_data = fd.read(1, timeout_sec=10)\n logging.debug(f\"read data: {new_data}\")\n for item in new_data:\n new_c = chr(item)\n data = data + new_c\n if new_c == \"\\n\":\n new_line = True\n break\n return data\n\n\ndef _get_message(fd, expr: str):\n while True:\n data = _read_line(fd)\n logging.debug(f\"new line: {data}\")\n if expr in data:\n return data\n\n\ndef test_tflite(platform, west_cmd, skip_build, tvm_debug):\n \"\"\"Testing a TFLite model.\"\"\"\n model, zephyr_board = PLATFORMS[platform]\n input_shape = (1, 32, 32, 3)\n output_shape = (1, 10)\n build_config = {\"skip_build\": skip_build, \"debug\": tvm_debug}\n\n this_dir = os.path.dirname(__file__)\n tvm_source_dir = os.path.join(this_dir, \"..\", \"..\", \"..\")\n runtime_path = os.path.join(tvm_source_dir, \"apps\", \"microtvm\", \"zephyr\", \"aot_demo\")\n model_url = \"https://github.com/eembc/ulpmark-ml/raw/fc1499c7cc83681a02820d5ddf5d97fe75d4f663/base_models/ic01/ic01_fp32.tflite\"\n model_path = download_testdata(model_url, \"ic01_fp32.tflite\", module=\"model\")\n\n # Import TFLite model\n tflite_model_buf = open(model_path, \"rb\").read()\n try:\n import tflite\n\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n except AttributeError:\n import tflite.Model\n\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n\n # Load TFLite model and convert to Relay\n relay_mod, params = relay.frontend.from_tflite(\n tflite_model, shape_dict={\"input_1\": input_shape}, dtype_dict={\"input_1 \": \"float32\"}\n )\n\n target = tvm.target.target.micro(model, options=[\"-link-params=1\", \"--executor=aot\"])\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lowered = relay.build(relay_mod, target, params=params)\n\n # Load sample and generate input/output header files\n sample_url = \"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/testdata_image_classification_fp32_8.npy\"\n sample_path = download_testdata(\n sample_url, \"testdata_image_classification_fp32_8.npy\", module=\"data\"\n )\n sample = np.load(sample_path)\n model_files_path = os.path.join(runtime_path, \"include\")\n _create_header_file((f\"input_data\"), sample, model_files_path)\n _create_header_file(\n \"output_data\", np.zeros(shape=output_shape, dtype=\"float32\"), model_files_path\n )\n\n session_kw = _build_session_kw(\n model, target, zephyr_board, west_cmd, lowered.lib, runtime_path, build_config\n )\n transport = session_kw[\"flasher\"].flash(session_kw[\"binary\"])\n transport.open()\n transport.write(b\"start\\n\", timeout_sec=5)\n\n result_line = _get_message(transport, \"#result\")\n result_line = 
result_line.strip(\"\\n\")\n result_line = result_line.split(\":\")\n result = int(result_line[1])\n time = int(result_line[2])\n logging.info(f\"Result: {result}\\ttime: {time} ms\")\n assert result == 8\n\n\ndef test_qemu_make_fail(platform, west_cmd, skip_build, tvm_debug):\n \"\"\"Testing QEMU make fail.\"\"\"\n model, zephyr_board = PLATFORMS[platform]\n build_config = {\"skip_build\": skip_build, \"debug\": tvm_debug}\n shape = (10,)\n dtype = \"float32\"\n\n this_dir = pathlib.Path(__file__).parent\n tvm_source_dir = this_dir / \"..\" / \"..\" / \"..\"\n runtime_path = tvm_source_dir / \"apps\" / \"microtvm\" / \"zephyr\" / \"aot_demo\"\n\n # Construct Relay program.\n x = relay.var(\"x\", relay.TensorType(shape=shape, dtype=dtype))\n xx = relay.multiply(x, x)\n z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))\n func = relay.Function([x], z)\n\n target = tvm.target.target.micro(model, options=[\"-link-params=1\", \"--executor=aot\"])\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lowered = relay.build(func, target)\n\n # Generate input/output header files\n model_files_path = os.path.join(runtime_path, \"include\")\n _create_header_file((f\"input_data\"), np.zeros(shape=shape, dtype=dtype), model_files_path)\n _create_header_file(\"output_data\", np.zeros(shape=shape, dtype=dtype), model_files_path)\n\n session_kw = _build_session_kw(\n model, target, zephyr_board, west_cmd, lowered.lib, runtime_path, build_config\n )\n\n file_path = os.path.join(session_kw[\"binary\"].base_dir, \"zephyr/CMakeFiles/run.dir/build.make\")\n assert os.path.isfile(file_path), f\"[{file_path}] does not exist.\"\n\n # Remove a file to create make failure.\n os.remove(file_path)\n transport = session_kw[\"flasher\"].flash(session_kw[\"binary\"])\n with pytest.raises(RuntimeError) as excinfo:\n transport.open()\n assert \"QEMU setup failed\" in str(excinfo.value)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n"
},
{
"alpha_fraction": 0.6992508172988892,
"alphanum_fraction": 0.7031751871109009,
"avg_line_length": 40.835819244384766,
"blob_id": "c2b2df67f8de0d6e9cb36cbed531d375492efd4a",
"content_id": "ab8299e381691f86619f7afb5546f70f291a1fc2",
"detected_licenses": [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2803,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 67,
"path": "/src/tir/schedule/primitive.h",
"repo_name": "chiphuyen/tvm",
"src_encoding": "UTF-8",
"text": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n#ifndef TVM_TIR_SCHEDULE_PRIMITIVE_H_\n#define TVM_TIR_SCHEDULE_PRIMITIVE_H_\n\n#include <tvm/tir/schedule/state.h>\n\nnamespace tvm {\nnamespace tir {\n\n/******** Schedule: loops manipulation ********/\n\n/******** Schedule: compute location ********/\n/*!\n * \\brief Inline a block into its consumer(s). It requires:\n * 1) The block is a complete non-root block, which only produces one buffer\n * 2) The block must not be the only leaf in the scope.\n * 3) The body of the block must be a BufferStore statement in the form of,\n * A[i, j, k, ...] = ...\n * where the indices of the LHS are all distinct atomic variables,\n * and no variables other than those indexing variables are allowed in the statement.\n * \\param self The state of the schedule\n * \\param block_sref The sref to the block to be inlined to its consumer(s)\n */\nTVM_DLL void ComputeInline(ScheduleState self, const StmtSRef& block_sref);\n/*!\n * \\brief Inline a block into its only producer. It requires:\n * 1) The block is a complete non-root block, which only produces and consumers one buffer\n * 2) The block must not be the only leaf in the scope.\n * 3) The only producer of the block is a read-after-write producer and a complete non-root block\n * 4) The body of the block must be a BufferStore statement in the form of,\n * B[f(i, j, k, ...)] = g(i, j, k, A[i, j, k, ...] ...)\n * where the indices of each `BufferLoad` on the RHS are all distinct atomic variables,\n * and no variables other than those indexing variables are allowed in the statement.\n * \\param self The state of the schedule\n * \\param block_sref The sref to the block to be inlined to its producer\n */\nTVM_DLL void ReverseComputeInline(ScheduleState self, const StmtSRef& block_sref);\n\n/******** Schedule: loop binding/annotation ********/\n\n/******** Schedule: cache read/write ********/\n\n/******** Schedule: reduction ********/\n\n/******** Schedule: blockize & tensorize ********/\n\n} // namespace tir\n} // namespace tvm\n\n#endif // TVM_TIR_SCHEDULE_PRIMITIVE_H_\n"
}
] | 9 |
cullen-kennedy/Earthquakes-d3
|
https://github.com/cullen-kennedy/Earthquakes-d3
|
8fe9f02f6b75c63ff5822d7bbe77726cf65b7857
|
4ce9aafaa7a6344212a49c2e3ce9c323b309c77b
|
a8a77d014843ca716ad55adda1cbffd2c88dd1da
|
refs/heads/master
| 2023-01-10T11:23:56.609537 | 2019-08-28T18:39:22 | 2019-08-28T18:39:22 | 197,091,742 | 0 | 0 | null | 2019-07-16T00:33:11 | 2019-08-28T18:39:25 | 2023-01-04T04:33:27 |
JavaScript
|
[
{
"alpha_fraction": 0.5332097411155701,
"alphanum_fraction": 0.5466996431350708,
"avg_line_length": 25.430517196655273,
"blob_id": "ed7d53d9f48739a32608fc04bef4410ae572c7fe",
"content_id": "55a1308cb6e81bf08115c3f4660a80719c884d31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 9711,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 367,
"path": "/src/index.js",
"repo_name": "cullen-kennedy/Earthquakes-d3",
"src_encoding": "UTF-8",
"text": "//Description: Earthquakes during 2004 - 2011\n//Author: Cullen Kennedy\n/*Early Version*/\n \n//Some global vars\nvar myMap, canvas, view = 'mapbtn', svg, x, y; \nconst margin = 50, colormap = {5: \"white\",6: \"green\",7:\"yellow\",8:\"orange\",9:\"red\"};\n\nwindow.addEventListener('load', load);\n\n/**\n * Loads initial view and loads static map\n */\nfunction load() \n{\n //Sets the default button as chosen\n let firstview = document.getElementById(view)\n firstview.style.backgroundColor = '#6f7070'\n\n //Adds event listeners to all buttons except cluster\n let toggles = document.querySelectorAll(\".toggleview\")\n var i;\n for (i = 0; i < toggles.length; i++) {\n toggles[i].addEventListener('click', toggleView)\n }\n\n // Public access key\n var key = \"pk.eyJ1Ijoia2VubmUxMjUiLCJhIjoiY2p4czhsNjc2MGdjZTNtcXFsMjY1ZmRmeCJ9.4md7Cc5glRtVqmMM6tENRw\";\n\n //load canvas vars\n canvas = document.getElementById(\"canvas\");\n canvasContainer = document.getElementById(\"canvasContainer\");\n ctx = canvas.getContext(\"2d\");\n \n\tconst mappa = new Mappa('Mapbox', key); \n \n /**\n * Just keep it at a fixed standard size\n * Too small and the whole map won't show anyway\n */\n const options = {\n lat: 0,\n lng: 0,\n zoom: 1,\n width: 1080,\n height: 720,\n scale: 1,\n pitch: 1,\n style: 'dark-v8'\n };\n\n //Set map var\n\tmyMap = mappa.staticMap(options);\n \n //Load Image\n\tlet img = new Image();\n img.src = myMap.imgUrl;\n\n img.onload = () => {\n console.log(img.width + \" \" +img.height)\n canvasContainer.setAttribute(\"style\", \"width:\" + img.width +\"px;\" + \" height:\"+img.height+\"px;\");\n ctx.imageSmoothingEnabled = false;\n canvas.width = img.width;\n canvas.height = img.height;\n ctx.drawImage(img, 0, 0);\t\n\n //When image is loaded, it is time to draw svg\n setupSVG()\n }\n}\n\n/**\n * Sets up the svg element over the canvas/map as well as the chart axis elements\n */ \nfunction setupSVG() {\n //Change this to easier change the domains\n let beginDate = new Date(\"01/01/2004\")\n let endDate = new Date(\"12/31/2011\")\n\n //Domains are hardcoded\n svg = d3.select(canvasContainer).append('svg')\n\n //Setting the chart element\n let chartview = svg.append(\"g\")\n .attr(\"class\", \"charts\")\n .attr(\"height\", canvas.height)\n .attr(\"width\", canvas.width)\n .attr(\"transform\", \n \"translate(\" + margin + \",\" + 0 + \")\")\n\n //x and y, used in update have global scope\n x = d3.scaleTime()\n .range([margin,canvas.width-margin])\n .domain([beginDate,endDate])\n \n y = d3.scaleLinear()\n .domain([0,4520])\n .range([canvas.height-margin,margin])\n \n //Setting the chart axis graphics\n let chartx = chartview.append(\"g\")\n .attr(\"class\", \"x axis\")\n .style('opacity', 0)\n .style(\"fill\", \"white\")\n .style(\"stroke-width\", 1)\n \n let charty = chartview.append(\"g\")\n .attr(\"class\", \"y axis\")\n .style('opacity', 0)\n .style(\"fill\", \"white\")\n .style(\"stroke-width\", 1)\n\n let xAxis = d3.axisBottom(x)\n let yAxis = d3.axisRight(y)\n\n chartx\n .attr(\"transform\", \"translate(\" + -margin + \",\" + (canvas.height - margin) + \")\")\n .call(xAxis)\n\n charty\n .attr(\"transform\", \"translate(\" + 0 + \",\" + 0 + \")\")\n .call(yAxis) \n \n addPoints()\n}\n\n/**\n * loads the data\n */\nfunction addPoints()\n{\n d3.csv('./csv/newFile.csv')\n .then((data) => {\n draw(data)\n })\n .catch((error) => {\n console.log(\"Error Loading Data:\" + error)\n })\n}\n\n/**\n * Draws the actual circles on the map and runs the worker\n * who sets the cluster 
positions\n * @param data \n */\nfunction draw(data) {\n\n let meter = document.querySelector(\"#progress\")\n\t\t\t\n\tlet worker = new Worker(\"src/worker.js\");\n\t\n\t\tsvg.selectAll('circle')\n\t\t.data(data)\n\t\t.enter()\n\t\t.append(\"circle\")\n \t.attr(\"r\", function(d) {return Math.floor(d['Magnitude'] - 4)})\n .attr('fill', function(d) {return colormap[Math.floor(d['Magnitude'])]})\n .attr('opacity', 0.6)\n \t.attr('class', function(d) {\n return Math.floor(d['Magnitude']);\n })\n\n //Sending the wrong canvas dimensions sometimes\n\tworker.postMessage({\n\t nodes: data,\n width: canvas.getAttribute('width'),\n height: canvas.getAttribute('height')\n\t});\n\n\tworker.onmessage = function(event) {\n\t switch (event.data.type) {\n\t case \"tick\": return ticked(event.data);\n\t case \"end\": return ended(event.data);\n\t }\n\t};\n\n\tfunction ticked(data) {\n var progress = data.progress;\n meter.style.width = 90 * progress + \"%\";\n\t}\n\n\tfunction ended(data) {\n var nodes = data.nodes\n var cluster = document.getElementById(\"cluster\")\n cluster.addEventListener(\"click\", toggleView);\n cluster.setAttribute(\"class\", \"b-ready buttons toggleview hover\")\n\t\t\n\t\t//Let circles remember their x and y cluster values\n\t\t//Won't be very good if I need x or y later, but unlikely.\n\t\t//Maybe find alternative\n\t\tsvg.selectAll('circle').data(nodes)\n\t\n\t}\n update();\n\n}\n\n/**\n * Update sets mapview, chartview, play view or cluster view \n * @param transitionTime \n */\nfunction update(transitionTime) { \n\n transitionTime = (typeof transitionTime !== 'undefined') ? transitionTime : 0\n\n if (view === 'mapbtn') {\n\n axisOpacity(2, 0)\n svg.selectAll(\"circle\").each(function(d) {\n \n circle = d3.select(this)\n\n\t\t\tlet pos = myMap.latLngToPixel(d['Latitude'], d['Longitude']);\n\n circle\n .transition()\n .duration(transitionTime)\n .attr(\"cx\", pos.x )\n .attr(\"cy\", pos.y )\n }) \n }\n else if (view === 'play'){\n \n axisOpacity(2, 0)\n\n svg.selectAll('circle')\n .transition()\n .duration(0)\n .attr(\"r\",0)\n \n var dateshow = document.createElement(\"div\")\n canvasContainer.appendChild(dateshow)\n dateshow.setAttribute(\"id\", \"dateshow\");\n\n svg.selectAll(\"circle\").each(function(d, i) {\n circle = d3.select(this)\n let pos = myMap.latLngToPixel(d['Latitude'], d['Longitude']);\n\n setTimeout(() => { dateshow.innerHTML = d['Date'] }, i*10);\n\n //basically a delayed transition of radius from 0 to showing to 0 (size and duration depends on magnitude)\n circle\n .attr(\"cx\", pos.x)\n .attr(\"cy\", pos.y)\n .transition()\n .duration((d) => {return Math.pow(Math.floor((d['Magnitude'])), 3.5)})\n .delay(10*i)\n .attr(\"r\", function(d) {return Math.pow(Math.floor((d['Magnitude'] - 4)), 3)})\n .transition()\n .duration((d) => {return Math.pow(Math.floor((d['Magnitude'])), 3.5)})\n .attr(\"r\",0) \n })\n \n } \n //Chart view is simply time / frequency\n else if (view === 'chart') {\n\n axisOpacity(2, 1)\n\n let v = 0; \n svg.selectAll('circle')\n .transition()\n .duration(transitionTime)\n .attr('cy', () => {return y(v++);})\n .attr('cx', (d, i) => {\n return x(new Date(d['Date']))\n }) \n } \n //Cluster View is setting cx, cy to d.x and d.y previously determined by the worker\n else if (view === 'cluster') {\n\t\t\n axisOpacity(2, 0);\n\t\n svg.selectAll('circle')\n .transition()\n .duration(transitionTime)\n\t\t\t.attr(\"cx\", function(d) { return d.x; })\n \t\t.attr(\"cy\", function(d) { return d.y; })\n }\t\n\n /**\n * Add Depth View maybe\n * 
 */\t\t\t\t\t\t\t \t\n}\n\nfunction toggleView() {\n \n //reset colour and hover of last button\n let lastbtn = document.getElementById(view)\n lastbtn.style.backgroundColor = 'black'\n lastbtn.setAttribute(\"class\", \"b-ready buttons toggleview hover\")\n\n //If view was play, stop the delayed transitions, reset radius and visibility\n if (view === 'play') {\n\n let dateshow = document.getElementById(\"dateshow\")\n canvasContainer.removeChild(dateshow)\n\n svg.selectAll(\"circle\")\n .each(function() {\n d3.select(this)\n .transition()\n .duration(0)\n .attr(\"r\", function(d) {return Math.floor(d['Magnitude'] - 4)})\n .attr('visibility', 'visible') \n })\n }\n\n view = event.target.id\n //set color and active state on selected button\n let currentbtn = document.getElementById(view)\n currentbtn.style.backgroundColor = '#6f7070'\n currentbtn.setAttribute(\"class\", \"b-ready buttons toggleview\")\n\n showView();\n}\n\n\n\nfunction showView() {\n if (view == \"mapbtn\") {\n showMap()\n }\n else if (view == \"play\") {\n showMap() \n }\n else if (view == \"chart\"){\n hideMap()\n } \n else if (view == \"cluster\") {\n hideMap()\n }\n update(500) \n}\n\nfunction showMap() {\n mapOpacity(1);\n}\n\nfunction hideMap() {\n mapOpacity(0.5);\n}\n\nfunction mapOpacity(opacity) {\n canvas.style.opacity = opacity;\n}\n\nfunction axisOpacity(axis, val) {\n if(axis == 2){\n d3.selectAll(\".axis\")\n .transition()\n .duration(500)\n .style(\"opacity\", val);\n }\n //Was previously for other axis views. Not using this else condition\n else{\n d3.selectAll(\".y\")\n .transition()\n .duration(500)\n .style(\"opacity\", val)\n\n d3.selectAll(\".x\")\n .transition()\n .duration(500)\n .style(\"opacity\", 1)\n }\n}\n\n\n\n \n \n"
},
{
"alpha_fraction": 0.7586705088615417,
"alphanum_fraction": 0.7615606784820557,
"avg_line_length": 27.75,
"blob_id": "789bd17833d3f61cb0db76dee1756ce44c7b5cd1",
"content_id": "361699154b80dadfcb2e54bc4839dd9e0d3c201f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 692,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 24,
"path": "/README.md",
"repo_name": "cullen-kennedy/Earthquakes-d3",
"src_encoding": "UTF-8",
"text": "# Earthquake Visualization\n\nThis is my earthquake visualization using d3.js. \nVisit the website [here!](https://earthquakes-d3.herokuapp.com/)\n\n## Installation\n\nIn order to use the mapbox static api, you need to get a free public access key. The one in this repo and on the heroku site are changed frequently by yours truly. \n\nYou will also need to get your own earthquake data. I got it from [kaggle](https://www.kaggle.com/usgs/earthquake-database). Signing up is free. The little python script in the python directory deletes unused fields.\n\nTo install dependencies run:\n\n`npm install`\n\nTo build:\n\n`npm run build`\n\nYou must have babel set up on your machine\n\nTo serve:\n\n`npm run serve`\n\n\n"
},
{
"alpha_fraction": 0.6625766754150391,
"alphanum_fraction": 0.6625766754150391,
"avg_line_length": 22.428571701049805,
"blob_id": "1545d183bf250182493413ae3859c06547ced77c",
"content_id": "378ecd81f1dde400b158c96e83fe8dc0ee1e8b73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 7,
"path": "/python/script.py",
"repo_name": "cullen-kennedy/Earthquakes-d3",
"src_encoding": "UTF-8",
"text": "import csv\nimport pandas as pd\n\nf=pd.read_csv(\"\")\nkeep_col = ['Date','Latitude','Longitude','Depth', \"Magnitude\"]\nnew_f = f[keep_col]\nnew_f.to_csv(\"\", index=False)"
}
] | 3 |
mmming/jlpthelper
|
https://github.com/mmming/jlpthelper
|
ab74adc918d3c71c972b7598af2f2ead51bc2534
|
257d1aed5b118cc4f537e7b9fe4bf468de6ab8b6
|
c4f44b17e9fc19dc16685ac1652016e02450cf03
|
refs/heads/master
| 2021-01-14T09:20:11.654127 | 2014-11-22T07:43:55 | 2014-11-22T07:43:55 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6346153616905212,
"alphanum_fraction": 0.6378205418586731,
"avg_line_length": 23,
"blob_id": "94baada53849fea109e951d984007617e6708993",
"content_id": "5448c9668053ed847e1edf8d261cf2b283ddd913",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 13,
"path": "/jlpthelper/urls.py",
"repo_name": "mmming/jlpthelper",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\n\nfrom . import views\n\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.home, name='home'),\n url(r'^feedback/$', views.feedback, name='feedback'),\n url(r'^kanji_analyzer/$', include('jlpthelper.apps.kanji_analyzer.urls')),\n)\n"
}
] | 1 |
YoS-private/SSD_finetuning
|
https://github.com/YoS-private/SSD_finetuning
|
28eed3ae2807f1abb615bc2fe12aa447d346c880
|
f077476c9b10a6bd30a866bb7b61d70f8889dac4
|
8eee1ac2b715a4f6aaa5e109cb66783a5d827600
|
refs/heads/master
| 2020-06-14T15:15:24.617972 | 2019-07-03T11:15:12 | 2019-07-03T11:15:12 | 195,038,156 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6627411842346191,
"alphanum_fraction": 0.6765105724334717,
"avg_line_length": 30.904624938964844,
"blob_id": "d7fbf7735f4a0d15ab0475c127dcbd3730e6fd82",
"content_id": "7ff4a25a77401e9f73f794818d00cf28ad2f5ea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13033,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 346,
"path": "/fine_tuning_ssd.py",
"repo_name": "YoS-private/SSD_finetuning",
"src_encoding": "UTF-8",
"text": "import json\nimport matplotlib.pyplot as plt\nimport copy\nimport chainer\nfrom chainercv.chainer_experimental.datasets.sliceable import GetterDataset\nimport os\nfrom chainercv.utils import read_image\n\nfrom chainer.datasets import ConcatenatedDataset\nfrom chainer.datasets import TransformDataset\nfrom chainer.optimizer_hooks import WeightDecay\nfrom chainer import serializers\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer.training import triggers\nfrom chainer.iterators import serial_iterator\n\nimport chainermn\n\nfrom chainercv import utils\nfrom chainercv import transforms\nfrom chainercv.visualizations import vis_bbox\nfrom chainercv.chainer_experimental.datasets.sliceable import ConcatenatedDataset\nfrom chainercv.chainer_experimental.datasets.sliceable import TransformDataset\nfrom chainercv.datasets import voc_bbox_label_names\nfrom chainercv.datasets import VOCBboxDataset\nfrom chainercv.extensions import DetectionVOCEvaluator\nfrom chainercv.links.model.ssd import GradientScaling\nfrom chainercv.links.model.ssd import multibox_loss\nfrom chainercv.links.model.ssd import random_crop_with_bbox_constraints\nfrom chainercv.links.model.ssd import resize_with_random_interpolation\nfrom chainercv.links.model.ssd import random_distort\nfrom chainercv.links import SSD300\nfrom chainercv.links import SSD512\n\nfrom chainercv.links import SSD300\nimport numpy as np\n\n# 学習データをインポートするクラス\n# GetterDatasetを継承\nclass originalDataset(GetterDataset):\n # 初期値はとりあえずはsplit='train'\n def __init__(self,split='train',use_difficult=False,return_difficult=False):\n super(originalDataset, self).__init__()\n data_dir = 'ssd_picts/'+split+'/'\n\n file_names = []\n for file in os.listdir(data_dir+'image'):\n file_names.append(file)\n\n self.filenames = file_names\n self.data_dir = data_dir\n self.use_difficult = 0 # difficultラベルがないので\n\n # _get_imageと_get_annotationsで画像とそのアノテーションをインポート\n self.add_getter('img', self._get_image)\n self.add_getter(('bbox', 'label', 'difficult'), self._get_annotations)\n\n # difficultをリターンする必要がなければdifficultは入れる必要ないので\n if not return_difficult:\n self.keys = ('img', 'bbox', 'label')\n\n # ファイル数を出力\n def __len__(self):\n return len(self.filenames)\n\n # 画像のインポート\n def _get_image(self,i):\n file_name = self.filenames[i]\n img = read_image(self.data_dir+'image/'+file_name)\n return img\n\n # i番目の画像のアノテーション情報(メタデータ)をインポート\n def _get_annotations(self,i):\n bbox = np.empty((0,4), float)\n label = np.empty((0,1), int)\n difficult = []\n filename = self.filenames[i]\n # メタデータから該当データ探索\n f = open(self.data_dir+'metadata.json')\n json_data = json.load(f)['_via_img_metadata']\n tmp_picts = list(json_data.values())\n tmp_data = {}\n objs = [p['regions'] for p in tmp_picts if p['filename'] == filename][0]\n\n # 領域それぞれに対して領域の角の点を抽出,ラベルも同時に定義\n for obj in objs:\n xmax = int(obj['shape_attributes']['x']) + int(obj['shape_attributes']['width'])\n ymax = int(obj['shape_attributes']['y']) + int(obj['shape_attributes']['height'])\n tmp_bbox=np.array([int(obj['shape_attributes']['y']), int(obj['shape_attributes']['x']), ymax, xmax])\n tmp_label = np.array([int(obj['region_attributes']['label'])])\n bbox = np.append(bbox,np.array([tmp_bbox]), axis = 0)\n label = np.append(label, np.array([tmp_label]), axis = 0)\n difficult.append(False)\n bbox = np.array(bbox,dtype=np.float32)\n difficult = np.array(difficult,dtype=np.bool) # 今回は使わない\n label = np.array(label,dtype=np.int32)\n bbox = np.stack(bbox).astype(np.float32)\n label = 
np.stack(label).astype(np.int32)\n return bbox, label, difficult\n\n\n# 学習済みデータセットとクラス数が異なる時にスキップする重みを指定する関数\n# 入力はモデル(source,destination)\ndef get_shape_mismatch_names(src, dst):\n mismatch_names = []\n src_params = {p[0]: p[1] for p in src.namedparams()}\n # クラス数が違うところだけをmismatch_namesにappend→出力\n for dst_named_param in dst.namedparams():\n name = dst_named_param[0]\n dst_param = dst_named_param[1]\n src_param = src_params[name]\n if src_param.shape != dst_param.shape:\n mismatch_names.append(name)\n return mismatch_names\n\n\n# SSDのモデルの定義\nclass MultiboxTrainChain(chainer.Chain):\n\n def __init__(self, model, alpha=1, k=3):\n super(MultiboxTrainChain, self).__init__()\n with self.init_scope():\n self.model = model\n self.alpha = alpha\n self.k = k\n\n def forward(self, imgs, gt_mb_locs, gt_mb_labels):\n mb_locs, mb_confs = self.model(imgs)\n loc_loss, conf_loss = multibox_loss(\n mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)\n loss = loc_loss * self.alpha + conf_loss\n\n chainer.reporter.report(\n {'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},\n self)\n\n return loss\n\n# 学習データの水増しとSSDに入力するための準備処理\nclass Transform(object):\n def __init__(self, coder, size, mean):\n self.coder = copy.copy(coder)\n self.coder.to_cpu()\n\n self.size = size\n self.mean = mean\n\n def __call__(self, in_data):\n # 5段階のステップでデータの水増しを行う\n # 1. 色の拡張\n # 2. ランダムな拡大\n # 3. ランダムなトリミング\n # 4. ランダムな補完の再補正\n # 5. ランダムな水平反転\n\n\n img, bbox, label = in_data\n\n # 1. 色の拡張\n # 明るさ,コントラスト,彩度,色相を組み合わせ,データ拡張をする\n img = random_distort(img)\n\n # 2. ランダムな拡大\n if np.random.randint(2):\n # キャンバスの様々な座標に入力画像を置いて,様々な比率の画像を生成し,bounding boxを更新\n img, param = transforms.random_expand(img, fill=self.mean, return_param=True)\n bbox = transforms.translate_bbox(bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n\n # 3. ランダムなトリミング\n img, param = random_crop_with_bbox_constraints(img, bbox, return_param=True)\n # トリミングされた画像内にbounding boxが入るように調整\n bbox, param = transforms.crop_bbox(\n bbox, y_slice=param['y_slice'],\n x_slice=param['x_slice'],\n allow_outside_center=False,\n return_param=True)\n label = label[param['index']]\n\n # 4. ランダムな補完の再補正\n ## 画像とbounding boxのリサイズ\n _, H, W = img.shape\n img = resize_with_random_interpolation(img, (self.size, self.size))\n bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))\n\n # 5. 
ランダムな水平反転\n ## 画像とbounding boxをランダムに水平方向に反転\n img, params = transforms.random_flip(img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(bbox, (self.size, self.size), x_flip=params['x_flip'])\n\n # SSDのネットワークに入力するための準備の処理\n img -= self.mean\n ## SSDに入力するためのloc(デフォルトbounding boxのオフセットとスケール)と\n ## mb_label(クラスを表す配列)を出力\n mb_loc, mb_label = self.coder.encode(bbox, label)\n\n return img, mb_loc, mb_label\n\n\n\n\n\n\n\n# 学習データ,評価データ,テストデータの属性を読み込み\n# 引数にはその属性があるパスを入力\n# train_dataset = outDataset('./ssd_picts/train/train.json')\n# valid_dataset = outDataset('./ssd_picts/valid/valid.json')\n# test_dataset = outDataset('./ssd_picts/test/test.json')\n\n# chainerCVでSSDを動かす通常の動作\n## モデルの定義(重みは PASCAL VOC 2007と2012の学習済みデータで実行)\n### 表示するためのスコアの閾値は0.6,non_maximun_suppressionの閾値は0.45\nmodel = SSD300(n_fg_class=len(voc_bbox_label_names), pretrained_model='voc0712')\nmodel.score_thresh = 0.50\nmodel.nms_thresh = 0.45\nimg = utils.read_image('ssd_picts/test/image/000000581155.jpg', color=True)\n# モデルを使って予測\nbboxes, labels, scores = model.predict([img])\nbbox, label, score = bboxes[0], labels[0], scores[0]\n\nvis_bbox(img, bbox, label, score, label_names=voc_bbox_label_names)\n# 表示\n# plt.show()\n\n\n# fine-tuningの準備\n# pre-trainedモデルの重みの一部を使う.\nsrc = SSD300(pretrained_model='voc0712')\n# voc0712: 50種類\n# dst(fine-tuning結果のモデル)は21クラスに設定\n# dstとsrcでクラス数が異なる\ndst = SSD300(n_fg_class=21)\n# 重みをとりあえず全ての層において初期化\ndst(np.zeros((1, 3, dst.insize, dst.insize), dtype=np.float32))\n\n# ignore_names以外の層に関しては,出力先のdstにパラメータを出力\nignore_names = get_shape_mismatch_names(src, dst)\nsrc_params = {p[0]: p[1] for p in src.namedparams()}\nfor dst_named_param in dst.namedparams():\n name = dst_named_param[0]\n if name not in ignore_names:\n dst_named_param[1].array[:] = src_params[name].array[:]\n\n# 読み込まれているかチェック.\n# 読み込まれていない場合のみ出力としてAssertionエラーがでる.\nnp.testing.assert_equal(dst.extractor.conv1_1.W.data,\n src.extractor.conv1_1.W.data)\n# スキップした層の名前を出力\nprint(ignore_names)\n\n\n\n\n# 学習\n## 変数定義\ngpu = 0 # gpuのID\nbatchsize = 16 # バッチサイズ\ntest_batchsize = 8\niteration = 120\nstep = [80, 100]\nout = 'result' # 出力ファイルパス\nresume = None # 重み\n\nlabel = ['man','woman']\n\n# dstのignore_namesを学習する\nmodel = dst\n\n# score_threshとnms_threshの値を変更\nmodel.use_preset('evaluate')\ntrain_chain = MultiboxTrainChain(model)\n\nchainer.cuda.get_device_from_id(gpu).use()\nmodel.to_gpu()\n\n# 学習データの取り込み with 水増し\ntrain = TransformDataset(\n originalDataset(split='train'),\n ('img', 'mb_loc', 'mb_label'),\n Transform(model.coder, model.insize, model.mean))\ntrain_iter = chainer.iterators.MultiprocessIterator(train, batchsize)\n\n# テストデータの取り込み\ntest = originalDataset(\n split='test', use_difficult=True, return_difficult=True)\ntest_iter = chainer.iterators.SerialIterator(\n test, batchsize, repeat=False, shuffle=False)\n\n# initial lr is set to 1e-3 by ExponentialShift\n# 確率的勾配法\n# setupで初期化\noptimizer = chainer.optimizers.MomentumSGD()\noptimizer.setup(train_chain)\n\n# 線形関数:y=Wx+b(Wは重み,bはバイアス)\n# bの時:勾配のスケールを2倍にあげる.\n# Wの時:重みの減衰率を0.0005倍にする.\n# フック:プログラム中の特定の箇所に独自の処理を追加できるようにする,\nfor param in train_chain.params():\n if param.name == 'b':\n param.update_rule.add_hook(GradientScaling(2))\n else:\n param.update_rule.add_hook(WeightDecay(0.0005))\n\n# 最適化関数とトレーニングデータセットを入力\n# 学習部分ににoptimizerを繋げる.\nupdater = training.updaters.StandardUpdater(\n train_iter, optimizer, device=gpu)\ntrainer = training.Trainer(\n updater, (iteration, 'iteration'), out)\n# 指数関数的に学習率を変更できるように設定\ntrainer.extend(\n extensions.ExponentialShift('lr', 0.1, 
init=1e-3),\n trigger=triggers.ManualScheduleTrigger(step, 'iteration'))\n\n# 学習とともにAPやmAPを評価するように設定\ntrainer.extend(\n DetectionVOCEvaluator(\n test_iter, model, use_07_metric=True,\n label_names=(voc_bbox_label_names+('new_label',))))\n\nlog_interval = 10, 'iteration'\n# 評価結果の出力\ntrainer.extend(extensions.LogReport(trigger=log_interval))\ntrainer.extend(extensions.observe_lr(), trigger=log_interval)\ntrainer.extend(extensions.PrintReport(\n ['epoch', 'iteration', 'lr',\n 'main/loss', 'main/loss/loc', 'main/loss/conf',\n 'validation/main/map']),\n trigger=log_interval)\ntrainer.extend(extensions.ProgressBar(update_interval=10))\n\n# 結果の表示\ntrainer.extend(\n extensions.snapshot(),\n trigger=triggers.ManualScheduleTrigger(\n step + [iteration], 'iteration'))\ntrainer.extend(\n extensions.snapshot_object(model, 'model_iter_{.updater.iteration}'),\n trigger=(iteration, 'iteration'))\n\nif resume:\n serializers.load_npz(resume, trainer)\n\ntrainer.run()\n"
},
{
"alpha_fraction": 0.8472222089767456,
"alphanum_fraction": 0.8472222089767456,
"avg_line_length": 35,
"blob_id": "9c8dea268139702e985bc91548b9ca3fecb37c58",
"content_id": "aa92580103e342564e1db83b0071531ebbab60a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 2,
"path": "/README.md",
"repo_name": "YoS-private/SSD_finetuning",
"src_encoding": "UTF-8",
"text": "# SSD_finetuning\nSSD(Single Shot Multibox Detector)でFine-tuningするためのコード\n"
}
] | 2 |
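The core trick in the fine-tuning script above is to copy every pretrained parameter whose shape still matches and to skip the class-dependent heads. A minimal, framework-agnostic sketch of that pattern, using plain dicts of NumPy arrays (the dict layout and layer names are illustrative, not part of ChainerCV):

```python
import numpy as np

def copy_matching_params(src_params, dst_params):
    # Copy src arrays into dst in place wherever shapes agree;
    # return the names that were skipped (e.g. the class-score head).
    skipped = []
    for name, dst_arr in dst_params.items():
        src_arr = src_params.get(name)
        if src_arr is not None and src_arr.shape == dst_arr.shape:
            dst_arr[...] = src_arr
        else:
            skipped.append(name)
    return skipped

# A 21-class head differs in shape from the 20-class pretrained head.
src = {'conv1/W': np.ones((64, 3, 3, 3)), 'head/W': np.ones((20, 64))}
dst = {'conv1/W': np.zeros((64, 3, 3, 3)), 'head/W': np.zeros((21, 64))}
print(copy_matching_params(src, dst))  # -> ['head/W']
```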
RobinPrest/python_code
|
https://github.com/RobinPrest/python_code
|
dbfe7adbbb153b4767bca0e200445d69dd1a4a5b
|
7db29c9adfeab38420f74b1fe6ebdc25b7536844
|
da09dee9518386d8c6bf55590890857c76219543
|
refs/heads/master
| 2021-08-16T22:03:26.574332 | 2017-11-20T11:06:34 | 2017-11-20T11:06:34 | 110,575,145 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5518814325332642,
"alphanum_fraction": 0.5598631501197815,
"avg_line_length": 24.764705657958984,
"blob_id": "e272c78d47905c859c9926d8e71ecadaa29dc596",
"content_id": "b85ac0c02417fefa151f13bb4e5175fbdef87051",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 34,
"path": "/Batch_csv2csv.py",
"repo_name": "RobinPrest/python_code",
"src_encoding": "UTF-8",
"text": "# !/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\nfrom utils.file_manips import find_files, path_separator, csv2csv\n\n# Input\nimport_dir = input(u'Répertoire source :')\nexport_dir = input(u'Répertoire cible :')\n\nif import_dir and export_dir:\n\n # Init compteur\n count_read = 0\n\n # Lancement du process\n try:\n print('Traitement en cours')\n\n for f in find_files(import_dir, '*.csv'):\n try:\n f_repertoire = path_separator(f)['rep']\n print('Traitement de ' + path_separator(f)['nom_ext'])\n csv2csv(f, export_dir)\n print(f + ' lu')\n count_read = count_read + 1\n except Exception as e:\n raise\n\n print(str(count_read) + u' fichiers lus')\n\n except Exception as e:\n raise\nelse:\n print(\"Il manque un répertoire - Traitement annulé\")\n\n"
},
{
"alpha_fraction": 0.7887324094772339,
"alphanum_fraction": 0.7887324094772339,
"avg_line_length": 34.5,
"blob_id": "33caa6e3da4893567a25b631ac4bbc9c6e919054",
"content_id": "12f58efce34f7ab44cebdbd851693f9f64c1a7a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 71,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 2,
"path": "/README.md",
"repo_name": "RobinPrest/python_code",
"src_encoding": "UTF-8",
"text": "# Code samples storage\nThis is just some python codes i'm keeping safe\n"
},
{
"alpha_fraction": 0.5615384578704834,
"alphanum_fraction": 0.5666666626930237,
"avg_line_length": 25,
"blob_id": "b4c7a3fa6b7f3d841584394bd49fc6dd4c5f4228",
"content_id": "2a5f91d08408d37371d218d8e212c6622f56fe8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1176,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 45,
"path": "/Batch_Read_first_line.py",
"repo_name": "RobinPrest/python_code",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport sys\nfrom utils.file_manips import find_files, csv_first_line_export,path_separator\n\n# Choix du répertoire\nstart_dir = input(r'Répertoire à traiter (ex : X:\\rep\\:')\n# start_dir = os.getcwd()\n\nif start_dir:\n\n # Choix du filtre\n pattern = input(u'Extension ou filtre de fichier texte (ex : *.txt): ')\n\n # Reset du compteur de fichiers\n count = 0\n\n if pattern:\n print('Traitement du répertoire ' + start_dir)\n\n # Lancement du process\n try:\n for f in find_files(start_dir, pattern):\n\n # nom du fichier d'export\n out_log = path_separator(f)[0] + '\\export.log'\n\n print('Found text files:', f)\n\n csv_first_line_export(f, out_log)\n count = count + 1\n print(u'Fichier produit : ' + path_separator(f)[0] + '\\export.log')\n\n except Exception as e:\n print('Unexpected error:', sys.exc_info()[0])\n raise\n\n print(str(count) + u' fichiers lus')\n\n else:\n print(u\"Il manque le motif (ex : *.txt) - traitement annulé\")\n\nelse:\n \"Pas de fichier à traiter\"\n"
},
{
"alpha_fraction": 0.6055570244789124,
"alphanum_fraction": 0.609192430973053,
"avg_line_length": 29.81599998474121,
"blob_id": "c91d67020e4c56fe5e1ac26a3e0a1049127eeea9",
"content_id": "e0b826cf6666bff141cafc9fa7d5b32dedbec820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3880,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 125,
"path": "/utils/file_manips.py",
"repo_name": "RobinPrest/python_code",
"src_encoding": "UTF-8",
"text": "# !/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\nimport os\nimport fnmatch\nimport pandas as pd\n\n\ndef find_files(directory, pattern):\n \"\"\"\n Cherche des fichiers selon un motif avec joker dans un répertoire\n et ses sous-répertoires\n Source\n Un répertoire en entrée : X:\\REP\n Un pattern avec joker * : *.txt\n \"\"\"\n\n for root, dirs, files in os.walk(directory):\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename\n\n\ndef is_non_zero_file(file_path):\n \"\"\"\n Vérifie si un fichier est vide\n \"\"\"\n return True if os.path.isfile(file_path) and os.path.getsize(file_path) > 0 else False\n\n\ndef csv_first_line_export(file_csv, write_csv):\n \"\"\"\n lit la première ligne d'un fichier csv\n Un fichier csv en entrée\n Un fichier csv en sortie en mode append\n :type file_csv: basestring\n :type write_csv : basestring\n \"\"\"\n\n try:\n if is_non_zero_file(file_csv):\n file_name = os.path.basename(file_csv)\n df = pd.read_csv(file_csv)\n df.drop(df.columns[0], axis=1)\n df.insert(1, \"source\", file_name)\n df_header = df[:1]\n df_header.to_csv(write_csv, sep=';', mode='a', header=True)\n else:\n print('Fichier vide : ' + file_csv)\n\n except:\n raise\n\n\ndef path_separator(input_path):\n \"\"\"\n Extrait des morceaux d'un path\n Un string path en entrée\n En sortie un dictionnaire [arborescence, dossier, nom.extension,extension]\n Valeurs possibles : dir, rep, nom_ext, ext, nom\n\n \"\"\"\n\n # Extraction de l'arborescence et création du dictionnaire\n file_dir = str(os.path.dirname(input_path))\n\n # Extraction du repertoire au-dessous\n file_rep = str(os.path.basename(file_dir))\n\n # Extraction de nom.extension\n file_nom_ext = str(os.path.basename(input_path))\n\n # Extraction de l'extension\n list_ext = input_path.split('.')[1:]\n file_ext = \".\".join(list_ext)\n\n # Extraction du nom de fichier sans extension\n file_nom = str(file_nom_ext[:-len(file_ext) - 1])\n\n parties = {'dir': file_dir, 'rep': file_rep, 'nom_ext': file_nom_ext, 'ext': file_ext, 'nom': file_nom}\n return parties\n\n\ndef excel2csv(excel_file, export_csv_dir):\n \"\"\"\n Crée un ou des fichiers CSV à partir d'un fichier Excel\n vers un répertoir donné\n Source\n Un fichier excel en entrée\n Un fichier csv en sortie\n :type excel_file: basestring\n :type export_csv_dir : basestring\n \"\"\"\n\n # Lecture de tous les noms de feuilles\n xls = pd.ExcelFile(excel_file)\n\n # Boucle sur les noms de feuilles\n for sheet_name in xls.sheet_names:\n df = pd.read_excel(excel_file, sheet_name)\n export_name = path_separator(excel_file)['rep']+'_'+path_separator(excel_file)['nom']+'_'+sheet_name+'.csv'\n # -->Possibilité ici de créer des modules de controle, modif ou autre\n\n # export en csv vers le répertoire spécifié\n df.to_csv(export_csv_dir + '\\\\' + export_name, sep=';', encoding='utf-8', index=False)\n\n\ndef csv2csv(csv_file, export_csv_dir):\n \"\"\"\n Crée un ou des fichiers CSV à partir d'un fichier CSV\n vers un répertoir donné\n Source\n Un fichier excel en entrée\n Un fichier csv en sortie\n :type csv_file: basestring\n :type export_csv_dir : basestring\n \"\"\"\n\n df = pd.read_csv(csv_file)\n # -->Possibilité ici de créer des modules de controle, modif ou autre\n\n # export en csv vers le répertoire spécifié\n export_name = path_separator(csv_file)['rep'] + '_' + path_separator(csv_file)['nom'] + '.csv'\n df.to_csv(export_csv_dir + '\\\\' + export_name, sep=';', encoding='utf-8', index=False)"
},
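As a quick orientation to the utilities above, here is a hypothetical usage sketch (the directory and file names are made up):

```python
from utils.file_manips import find_files, path_separator

# Walk C:\data recursively and inspect every CSV file found.
for f in find_files(r'C:\data', '*.csv'):
    parts = path_separator(f)
    # For C:\data\2020\sales.csv this yields:
    # dir='C:\\data\\2020', rep='2020', nom_ext='sales.csv', ext='csv', nom='sales'
    print(parts['rep'], parts['nom'], parts['ext'])
```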
{
"alpha_fraction": 0.625745952129364,
"alphanum_fraction": 0.6308610439300537,
"avg_line_length": 29.076923370361328,
"blob_id": "a0dce4e353ca80354640296e5063718cb6196b54",
"content_id": "057d495857425558af198a67e92a851f515a9d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 39,
"path": "/Batch_Excel2CSV.py",
"repo_name": "RobinPrest/python_code",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# Attention, si la première ligne ne commence pas en haut à gauche,\n# seules les colonnes après sont prises en compte\n\n# Le code find_files va trouver des fichiers correspondant à un pattern de type *.xls*\n# Le code excel2csv va convertir chaque fichier excel et ses feuilles en csv dans un répertoire cible\n\nfrom utils.file_manips import find_files, excel2csv, path_separator\n\n# Input\nimport_dir = input(u'Répertoire source :') or r'\\\\geosrv\\Temporaire\\RP'\nexport_dir = input(u'Répertoire cible :') or r'\\\\geosrv\\Temporaire\\RP'\n\nif import_dir and export_dir:\n\n # Init compteur\n count_read = 0\n\n # Lancement du process\n try:\n print('Traitement en cours')\n\n for f in find_files(import_dir, '*.xls*'):\n try:\n f_repertoire = path_separator(f)['rep']\n excel2csv(f, export_dir)\n print(f + ' lu')\n count_read = count_read + 1\n except Exception as e:\n raise\n\n print(str(count_read) + u' fichiers lus')\n\n except Exception as e:\n raise\nelse:\n print(\"Il manque un répertoire - Traitement annulé\")\n"
}
] | 5 |
eleGAN23/Digits_classification_Nonlinear_SVM
|
https://github.com/eleGAN23/Digits_classification_Nonlinear_SVM
|
4f26144a612f85f07c26df90308fe88fc7ec666b
|
1ea68fe4fccd23e8ac0cc6c9ceb9630ff453d265
|
56455be8ab061cbc2649e929d696921c9d6e2787
|
refs/heads/master
| 2020-04-14T12:19:23.630184 | 2019-01-02T13:05:34 | 2019-01-02T13:05:34 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7461629509925842,
"alphanum_fraction": 0.763872504234314,
"avg_line_length": 64.15384674072266,
"blob_id": "455b35e5adc39886b4fa71f88a53fd2f36d1781e",
"content_id": "251f15c21bc593d067d71302ea6dca69b939c7ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 847,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 13,
"path": "/README.md",
"repo_name": "eleGAN23/Digits_classification_Nonlinear_SVM",
"src_encoding": "UTF-8",
"text": "# Digits classification with Non linear SVM in Python\n#### Eleonora Grassucci, Daniele Mocavini, Dario Stagnitto\n\n\n\n\nThe aim of this project is to classify the handwritten digits scanned from envelopes by the U.S. Postal Service by means the Non linear SVM algorithm.\nThe training dataset is from 0 to 9 but we have to classify just the 2 and 8 digit in Point 1, 2 and 3. Instead, in Point 4, we have to classify the 1, 2 and 8 digit.\n\nWe applied different algorithms to solve the problem. In particular, firstly we solved the problem by using the CVXOpt **optimizer** in Python (Point 1 and Point 2), then we implemented the analytical solution to find the MVP (Point 3).\nFinally, in Point 4 we implemented a **One versus All** approach to classify three digits together with the CVXOpt optimizer.\n\nTo go deeper in the algorithms and to look at the results, read Report.ipynb.\n"
},
{
"alpha_fraction": 0.5391294360160828,
"alphanum_fraction": 0.5546422600746155,
"avg_line_length": 29.617021560668945,
"blob_id": "89a22952bcbed651630851dd83905f31aa61d97e",
"content_id": "c1ce379f19b0a7d7b9d09332f5ff2526a3be2e3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8638,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 282,
"path": "/Point 2.py",
"repo_name": "eleGAN23/Digits_classification_Nonlinear_SVM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Library\n\nimport sklearn\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport cvxopt\nfrom cvxopt import matrix\nfrom cvxopt import solvers\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport copy\n\nfrom sklearn import svm\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom matplotlib.colors import Normalize\n\n\n# In[2]:\n\n\n# Read files\n\ntrain2=pd.read_csv('train_2.csv', sep=',')\ntrain8=pd.read_csv('train_8.csv', sep=',')\ntest2=pd.read_csv('test_2.csv', sep=',')\ntest8=pd.read_csv('test_8.csv', sep=',')\n\n\n# In[3]:\n\n\n# Clean Data\n\nresult = train2.append(train8)\nresult.drop(\"257\", axis=1, inplace=True)\ny_train=np.asarray(result[\"0\"])\nx_train=result.drop(\"0\", axis=1)\nx_train=np.asarray(x_train)\ntest = test2.append(test8)\ny_test=np.asarray(test[\"0\"])\nx_test=test.drop(\"0\", axis=1)\nx_test=np.asarray(x_test)\n\n\n# In[4]:\n\n\n#Convert 2 in -1 and 8 in 1\n\nfor i in range(len(y_train)):\n if y_train[i]==2:\n y_train[i]=-1\n else:\n y_train[i]=1\n \nfor i in range(len(y_test)):\n if y_test[i]==2:\n y_test[i]=-1\n else:\n y_test[i]=1\n\n\n# In[5]:\n\n\n# Function for calculate accuracy of train and test set\n\ndef accuracy(alpha_k,x_train,y_train,x_test,gamma):\n sv = alpha_k > 1e-15\n ind = np.arange(len(alpha_k))[sv]\n alpha_sv= alpha_k[sv]\n X_train_sv = x_train[sv]\n y_train_sv = np.array(y_train)[sv]\n Y_train_sv = np.diag(y_train_sv)\n \n w=sum(alpha_sv*y_train_sv*gaussian_k(X_train_sv,X_train_sv, gamma))\n w=w.reshape(1,len(ind))\n y_train_sv=y_train_sv.reshape(len(ind),1)\n b=np.mean((1-(y_train_sv*w.T)*X_train_sv)/y_train_sv)\n \n # Preditct\n y_train_pred = np.sign(sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_train, gamma) for i in range(len(ind))) + b)[0]\n y_test_pred = np.sign(sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_test, gamma) for i in range(len(ind))) + b)[0]\n \n # Accuracy measures\n conf_mat_train = confusion_matrix(y_train,y_train_pred)\n accuracy_train=sklearn.metrics.accuracy_score(y_train, y_train_pred)\n conf_mat_test = confusion_matrix(y_test,y_test_pred)\n accuracy_test=sklearn.metrics.accuracy_score(y_test, y_test_pred)\n \n\n return accuracy_train, accuracy_test, conf_mat_train, conf_mat_test\n\n\n# In[6]:\n\n\ndef R_S(alpha_k, C, y, tollerance=0):\n alpha_k = np.array(alpha_k)\n y = np.array(y)\n \n L = alpha_k <= tollerance\n U = alpha_k >= C-tollerance\n F = np.where((alpha_k > tollerance) * (alpha_k < C - tollerance))\n L_meno = np.where(L * (y == - 1))\n L_pi = np.where(L * (y == 1))\n U_meno = np.where(U * (y == - 1))\n U_pi = np.where(U * (y == 1))\n R = np.concatenate((L_pi, U_meno, F), axis = None)\n S = np.concatenate((L_meno, U_pi, F), axis = None)\n return R, S\n\n\n# In[7]:\n\n\ndef gaussian_k(x, y, gamma):\n return sklearn.metrics.pairwise.rbf_kernel(x, y, gamma)\n\n\n# In[8]:\n\n\n# function returning the Hessian matrix\n\ndef Q_matrix(x, gamma, y):\n Y=np.diag(y)\n # gaussian kernel\n Ke = gaussian_k(x, x, gamma)\n Q = np.dot(np.dot(Y, Ke), Y)\n return Q\n\n\n# In[9]:\n\n\ndef Decomposition(x_train,y_train,x_test,y_test,q=2, maxiter=10000,C=0.861,gamma=0.01, epsilon=1e-5):\n solvers.options['show_progress'] = False\n solvers.options['abstol'] = 1e-12\n solvers.options['feastol'] = 1e-12 \n \n if q%2 == 0:\n qr = qs = q//2\n else:\n qr = q//2\n qs = qr + 1\n \n K= 
Q_matrix(x_train,gamma,y_train)\n \n alpha_k = np.array([0.0]*len(y_train)) #Initial 0 array alpha\n \n grad_k = np.array([-1.0]*len(y_train))\n \n start_time = time.time()\n k=0\n evaluations=0\n opt=False\n \n while opt != True:\n if k==maxiter:\n end_time = time.time()\n seco=end_time-start_time\n accuracy_train, accuracy_test,conf_mat_train,conf_mat_test = accuracy(alpha_k,x_train,y_train,x_test,gamma)\n print(\"Reached maximum number of iterations: %s after %s seconds\"%(k,(end_time - start_time)))\n final_obj = np.dot(np.dot(alpha_k.T,K),alpha_k)*0.5-np.sum(alpha_k)\n print(\"Classification Rate Training Set: \", accuracy_train)\n print(\"Missclassificated points: \", conf_mat_train[0][1]+conf_mat_train[1][0])\n print(\"Training Error: \",(1-accuracy_train))\n print(\"Classification Rate Test Set: %s\"%(accuracy_test))\n print(\"Missclassificated points: \", conf_mat_test[0][1]+conf_mat_test[1][0])\n print(\"Test Error: \",(1-accuracy_test))\n print(\"Time: %s seconds\"%(seco))\n print(\"Number of Iterations: \", k)\n print(\"Number of function evaluations: \", evaluations)\n print(\"Gamma value: %s\"%(gamma))\n print(\"C value: %s\"%(C))\n print(\"Initial value of the objective function of the dual problem: 0\" )\n print(\"Final value of the objective function of the dual problem: \", final_obj )\n print(\"q Values: \", q)\n print(\"Difference m and M: \", (m - M - epsilon))\n print()\n print(\"-------------------------------\")\n print()\n print(\"Train Confusion Matrix: \\n\", conf_mat_train)\n print()\n print(\"-------------------------------\")\n print()\n print(\"Test Confusion Matrix: \\n\", conf_mat_test)\n break\n \n grad_y = -np.dot(np.diag(y_train),grad_k)\n \n R,S=R_S(alpha_k,C,y_train,tollerance=1e-2)\n \n m = max(np.take(grad_y, R))\n M = min(np.take(grad_y, S))\n if m - M <= epsilon:\n end_time = time.time()\n seco=end_time-start_time\n accuracy_train, accuracy_test,conf_mat_train,conf_mat_test = accuracy(alpha_k,x_train,y_train,x_test,gamma)\n final_obj = np.dot(np.dot(alpha_k.T,K),alpha_k)*0.5-np.sum(alpha_k)\n print(\"Classification Rate Training Set: \", accuracy_train)\n print(\"Missclassificated points: \", conf_mat_train[0][1]+conf_mat_train[1][0])\n print(\"Training Error: \",(1-accuracy_train))\n print(\"Classification Rate Test Set: %s\"%(accuracy_test))\n print(\"Missclassificated points: \", conf_mat_test[0][1]+conf_mat_test[1][0])\n print(\"Test Error: \",(1-accuracy_test))\n print(\"Time: %s seconds\"%(seco))\n print(\"Number of Iterations: \", k)\n print(\"Number of function evaluations: \", evaluations)\n print(\"Gamma value: %s\"%(gamma))\n print(\"C value: %s\"%(C))\n print(\"Initial value of the objective function of the dual problem: 0\" )\n print(\"Final value of the objective function of the dual problem: \", final_obj )\n print(\"q Values: \", q)\n print(\"Difference m and M: \", (m - M - epsilon))\n print()\n print(\"-------------------------------\")\n print()\n print(\"Train Confusion Matrix: \\n\", conf_mat_train)\n print()\n print(\"-------------------------------\")\n print()\n print(\"Test Confusion Matrix: \\n\", conf_mat_test)\n\n break\n \n \n grad_y_ord = np.vstack((grad_y, np.array(range(len(y_train))))).T\n grad_y_R = grad_y_ord[R, :]\n index_R = (np.array(sorted(grad_y_R, key = lambda i: - i[0]))[:, 1].T)[:qr]\n \n grad_y_S = grad_y_ord[S, :]\n index_S = (np.array(sorted(grad_y_S, key = lambda i: i[0]))[:, 1].T)[:qs]\n \n \n W=(list(np.concatenate((index_R, index_S))))\n W=[int(x) for x in W]\n \n \n compl = 
list(set(range(len(y_train))) - (set(W)))\n tot = W + compl\n\n Q = K[:, tot][tot, :]\n\n alpha_k_n=alpha_k[tot]\n y_k = y_train[tot]\n P = matrix(Q[:q, :q])\n \n q_piccoletto = matrix(np.dot(alpha_k_n[q:], Q[q:,:q] )-1)\n G = matrix(np.concatenate((- np.diag(np.ones(q)), np.diag(np.ones(q))), axis = 0))\n h = matrix(np.concatenate((np.zeros(q), C * np.ones(q)), axis = 0))\n A = matrix((y_k[:q]).reshape(1, q))\n b = matrix(np.dot(- y_k[q:], alpha_k_n[q:]))\n \n sol = solvers.qp(P, q_piccoletto, G, h, A, b)\n alpha_star = np.ravel(sol['x']) \n \n ite=sol['iterations']\n evaluations=evaluations+ite\n alpha_k[W] = alpha_star\n grad_k = np.dot(K , alpha_k) - 1\n k+=1 \n\n\n# In[10]:\n\n\nDecomposition(x_train,y_train,x_test,y_test,q=10, maxiter=2000)\n\n\n# In[ ]:\n\n\n\n\n"
},
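The loop in `Decomposition` stops when the KKT gap m(α) − M(α) drops below ε, with m and M taken over the index sets R(α) and S(α) built by `R_S`. A toy NumPy check of that criterion on made-up values:

```python
import numpy as np

C, tol = 1.0, 1e-6
alpha = np.array([0.0, 0.4, 1.0, 0.2])
y = np.array([1, -1, -1, 1])
grad = np.array([-0.3, 0.1, 0.2, -0.1])  # gradient of the dual objective

free = (alpha > tol) & (alpha < C - tol)
R = np.where(((alpha <= tol) & (y == 1)) | ((alpha >= C - tol) & (y == -1)) | free)[0]
S = np.where(((alpha <= tol) & (y == -1)) | ((alpha >= C - tol) & (y == 1)) | free)[0]

scores = -grad * y  # equals -grad/y because y is +/-1
m, M = scores[R].max(), scores[S].min()
print(m - M)  # alpha is optimal (to tolerance) once m - M <= epsilon
```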
{
"alpha_fraction": 0.5208292007446289,
"alphanum_fraction": 0.5389803647994995,
"avg_line_length": 26.091398239135742,
"blob_id": "f8e5d52d59ec2e8745575750472dafa962f7fb25",
"content_id": "be883596d254daf3b43b5cf9706e00c293ea46db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10082,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 372,
"path": "/Point 3.py",
"repo_name": "eleGAN23/Digits_classification_Nonlinear_SVM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Library\n\nimport sklearn\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport cvxopt\nfrom cvxopt import matrix\nfrom cvxopt import solvers\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport copy\n\nfrom sklearn import svm\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom matplotlib.colors import Normalize\n\n\n# In[2]:\n\n\n# Read files\n\ntrain2=pd.read_csv('train_2.csv', sep=',')\ntrain8=pd.read_csv('train_8.csv', sep=',')\ntest2=pd.read_csv('test_2.csv', sep=',')\ntest8=pd.read_csv('test_8.csv', sep=',')\n\n\n# In[3]:\n\n\n# Clean Data\n\nresult = train2.append(train8)\nresult.drop(\"257\", axis=1, inplace=True)\ny_train=np.asarray(result[\"0\"])\nx_train=result.drop(\"0\", axis=1)\nx_train=np.asarray(x_train)\ntest = test2.append(test8)\ny_test=np.asarray(test[\"0\"])\nx_test=test.drop(\"0\", axis=1)\nx_test=np.asarray(x_test)\n\n\n# In[4]:\n\n\n#Convert 2 in -1 and 8 in 1\n\nfor i in range(len(y_train)):\n if y_train[i]==2:\n y_train[i]=-1\n else:\n y_train[i]=1\n \nfor i in range(len(y_test)):\n if y_test[i]==2:\n y_test[i]=-1\n else:\n y_test[i]=1\n\n\n# In[5]:\n\n\n# Function for calculate accuracy of train and test set\n\ndef accuracy(alpha_k,x_train,y_train,x_test,gamma):\n sv = alpha_k > 1e-15\n ind = np.arange(len(alpha_k))[sv]\n alpha_sv= alpha_k[sv]\n X_train_sv = x_train[sv]\n y_train_sv = np.array(y_train)[sv]\n Y_train_sv = np.diag(y_train_sv)\n \n w=sum(alpha_sv*y_train_sv*gaussian_k(X_train_sv,X_train_sv, gamma))\n w=w.reshape(1,len(ind))\n y_train_sv=y_train_sv.reshape(len(ind),1)\n b=np.mean((1-(y_train_sv*w.T)*X_train_sv)/y_train_sv)\n \n # Preditct\n y_train_pred = np.sign(sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_train, gamma) for i in range(len(ind))) + b)[0]\n y_test_pred = np.sign(sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_test, gamma) for i in range(len(ind))) + b)[0]\n \n # Accuracy measures\n conf_mat_train = confusion_matrix(y_train,y_train_pred)\n accuracy_train=sklearn.metrics.accuracy_score(y_train, y_train_pred)\n conf_mat_test = confusion_matrix(y_test,y_test_pred)\n accuracy_test=sklearn.metrics.accuracy_score(y_test, y_test_pred)\n \n\n return accuracy_train, accuracy_test, conf_mat_train, conf_mat_test\n\n\n# In[6]:\n\n\n# function returning the gradient of f(alpha)\n\ndef Gradient(Q, alpha):\n return np.dot(Q, alpha) - [1]*len(alpha)\n\n\n# In[7]:\n\n\n# function returning the Hessian matrix\n\ndef Q_matrix(X, gamma, Y):\n # gaussian kernel\n K = sklearn.metrics.pairwise.rbf_kernel(X, X, gamma)\n Q = np.dot(np.dot(Y, K), Y)\n return Q\n\n\n# In[8]:\n\n\ndef gaussian_k(x, y, gamma):\n return sklearn.metrics.pairwise.rbf_kernel(x, y, gamma)\n\n\n# In[9]:\n\n\ndef R_alpha(alpha, y, C):\n alpha = np.array(alpha)\n y = np.array(y)\n \n r_1 = np.array([alpha==0]) & np.array([y==1])\n r_1=np.arange(len(alpha))[r_1[0]]\n r_2 = np.array([alpha==C]) & np.array([y==-1])\n r_2=np.arange(len(alpha))[r_2[0]]\n r_3 = np.array([alpha > 0]) & np.array([alpha < C])\n r_3=np.arange(len(alpha))[r_3[0]]\n \n condition_r=np.sort(np.concatenate((r_1, r_2, r_3)))\n return(condition_r)\n \ndef S_alpha(alpha, y, C):\n alpha = np.array(alpha)\n y = np.array(y)\n \n s_1 = np.array([alpha==0]) & np.array([y==-1])\n s_1=np.arange(len(alpha))[s_1[0]]\n s_2 = np.array([alpha==C]) & np.array([y==1])\n s_2=np.arange(len(alpha))[s_2[0]]\n s_3 = 
np.array([alpha > 0]) & np.array([alpha < C])\n s_3=np.arange(len(alpha))[s_3[0]]\n \n condition_s=np.sort(np.concatenate((s_1, s_2, s_3)))\n return(condition_s)\n \n\n\n# In[10]:\n\n\n# Use q=2 for MVP problem\n\ndef Decomposition_PSA(x_train, y_train, x_test,y_test, q=2, C=0.861, gamma=0.01, epsilon= 1e-5, maxiter=10000):\n \n # Take 1 point from r and 1 from s\n q1 = q2 = q//2\n \n # transform the y vectors into a diagonal matrix\n Y_train = np.diag(y_train)\n Y_test = np.diag(y_test)\n \n start_time = time.time()\n \n alpha_k = np.array([0.0]*len(y_train)) #Initial 0 array alpha\n k=0 #Counter\n opt=False\n #Create the Hessian Matrix\n QQ=Q_matrix(x_train,gamma,Y_train)\n\n \n while opt != True:\n # Gradient Matrix\n grad=Gradient(QQ,alpha_k)\n \n ws_r=R_alpha(alpha_k,y_train,C)\n seq_r =-grad[ws_r]/y_train[ws_r]\n i_r = np.argsort(seq_r)\n # Take index associated at the highest values\n indices_r = ws_r[i_r[-q1:]]\n \n \n ws_s=S_alpha(alpha_k,y_train,C)\n seq_s=-grad[ws_s]/y_train[ws_s]\n i_s = np.argsort(seq_s)\n # Take index associated at the lowest values\n indices_s = ws_s[i_s[:q2]]\n \n # Create working set\n W_k=np.concatenate((indices_r,indices_s))\n \n \n y_k = y_train[W_k]\n x_k = x_train[W_k]\n Y_k = np.diag(y_k)\n Q_k=Q_matrix(x_k,gamma,Y_k)\n alpha_k_n=alpha_k[W_k]\n dij=np.array([y_k[0],-y_k[1]])\n \n a1 = alpha_k_n[0]\n a2 = alpha_k_n[1]\n d1 = dij[0]\n d2 = dij[1]\n \n # Constraint\n if d1 > 0:\n if d2 > 0:\n t = min(C-a1, C-a2)\n else:\n t = min(C-a1, a2)\n else:\n if d2 > 0:\n t = min(a1, C-a2)\n else:\n t = min(a1, a2)\n \n # PSA\n if np.dot(grad[W_k].T,dij) == 0:\n t_star=0\n else:\n if np.dot(grad[W_k].T,dij) < 0:\n d_star=dij\n else:\n d_star=-dij\n\n if t == 0:\n t_star =0\n elif np.dot(np.dot(d_star.T,Q_k),d_star) == 0:\n t_star = t\n\n else:\n if np.dot(np.dot(d_star.T,Q_k),d_star) > 0:\n t_max = (np.dot(-grad[W_k].T,d_star))/(np.dot(np.dot(d_star.T,Q_k),d_star))\n t_star= min(t,t_max)\n \n # Find best alpha\n alpha_star=alpha_k[W_k]+np.dot(t_star,d_star)\n \n # Update alpha_k\n alpha_k[W_k]=alpha_star\n \n grad_n=Gradient(QQ,alpha_k)\n ws_r_n=R_alpha(alpha_k,y_train,C)\n m_alpha =max(-grad_n[ws_r_n]/y_train[ws_r_n])\n\n ws_s_n=S_alpha(alpha_k,y_train,C)\n M_alpha = min(-grad_n[ws_s_n]/y_train[ws_s_n])\n \n k += 1\n \n \n # Check first stopping criteria\n if m_alpha - M_alpha <= epsilon:\n opt = True\n end_time = time.time()\n seco=end_time-start_time\n accuracy_train, accuracy_test,conf_mat_train,conf_mat_test = accuracy(alpha_k,x_train,y_train,x_test,gamma)\n final_obj = np.dot(np.dot(alpha_k.T,QQ),alpha_k)*0.5-np.sum(alpha_k)\n print(\"Classification Rate Training Set: \", accuracy_train)\n print(\"Missclassificated points: \", conf_mat_train[0][1]+conf_mat_train[1][0])\n print(\"Training Error: \",(1-accuracy_train))\n print(\"Classification Rate Test Set: %s\"%(accuracy_test))\n print(\"Missclassificated points: \", conf_mat_test[0][1]+conf_mat_test[1][0])\n print(\"Test Error: \",(1-accuracy_test))\n print(\"Time: %s seconds\"%(seco))\n print(\"Number of Iterations: \", k)\n #print(\"Number of function evaluations: \", evaluations)\n print(\"Gamma value: %s\"%(gamma))\n print(\"C value: %s\"%(C))\n print(\"Initial value of the objective function of the dual problem: 0\" )\n print(\"Final value of the objective function of the dual problem: \", final_obj )\n print(\"q Values: \", q)\n print(\"Difference m and M: \", (m_alpha - M_alpha - epsilon))\n print()\n print(\"-------------------------------\")\n print()\n print(\"Train Confusion Matrix: \\n\", 
conf_mat_train)\n print()\n print(\"-------------------------------\")\n print()\n print(\"Test Confusion Matrix: \\n\", conf_mat_test)\n return\n \n \n # Check second stopping criteria\n if k == maxiter:\n end_time = time.time()\n seco=end_time-start_time\n accuracy_train, accuracy_test,conf_mat_train,conf_mat_test = accuracy(alpha_k,x_train,y_train,x_test,gamma)\n final_obj = np.dot(np.dot(alpha_k.T,QQ),alpha_k)*0.5-np.sum(alpha_k)\n print(\"Reached maximum number of iterations: %s after %s seconds\"%(k,(end_time - start_time)))\n print(\"Classification Rate Training Set: \", accuracy_train)\n print(\"Missclassificated points: \", conf_mat_train[0][1]+conf_mat_train[1][0])\n print(\"Training Error: \",(1-accuracy_train))\n print(\"Classification Rate Test Set: %s\"%(accuracy_test))\n print(\"Missclassificated points: \", conf_mat_test[0][1]+conf_mat_test[1][0])\n print(\"Test Error: \",(1-accuracy_test))\n print(\"Time: %s seconds\"%(seco))\n print(\"Number of Iterations: \", k)\n #print(\"Number of function evaluations: \", evaluations)\n print(\"Gamma value: %s\"%(gamma))\n print(\"C value: %s\"%(C))\n print(\"Initial value of the objective function of the dual problem: 0\" )\n print(\"Final value of the objective function of the dual problem: \", final_obj )\n print(\"q Values: \", q)\n print(\"Difference m and M: \", (m_alpha - M_alpha - epsilon))\n print()\n print(\"-------------------------------\")\n print()\n print(\"Train Confusion Matrix: \\n\", conf_mat_train)\n print()\n print(\"-------------------------------\")\n print()\n print(\"Test Confusion Matrix: \\n\", conf_mat_test)\n \n return\n\n\n# In[11]:\n\n\nDecomposition_PSA(x_train,y_train,x_test,y_test, maxiter=10000)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n"
},
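`Decomposition_PSA` replaces the inner QP solver with an analytic step along the most-violating-pair direction d: the unconstrained minimizer t* = −∇f(α)ᵀd / dᵀQd is clipped to the largest feasible step inside the box [0, C]. A toy version of that step with made-up numbers:

```python
import numpy as np

Q = np.array([[2.0, 0.5], [0.5, 1.0]])  # Hessian restricted to the working pair
grad = np.array([-1.0, 0.4])            # dual gradient at the current pair
d = np.array([1.0, -1.0])               # MVP direction, keeps sum(y*alpha) fixed
t_feas = 0.3                            # largest step keeping 0 <= alpha <= C

curv = d @ Q @ d
t_star = t_feas if curv <= 0 else min(t_feas, -(grad @ d) / curv)
print(t_star)  # -> 0.3, the feasibility bound binds before the curvature minimum
```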
{
"alpha_fraction": 0.6018789410591125,
"alphanum_fraction": 0.631449282169342,
"avg_line_length": 26.61276626586914,
"blob_id": "b6bdb50fb1809a83dd73aecc7e3bfaa73c8d7651",
"content_id": "b3d726fa825a4a877a984ae0a302b8b1c482d492",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6493,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 235,
"path": "/Point 4.py",
"repo_name": "eleGAN23/Digits_classification_Nonlinear_SVM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Library\n\nimport sklearn\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport cvxopt\nfrom cvxopt import matrix\nfrom cvxopt import solvers\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport copy\n\nfrom sklearn import svm\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom matplotlib.colors import Normalize\n\n\n# In[2]:\n\n\ndef converter(train,test,n1):\n '''\n convert n1 in 1 and the other in -1\n '''\n for i in range(len(train)):\n if train[i] == n1:\n train[i] = 1\n \n else:\n train[i] = -1\n \n for i in range(len(test)):\n if test[i] == n1:\n test[i] = 1\n \n else:\n test[i] = -1\n \n return train, test\n\n\n# In[3]:\n\n\n# define the kernel used: Gaussian kernel\n\ndef gaussian_k(x, y, gamma):\n return sklearn.metrics.pairwise.rbf_kernel(x, y, gamma)\n\n\n# In[4]:\n\n\ndef process_g_2(C, gamma, x_train,y_train, x_test,y_test):\n \n start_time = time.time()\n\n Y_train = np.diag(y_train)\n Y_test = np.diag(y_test)\n \n solvers.options['show_progress'] = False\n K = gaussian_k(x_train, x_train, gamma)\n # define the parameters for cvxopt (quadratic programming solver)\n P = matrix(np.dot(np.dot(Y_train, K), Y_train))\n # q has to have 1 col (the package does the transpose)\n q = matrix([-1]*len(y_train), (len(y_train), 1), tc = 'd')\n\n # create the first constraint (- alpha <= 0)\n first_constr = np.diag([-1]*len(y_train))\n first_limit = np.array([0]*len(y_train))\n # create the second constraint (alpha <= C)\n second_constr = np.diag([1]*len(y_train))\n second_limit = np.array([C]*len(y_train))\n G = matrix(np.concatenate((first_constr, second_constr)), tc='d')\n h = matrix(np.concatenate((first_limit, second_limit)), tc='d')\n\n A = matrix(y_train, (1, len(y_train)), tc='d')\n b = matrix(0, tc='d')\n sol = solvers.qp(P=P, q=q, G=G, h=h, A=A, b=b)\n # Lagrange multipliers\n alpha = np.ravel(sol['x'])\n funct_eva=sol[\"iterations\"]\n \n # Support vectors have non zero lagrange multipliers\n sv = alpha > 1e-5\n \n ind = np.arange(len(alpha))[sv]\n alpha_sv= alpha[sv]\n X_train_sv = x_train[sv]\n y_train_sv = np.array(y_train)[sv]\n Y_train_sv = np.diag(y_train_sv)\n \n \n w = sum(alpha_sv*y_train_sv*gaussian_k(X_train_sv,X_train_sv, gamma))\n w=w.reshape(1,len(ind))\n y_train_sv=y_train_sv.reshape(len(ind),1)\n b=np.mean((1-(y_train_sv*w.T)*X_train_sv)/y_train_sv)\n \n end_time = time.time()\n seco=end_time - start_time\n \n # Predict\n y_train_pred = (sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_train, gamma) for i in range(len(ind))) + b)[0]\n\n y_test_pred = (sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_test, gamma) for i in range(len(ind))) + b)[0]\n\n \n \n return y_train_pred, y_test_pred,seco,funct_eva,gamma,C\n\n\n# In[5]:\n\n\n# Read files\n\ntrain1=pd.read_csv('train_1.csv', sep=',')\ntrain2=pd.read_csv('train_2.csv', sep=',')\ntrain8=pd.read_csv('train_8.csv', sep=',')\ntest1=pd.read_csv('test_1.csv', sep=',')\ntest2=pd.read_csv('test_2.csv', sep=',')\ntest8=pd.read_csv('test_8.csv', sep=',')\n\n\n# In[6]:\n\n\n# Clean Data\n\nresult=pd.concat([train1,train2,train8])\nresult.drop(\"257\", axis=1, inplace=True)\ny_train=np.asarray(result[\"0\"])\nx_train=result.drop(\"0\", axis=1)\nx_train=np.asarray(x_train)\n\ntest=pd.concat([test1,test2,test8])\ny_test=np.asarray(test[\"0\"])\nx_test=test.drop(\"0\", 
axis=1)\nx_test=np.asarray(x_test)\n\n\n# In[7]:\n\n\n# make a copy of the initial dataset\n\ny_train_1=copy.deepcopy(y_train)\ny_test_1=copy.deepcopy(y_test)\ny_train_2=copy.deepcopy(y_train)\ny_test_2=copy.deepcopy(y_test)\ny_train_8=copy.deepcopy(y_train)\ny_test_8=copy.deepcopy(y_test)\n\n\n# In[8]:\n\n\ny_train_1, y_test_1 = converter(y_train_1,y_test_1,1)\nsolution_1_train, solution_1_test,seco_1,funct_eva_1,gamma_1,C_1 = process_g_2(0.7, 0.019,x_train,y_train_1,x_test,y_test_1)\ny_train_2, y_test_2 = converter(y_train_2,y_test_2,2)\nsolution_2_train, solution_2_test,seco_2,funct_eva_2,gamma_2,C_2 = process_g_2(1, 0.025,x_train,y_train_2,x_test,y_test_2)\ny_train_8, y_test_8 = converter(y_train_8,y_test_8,8)\nsolution_8_train, solution_8_test,seco_8,funct_eva_8,gamma_8,C_8 = process_g_2(0.8, 0.013,x_train,y_train_8,x_test,y_test_8)\n\n# Store all the predicted values in a list of lists\naa=list(zip((solution_1_train),(solution_2_train),(solution_8_train)))\nval=[i.index(max(i)) for i in aa]\npredicted=[]\nfor i in val:\n if i == 0:\n predicted.append(1)\n if i == 1:\n predicted.append(2)\n if i == 2:\n predicted.append(8)\nconf_mat_train = confusion_matrix(y_train,predicted)\naccuracy_train=sklearn.metrics.accuracy_score(y_train, predicted)\n\n# Find the max for every prediction\n\naa=list(zip((solution_1_test),(solution_2_test),(solution_8_test)))\nval=[i.index(max(i)) for i in aa]\npredicted=[]\nfor i in val:\n if i == 0:\n predicted.append(1)\n if i == 1:\n predicted.append(2)\n if i == 2:\n predicted.append(8)\n\nconf_mat_test = confusion_matrix(y_test,predicted)\naccuracy_test=sklearn.metrics.accuracy_score(y_test, predicted)\n\n\n# In[9]:\n\n\nprint(\"Strategy: One Against All (OAA)\")\n\nprint(\"Classification Rate Training Set: %s\"%(accuracy_train))\nprint(\"Misclassified points: \", conf_mat_train[0][1]+conf_mat_train[0][2]+conf_mat_train[1][0]+conf_mat_train[1][2]+conf_mat_train[2][0]+conf_mat_train[2][1])\nprint(\"Training Error: \",(1-accuracy_train))\nprint(\"Classification Rate Test Set: %s\"%(accuracy_test))\nprint(\"Misclassified points: \", conf_mat_test[0][1]+conf_mat_test[0][2]+conf_mat_test[1][0]+conf_mat_test[1][2]+conf_mat_test[2][0]+conf_mat_test[2][1])\nprint(\"Test Error: \",(1-accuracy_test))\nprint(\"Time: %s seconds\"%(seco_1 + seco_2 + seco_8))\nprint(\"Number of function evaluations: \", funct_eva_1+funct_eva_2+funct_eva_8)\nprint(\"Gamma value for 1 vs All: %s\"%(gamma_1))\nprint(\"Gamma value for 2 vs All: %s\"%(gamma_2))\nprint(\"Gamma value for 8 vs All: %s\"%(gamma_8))\nprint(\"C value for 1 vs All: %s\"%(C_1))\nprint(\"C value for 2 vs All: %s\"%(C_2))\nprint(\"C value for 8 vs All: %s\"%(C_8))\nprint()\nprint(\"-------------------------------\")\nprint()\nprint(\"Train Confusion Matrix: \\n\", conf_mat_train)\nprint()\nprint(\"-------------------------------\")\nprint()\nprint(\"Test Confusion Matrix: \\n\", conf_mat_test)\n\n\n# In[ ]:\n\n\n\n\n"
},
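The one-against-all combination in Point 4 boils down to an argmax over the three binary decision values. The same step in compact NumPy form (the score values are made up):

```python
import numpy as np

labels = np.array([1, 2, 8])
# decision values f_k(x) of the three binary SVMs, one row per test point
scores = np.array([[ 0.9, -0.2, -1.1],
                   [-0.4,  0.3, -0.6],
                   [-1.0, -0.5,  0.7]])
predicted = labels[scores.argmax(axis=1)]
print(predicted)  # -> [1 2 8]
```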
{
"alpha_fraction": 0.6223238706588745,
"alphanum_fraction": 0.6381209492683411,
"avg_line_length": 25.267759323120117,
"blob_id": "41ca5eeac055d053f6c55c3d3dbdbc3b7ea8d87a",
"content_id": "13e71abe9f7399304a63d2d122966d9e2d192415",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4811,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 183,
"path": "/Point 1.py",
"repo_name": "eleGAN23/Digits_classification_Nonlinear_SVM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Library\n\nimport sklearn\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport cvxopt\nfrom cvxopt import matrix\nfrom cvxopt import solvers\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport copy\nfrom sklearn import svm\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\n\n\n# In[2]:\n\n\n# Read files\n\ntrain2=pd.read_csv('train_2.csv', sep=',')\ntrain8=pd.read_csv('train_8.csv', sep=',')\ntest2=pd.read_csv('test_2.csv', sep=',')\ntest8=pd.read_csv('test_8.csv', sep=',')\n\n\n# In[3]:\n\n\n# Clean Data\n\nresult = train2.append(train8)\nresult.drop(\"257\", axis=1, inplace=True)\ny_train=np.asarray(result[\"0\"])\nx_train=result.drop(\"0\", axis=1)\nx_train=np.asarray(x_train)\ntest = test2.append(test8)\ny_test=np.asarray(test[\"0\"])\nx_test=test.drop(\"0\", axis=1)\nx_test=np.asarray(x_test)\n\n\n# In[4]:\n\n\n#Convert 2 in -1 and 8 in 1\n\nfor i in range(len(y_train)):\n if y_train[i]==2:\n y_train[i]=-1\n else:\n y_train[i]=1\n \nfor i in range(len(y_test)):\n if y_test[i]==2:\n y_test[i]=-1\n else:\n y_test[i]=1\n\n\n# In[5]:\n\n\n# define the kernel used: Gaussian kernel\n\ndef gaussian_k(x, y, gamma):\n return sklearn.metrics.pairwise.rbf_kernel(x, y, gamma)\n\n\n# In[6]:\n\n\ndef process_g(x_train,y_train,x_test, y_test,C=0.861, gamma=0.01):\n solvers.options['show_progress'] = False\n \n start_time = time.time()\n # transform the y vectors into a diagonal matrix\n Y_train = np.diag(y_train)\n Y_test = np.diag(y_test)\n \n K = gaussian_k(x_train, x_train, gamma)\n # define the parameters for cvxopt (quadratic programming solver)\n P = matrix(np.dot(np.dot(Y_train, K), Y_train))\n # q has to have 1 col (the package does the transpose)\n q = matrix([-1]*len(y_train), (len(y_train), 1), tc = 'd')\n\n # create the first constraint (- alpha <= 0)\n first_constr = np.diag([-1]*len(y_train))\n first_limit = np.array([0]*len(y_train))\n # create the second constraint (alpha <= C)\n second_constr = np.diag([1]*len(y_train))\n second_limit = np.array([C]*len(y_train))\n G = matrix(np.concatenate((first_constr, second_constr)), tc='d')\n h = matrix(np.concatenate((first_limit, second_limit)), tc='d')\n\n A = matrix(y_train, (1, len(y_train)), tc='d')\n b = matrix(0, tc='d')\n \n sol = solvers.qp(P=P, q=q, G=G, h=h, A=A, b=b)\n #print(sol)\n \n # Lagrange multipliers\n alpha = np.ravel(sol['x'])\n funct_eva=sol[\"iterations\"]\n final_obj=sol['primal objective']\n \n # Support vectors have non zero lagrange multipliers\n sv = alpha > 1e-5\n \n ind = np.arange(len(alpha))[sv]\n alpha_sv= alpha[sv]\n X_train_sv = x_train[sv]\n y_train_sv = np.array(y_train)[sv]\n Y_train_sv = np.diag(y_train_sv)\n \n \n w = sum(alpha_sv*y_train_sv*gaussian_k(X_train_sv,X_train_sv, gamma))\n w=w.reshape(1,len(ind))\n y_train_sv=y_train_sv.reshape(len(ind),1)\n b=np.mean((1-(y_train_sv*w.T)*X_train_sv)/y_train_sv)\n \n end_time = time.time()\n seco=end_time - start_time\n \n # Predict\n y_train_pred = np.sign(sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_train, gamma) for i in range(len(ind))) + b)[0]\n y_test_pred = np.sign(sum(alpha_sv[i]*y_train_sv[i]*gaussian_k([X_train_sv[i]], x_test, gamma) for i in range(len(ind))) + b)[0]\n \n return C, gamma, funct_eva, final_obj, y_train_pred, y_test_pred, seco\n\n\n# In[7]:\n\n\nC, gamma, funct_eva, final_obj, y_train_pred, y_test_pred, seco = 
process_g(x_train,y_train,x_test,y_test)\n\n\n# In[8]:\n\n\n# Accuracy measures\nconf_mat_train = confusion_matrix(y_train,y_train_pred)\naccuracy_train=sklearn.metrics.accuracy_score(y_train, y_train_pred)\nconf_mat_test = confusion_matrix(y_test,y_test_pred)\naccuracy_test=sklearn.metrics.accuracy_score(y_test, y_test_pred)\n\n\n# In[9]:\n\n\nprint(\"Classification Rate Training Set: %s\"%(accuracy_train))\nprint(\"Misclassified points: \", conf_mat_train[0][1]+conf_mat_train[1][0])\nprint(\"Training Error: \",(1-accuracy_train))\nprint(\"Classification Rate Test Set: %s\"%(accuracy_test))\nprint(\"Misclassified points: \", conf_mat_test[0][1]+conf_mat_test[1][0])\nprint(\"Test Error: \",(1-accuracy_test))\nprint(\"Time: %s seconds\"%(seco))\nprint(\"Number of function evaluations: \", funct_eva)\nprint(\"Gamma value: %s\"%(gamma))\nprint(\"C value: %s\"%(C))\nprint(\"Initial value of the objective function of the dual problem: 0\" )\nprint(\"Final value of the objective function of the dual problem: \", final_obj )\nprint()\nprint(\"-------------------------------\")\nprint()\nprint(\"Train Confusion Matrix: \\n\", conf_mat_train)\nprint()\nprint(\"-------------------------------\")\nprint()\nprint(\"Test Confusion Matrix: \\n\", conf_mat_test)\n\n\n# In[ ]:\n\n\n\n\n"
}
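For reference, the dual problem that `process_g` hands to CVXOPT is min ½αᵀPα + qᵀα subject to 0 ≤ α ≤ C and yᵀα = 0, with P = Y K Y. The same setup on a two-point toy problem:

```python
import numpy as np
from cvxopt import matrix, solvers

solvers.options['show_progress'] = False
y = np.array([1.0, -1.0])
K = np.array([[1.0, 0.2], [0.2, 1.0]])  # any positive semidefinite kernel matrix
C = 1.0

P = matrix(np.outer(y, y) * K)
q = matrix(-np.ones(2))
G = matrix(np.vstack([-np.eye(2), np.eye(2)]))
h = matrix(np.hstack([np.zeros(2), C * np.ones(2)]))
A = matrix(y.reshape(1, 2))
b = matrix(0.0)

alpha = np.ravel(solvers.qp(P, q, G, h, A, b)['x'])
print(alpha)  # both multipliers hit the box bound C for this toy problem
```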
] | 5 |
manasibhagwat21/E-LearningWebsite
|
https://github.com/manasibhagwat21/E-LearningWebsite
|
da402a6d5bad44071ed75ca5fe53efba99b34f5d
|
3e4cbae9a016403b3317710b88f3bc179ac08ed2
|
25b943eeb8a46f6f4f3eafb686b60ab6b1f59f3e
|
refs/heads/master
| 2023-06-22T18:31:13.514829 | 2021-07-13T13:30:46 | 2021-07-13T13:30:46 | 363,169,140 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6585366129875183,
"alphanum_fraction": 0.6768292784690857,
"avg_line_length": 26.33333396911621,
"blob_id": "894409b36f52772e8ef7fdb543278ab7c9b68535",
"content_id": "99787e5aaa3c663831a871ce8745c64b5f58ac3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 6,
"path": "/static/dom.js",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "function showCourse(){\n let main=document.querySelector('.main');\n let h1=main.querySelector('h1');\n\tvar name={{Material.topic_name}};\n h1.textContent=;\n}\n"
},
{
"alpha_fraction": 0.4806629717350006,
"alphanum_fraction": 0.5662983655929565,
"avg_line_length": 19.11111068725586,
"blob_id": "7ff0b142342ebe00e6411b56df3ddea65de941f3",
"content_id": "669bbb3cb215653f21b9030ddb6468ac3d308583",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/Home/migrations/0003_auto_20210327_1653.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-03-27 11:23\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0002_auto_20210327_1620'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='student',\n old_name='lastname',\n new_name='email',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7957446575164795,
"alphanum_fraction": 0.7957446575164795,
"avg_line_length": 38.08333206176758,
"blob_id": "700afec64f764b0e2daf258e10f04bd8f682e355",
"content_id": "627c28d69bcec82e97639e55d1900776223dbc53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 369,
"num_lines": 12,
"path": "/README.md",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# E-LearningWebsite\n\n# Problem Statement\nThe main objective of the E-Learning Website is to help the students get over the traditional methods of learning and make them accustomed to the internet where the notes and video lectures for their respective subjects are easily available. The implementation of this project helps both the students and the teachers. This system has two modules – Student and Faculty.\n\n# Tech Stack\nFrontend -\n- HTML\n- CSS\n\nBackend -\n- Django\n\n"
},
{
"alpha_fraction": 0.5253333449363708,
"alphanum_fraction": 0.5759999752044678,
"avg_line_length": 19.83333396911621,
"blob_id": "a865b4f8d7d26842d9e76a25bfd262300553838f",
"content_id": "a05e4b132c696d2578a7ac3e5f2ffdab7907f19b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/Home/migrations/0030_auto_20210514_1027.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-05-14 04:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0029_course_upload_slug'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='course_upload',\n name='slug',\n field=models.SlugField(),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5304054021835327,
"alphanum_fraction": 0.5827702879905701,
"avg_line_length": 24.7391300201416,
"blob_id": "997cf1157ff1f89922618479b29a3e3e9d6a5d2e",
"content_id": "c660d8b079a9214191a9d1baa6d6b8bcd3c81eca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 23,
"path": "/Home/migrations/0024_auto_20210422_0009.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-21 18:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0023_auto_20210422_0007'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='upload_files',\n name='notes_file',\n field=models.FileField(upload_to='Material/Javascript'),\n ),\n migrations.AlterField(\n model_name='upload_files',\n name='video_file',\n field=models.FileField(upload_to='Material/Javascript'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.51408451795578,
"alphanum_fraction": 0.5809859037399292,
"avg_line_length": 16.75,
"blob_id": "803580f30ef0f507ca89137257689403fa2d61aa",
"content_id": "2dd1ecffa9441ca15d5f2405b1d68f7f246bcd3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/Home/migrations/0014_delete_post.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-21 15:57\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0013_upload_files'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Post',\n ),\n ]\n"
},
{
"alpha_fraction": 0.6774193644523621,
"alphanum_fraction": 0.7006751894950867,
"avg_line_length": 32.29999923706055,
"blob_id": "3683db4a0e515b6abe5f0338540203e9d5b5d818",
"content_id": "f4ccfb6e21eaaceb00b15d933ed4fc9eae7b5262",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1333,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 40,
"path": "/Home/models.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.db.models import When\n\n\n# Create your models here.\n# class Student(models.Model):\n# username = models.CharField(max_length=300, unique=True)\n# password = models.TextField(default='abc123')\n# cpassword = models.TextField(default='abc123')\n# email = models.TextField()\n# firstname = models.TextField(default='asc')\n# lastname = models.TextField(default='asc')\n\nclass Contact(models.Model):\n msgid = models.AutoField(primary_key=True)\n name = models.CharField(max_length=300, default='')\n email = models.CharField(max_length=300, default='')\n phone = models.CharField(max_length=300, default='')\n desc = models.CharField(max_length=300, default='')\n\n\nclass faculty(models.Model):\n username = models.CharField(max_length=50)\n password = models.CharField(max_length=50)\n\n def __str__(self):\n return self.username\n\n\nclass Upload_Files(models.Model):\n topic_name = models.CharField(max_length=200)\n notes_file = models.FileField(upload_to=\"Material/\")\n\n\nclass course_upload(models.Model):\n topic_name = models.CharField(max_length=200)\n link=models.SlugField()\n desc = models.CharField(max_length=500)\n notes_file = models.FileField(upload_to=\"Material/\")\n video_link = models.CharField(max_length=800,default='')\n\n"
},
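A minimal Django view sketch that could serve one of the `course_upload` rows; the view and template names here are assumptions for illustration, not taken from the repo:

```python
# views.py (illustrative sketch; names are hypothetical)
from django.shortcuts import render, get_object_or_404
from Home.models import course_upload

def course_detail(request, slug):
    # 'link' is the SlugField declared on course_upload
    course = get_object_or_404(course_upload, link=slug)
    return render(request, 'course_detail.html', {'course': course})
```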
{
"alpha_fraction": 0.5035545229911804,
"alphanum_fraction": 0.5296208262443542,
"avg_line_length": 24.57575798034668,
"blob_id": "0966eaceb20696e0050a14495ec4657edc5bdfcb",
"content_id": "d3f599e23955f201df218f182898a7f66624e9df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 33,
"path": "/Home/migrations/0002_auto_20210327_1620.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-03-27 10:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='student',\n name='cpassword',\n field=models.TextField(default='abc123'),\n ),\n migrations.AddField(\n model_name='student',\n name='password',\n field=models.TextField(default='abc123'),\n ),\n migrations.AddField(\n model_name='student',\n name='phone',\n field=models.TextField(default='123'),\n ),\n migrations.AddField(\n model_name='student',\n name='username',\n field=models.TextField(default=''),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5625,
"avg_line_length": 22.65217399597168,
"blob_id": "fe280afa40eb2ba4951bc8ea31c3c8507329cc9a",
"content_id": "627caf53162e1ee3efb3bc547ef115d987b023db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 544,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 23,
"path": "/Home/migrations/0004_auto_20210327_1657.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-03-27 11:27\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0003_auto_20210327_1653'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='student',\n name='firstname',\n field=models.TextField(),\n ),\n migrations.AlterField(\n model_name='student',\n name='username',\n field=models.CharField(max_length=300, unique=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5012787580490112,
"alphanum_fraction": 0.5805626511573792,
"avg_line_length": 20.72222137451172,
"blob_id": "444c282f4308c630572b6eb3413f31d17aa3d2ba",
"content_id": "9b274c51c0ef4ab98ca8448ca130bdf3d8adf7cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 18,
"path": "/Home/migrations/0012_auto_20210421_2109.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-21 15:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0011_auto_20210420_2305'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='file_field',\n field=models.FileField(upload_to='static/'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5038759708404541,
"alphanum_fraction": 0.5839793086051941,
"avg_line_length": 20.5,
"blob_id": "cc9b86fd97ec56a6abd9021fa6d2d9f0974e081c",
"content_id": "ab1ace78676616b6e406cfafbb63478fa214cdc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/Home/migrations/0005_auto_20210327_1720.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-03-27 11:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0004_auto_20210327_1657'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='student',\n name='firstname',\n field=models.TextField(default='asc'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.678219199180603,
"alphanum_fraction": 0.7055508494377136,
"avg_line_length": 29.860870361328125,
"blob_id": "939b2da8fc1a28380658d9d84036327058934f9f",
"content_id": "6baa70530aed04cb896877e17193a0d8abd6fa86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3549,
"license_type": "no_license",
"max_line_length": 548,
"num_lines": 115,
"path": "/templates/c5.html",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n\n{% block body %}\n\n<head>\n\t<title>JavaScript</title>\n\t<meta charset=\"utf-8\">\n\t<link rel=\"stylesheet\" href=\"https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css\" integrity=\"sha384-JcKb8q3iqJ61gNV9KGb8thSsNjpSL0n8PARn9HuZOnIxN0hoP+VmmDGMN5t9UJ0Z\" crossorigin=\"anonymous\">\n\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\t<link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\">\n</head>\n<style>\nBody{text-align:justify;}\n\t#courseinfo{\n\t\tmargin-top:30px;\n\t\tborder-bottom: 2px solid #ddd;\n\t\tpadding-bottom:20px;\n\t}\n\t#courseinfo h2 {\n\ttext-transform: capitalize;\n\tfont-size:30px;\n\t}\n\t.video-container {\n overflow: hidden;\n position: relative;\n width:100%;\n\t}\n\t.video-container::after {\n\t\tpadding-top: 50%;\n\t\tdisplay: block;\n\t\tcontent: '';\n\t}\n\t.video-container iframe {\n\t\tposition: absolute;\n\t\ttop: 0;\n\t\tleft: 0;\n\t\twidth: 100%;\n\t\theight: 100%;\n\t}\n\n #showcase {\n\tbackground:url('../img/sc.png') no-repeat ;\n\tbackground-color:#05386B;\n\tbackground-position: center;\n \tbackground-size: cover;\n \tmin-height:400px;\n\tpadding-top:50px;\n\tborder-bottom:;\n\t}\n\t#showcase h2{\n\t\tmargin-top:100px;\n\t\ttext-align:center;\n\t\tfont-weight: bold;\n\t\tfont-size:50px;\n\t\tcolor:#fff;\n\t}\n\t#showcase p{\n\t\ttext-align:center;\n\t\tcolor:#ffffff;\n\t\tfont-size:20px;\n\t}\n\t#sideC li{\n\t\tpadding:15px;\n\t\tfont-size:20px;\n\t}\n\t#sideC{\n\t\tborder-left: 2px solid #ddd;\n\t}\n\t#back button{\n\t\tmargin:10px;\n\t\tpadding:10px;\n\t\twidth:100%;\n\t\tbackground:#263238;\n\t\tcolor:#fff;\n\t}\n</style>\n<body>\n\n<div id='showcase'>\n <h2>JavaScript</h2>\n <p>Learn the essentials of the JavaScript programming language to advance your skills in the lucrative field of web development.</p>\n</div>\n\n<div class='container'>\n\n\t<div class=\"row\" id=\"courseinfo\">\n\n\t\t<div class='col-md-9 col-sm-12'>\n\t\t\t<h2>About this course</h2>\n\t\t\t<p>JavaScript is an object-oriented programming language employed by most websites along with HTML and CSS to create robust, dynamic and interactive user experiences. The JavaScript programming language was introduced in 1995 and has since become one of the most popular with support by all major web browsers. JavaScript programs are used both client-side and server-side to add functionality to web pages. A 2016 Stack Overflow developer survey listed JavaScript as the most popular front end developer and back end development technology.</p>\n\t\t<p>Take an introductory level course at edX to learn Javascript for absolute beginners and learn about syntax, conditionals, and its web application. The World Wide Web Consortium (W3C)'s JavaScript Introduction teaches you how to add JavaScript code in your Web site/Web app, debug it, make interactive Web sites through the DOM API, change the CSS styles of HTML5 elements from JavaScript, deal with HTML5 forms, and much more. 
</p>\n\t\t</div>\n\n\t\t<div class='col-md-3 col-sm-12' id='sideC'>\n\t\t\t<ul>\n\t\t\t\t<li>Shareable Certificate</li>\n\t\t\t\t<li>100 % Online</li>\n\t\t\t\t<LI>English</LI>\n\t\t\t\t<li>Beginner level</li>\n\t\t\t</ul>\n\t\t</div>\n\n\t</div>\n\t<div class=\"video-container\">\n\t\t<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/c-I5S_zTwAc\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>\n\t</div>\n\n <br>\n\n\n\t<div id='back'><a href=\"resources.html\"><button type=\"submit\" class=\"btn\" >Resources</button></a></div> <br>\n\n\t<div id='back'><a href=\"catalog\"><button type=\"submit\" class=\"btn\" >Back to Courses</button></a></div>\n</div>\n{% endblock body %}\n"
},
{
"alpha_fraction": 0.4879120886325836,
"alphanum_fraction": 0.5296703577041626,
"avg_line_length": 18.782608032226562,
"blob_id": "ffccce22f7d24e9abe79fc4b832c31af10770527",
"content_id": "f382e028ae2acbaf9b01ee009f9b430ed1ea18d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 23,
"path": "/Home/migrations/0011_auto_20210420_2305.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-20 17:35\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0010_profile'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='profile',\n name='user',\n ),\n migrations.DeleteModel(\n name='Student',\n ),\n migrations.DeleteModel(\n name='Profile',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5037037134170532,
"alphanum_fraction": 0.5876542925834656,
"avg_line_length": 21.5,
"blob_id": "f7a672093c961d82a66af7f1445e41becf7a736c",
"content_id": "301bac8cc89fb9b8bf5dcf6ce371c3f521a4bbbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 18,
"path": "/Home/migrations/0028_course_upload_video_link.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-30 06:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0027_auto_20210426_0121'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course_upload',\n name='video_link',\n field=models.CharField(default='', max_length=800),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4793388545513153,
"alphanum_fraction": 0.5647382736206055,
"avg_line_length": 19.16666603088379,
"blob_id": "36e9026c6d06f370164d56bccdac69d78c77a200",
"content_id": "4c4940ce81e00164e3ebf6017e06401de742a3e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/Home/migrations/0031_auto_20210518_1142.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-05-18 06:12\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0030_auto_20210514_1027'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='course_upload',\n old_name='slug',\n new_name='link',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5068492889404297,
"alphanum_fraction": 0.5631659030914307,
"avg_line_length": 28.863636016845703,
"blob_id": "fdc59c33bd466ce7bfa3dd1dcaebf49a4603c50e",
"content_id": "0e343357b8e872674004f7cf3d25c9075dce238d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 22,
"path": "/Home/migrations/0026_upload_course.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-25 19:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0025_auto_20210422_0103'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='upload_course',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('topic_name', models.CharField(max_length=200)),\n ('desc', models.CharField(max_length=500)),\n ('notes_file', models.FileField(upload_to='Material/')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.5005675554275513,
"alphanum_fraction": 0.5357547998428345,
"avg_line_length": 25.696969985961914,
"blob_id": "42672216a6ed45065b48e926f79b89e9eb7833a7",
"content_id": "ea099970aa11a936ce8057d01e8e0b59ad650702",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 33,
"path": "/Home/migrations/0007_auto_20210420_0942.py",
"repo_name": "manasibhagwat21/E-LearningWebsite",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-04-20 04:12\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Home', '0006_contact'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='contact',\n name='desc',\n field=models.CharField(default='', max_length=300),\n ),\n migrations.AlterField(\n model_name='contact',\n name='email',\n field=models.CharField(default='', max_length=300),\n ),\n migrations.AlterField(\n model_name='contact',\n name='name',\n field=models.CharField(default='', max_length=300),\n ),\n migrations.AlterField(\n model_name='contact',\n name='phone',\n field=models.CharField(default='', max_length=300),\n ),\n ]\n"
}
] | 17 |
sjyoon/30daysofcode
|
https://github.com/sjyoon/30daysofcode
|
e59ef974db33cfb12551f602e95f9a51d28830f2
|
5e5c960cea51b47103e1bd78156dbb37c688c9da
|
c6b5c991f52ba5731d7a15aa7dc50f1e9c43ff71
|
refs/heads/master
| 2018-01-04T02:01:26.205015 | 2016-11-21T18:20:08 | 2016-11-21T18:20:08 | 71,192,287 | 0 | 0 | null | 2016-10-18T00:19:18 | 2016-10-17T21:03:37 | 2016-10-17T19:02:01 | null |
[
{
"alpha_fraction": 0.5196850299835205,
"alphanum_fraction": 0.5196850299835205,
"avg_line_length": 14.875,
"blob_id": "8f36384552d608b21fdb7afb6eb704bb771c8f01",
"content_id": "6085e3826d0e7cd14fd13e61bbcbae05f804a809",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 16,
"path": "/cpp/day1.cpp",
"repo_name": "sjyoon/30daysofcode",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <string>\nusing namespace std;\n\nint main() {\n\n int i; double d; string s;\n cin >> i >> d;\n cout << \"Enter string\" << endl;\n getline(cin, s);\n\n cout << i << endl;\n cout << d << endl;\n cout << s << endl;\n\n}\n"
},
{
"alpha_fraction": 0.7200000286102295,
"alphanum_fraction": 0.7200000286102295,
"avg_line_length": 36.5,
"blob_id": "f4abc2d54fd38268569b74024dae1e200c05ef1a",
"content_id": "831065f4cb1e0fbc40935de4181288c31daea99e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 2,
"path": "/python/day0.py",
"repo_name": "sjyoon/30daysofcode",
"src_encoding": "UTF-8",
"text": "inputString = input(\"Enter String: \")\nprint(\"Hello, World\\n\", inputString)\n"
},
{
"alpha_fraction": 0.6926316022872925,
"alphanum_fraction": 0.703157901763916,
"avg_line_length": 15.379310607910156,
"blob_id": "8265caafc9860f743b9589d221a62785d6c8320f",
"content_id": "2565c9727c539a9d46b732fb0cf5393325f30d5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 29,
"path": "/cpp/day0.cpp",
"repo_name": "sjyoon/30daysofcode",
"src_encoding": "UTF-8",
"text": "#include <iostream>\nusing namespace std;\n\n/*\nInput Format\n\nA single line of text denoting (the variable whose contents must be printed).\n\nOutput Format\n\nPrint Hello, World. on the first line, and the contents of on the second line.\n\nSample Input\n\nWelcome to 30 Days of Code!\nSample Output\n\nHello, World.\nWelcome to 30 Days of Code!\n*/\n\nint main()\n{\n string inputString;\n cin >> inputString;\n cout << \"Hello, World!\" << endl;\n cout << inputString << endl;\n return 0;\n}\n"
}
] | 3 |
wfarah/snake
|
https://github.com/wfarah/snake
|
3587e184fb9c5ccbae6112241286c8345db151b0
|
4bd4589c0e0f3b07a18b7216b3bc4c49dc89069f
|
a5b05beb0467bcd5aaf59920cdd25f11413a17a2
|
refs/heads/master
| 2021-04-26T23:42:11.738090 | 2018-03-05T00:06:10 | 2018-03-05T00:06:10 | 123,840,988 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5268799066543579,
"alphanum_fraction": 0.5369173288345337,
"avg_line_length": 30.940217971801758,
"blob_id": "695883e59e310efb8a9e6debb664a07682ebf589",
"content_id": "8f71b7c24c095c490bd7c00fa921ae9c4466bc1c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5878,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 184,
"path": "/snake/game/game.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 21:13:03 2018\n\n@author: wfarah\n\"\"\"\n\nimport numpy as np\nfrom utils import mapLetterToPosition, getOppositeDir,numToLetter\n\nEMPTY = 0\nEDGES = -1\nSNAKE_HEAD = 1\nSNAKE_BODY = -1\nAPPLE = 2\n\n\nclass GameLostException(Exception):\n pass\n\n\nclass GameWonException(Exception):\n pass\n\n\nclass Snake(object):\n def __init__(self,dimension=50, start_size=5, start_position=None,\n start_direction=None, godmode=False, survivalmode=False):\n # if start_direction not in [\"u\",\"d\",\"l\",\"r\"]:\n # raise ValueError(\"Start dir can only be 'u', 'd', 'l' or 'r'\")\n self.dim = dimension\n self.start_size = start_size - 1 # without head\n if not start_position:\n start_position = int(self.dim/2.),int(self.dim/2.)\n self.position = tuple(start_position)\n # self.direction = start_direction\n \n self.godmode = godmode\n self.survivalmode = survivalmode\n \n self.resetSnake(start_direction)\n \n def resetSnake(self, start_direction=None):\n if not start_direction:\n self.direction = numToLetter(np.random.randint(4))\n else:\n self.direction = start_direction\n \n self._initialiseBoard()\n self._initialiseSnake()\n if not self.survivalmode:\n self._spawnApple()\n else:\n self.applePosition = None\n self._eatenCoords = []\n self.napples = 0\n self.iteration = 0\n \n \n def _initialiseBoard(self):\n self._emptyBoard = np.zeros((self.dim+2,self.dim+2)) + EMPTY\n self._emptyBoard[:,0] = EDGES\n self._emptyBoard[0] = EDGES\n self._emptyBoard[:,-1] = EDGES\n self._emptyBoard[-1] = EDGES\n \n def _initialiseSnake(self):\n self._snakeCoords = {}\n for i in range(self.start_size):\n self._snakeCoords[str(i)] = \\\n mapLetterToPosition(self.position,\n getOppositeDir(self.direction),\n i+1)\n \n \n def returnBoard(self, trimmed=False):\n #Return a representation of the current board\n board = self._emptyBoard.copy()\n board[self.position] = SNAKE_HEAD\n for coord in self._snakeCoords.itervalues():\n board[coord] = SNAKE_BODY\n if not self.survivalmode:\n board[self.applePosition] = APPLE\n if trimmed:\n board = np.delete(board,[0,self.dim+1],0)\n board = np.delete(board,[0,self.dim+1],1)\n return board\n\n def move(self, move=None):\n if not move:\n pass\n elif move.lower() not in [\"u\",\"d\",\"l\",\"r\"]:\n raise ValueError(\"Move can only be 'u', 'd', 'l' or 'r'\")\n else:\n if move == getOppositeDir(self.direction):\n pass\n else:\n self.direction = move\n \n old_position = self.position # For godmode\n old_snakeCoords = self._snakeCoords # For godmode\n \n self._updatePosition()\n \n try:\n if self._hitEdge():\n raise GameLostException(\"Hit Edge. Gameover\")\n elif self._hitBody():\n raise GameLostException(\"Hit Body. 
Gameover\")\n except GameLostException as e:\n if self.godmode:\n self.position = old_position\n self._snakeCoords = old_snakeCoords\n else:\n raise e\n \n self.iteration += 1\n \n if not self.survivalmode:\n self._increaseSize()\n if self._hitApple():\n self.napples += 1\n if len(self._snakeCoords) == self.dim*self.dim - 1:\n raise GameWonException(\"Game Won!\")\n self._eatenCoords.append(self.position)\n self._spawnApple()\n \n \n def _updatePosition(self):\n # Update snake head/body on board after a move\n old_position = self.position\n self.position = mapLetterToPosition(self.position,\n self.direction)\n for i in range(len(self._snakeCoords))[::-1]:\n if i == 0:\n continue\n else:\n self._snakeCoords[str(i)] = self._snakeCoords[str(i-1)]\n self._snakeCoords[str(0)] = old_position\n \n def _increaseSize(self):\n # Check for previous eaten apple positions, and\n # increase size of snake\n last_chunk = len(self._snakeCoords) - 1\n for eatenCoord in self._eatenCoords:\n if self._snakeCoords[str(last_chunk)] == eatenCoord:\n self._snakeCoords[str(last_chunk+1)] = eatenCoord\n self._eatenCoords.remove(eatenCoord)\n\n \n def _hitEdge(self):\n # Return True if snake head hit edge\n if self.position[0] in [0,self.dim+1] or\\\n self.position[1] in [0,self.dim+1]:\n return True\n return False\n \n def _hitBody(self):\n # Return True if snake head hit body\n for coord in self._snakeCoords.itervalues():\n if coord == self.position:\n return True\n return False\n \n \n def _hitApple(self):\n # Return True if snake ate apple\n if self.position == self.applePosition:\n return True\n return False\n \n def _spawnApple(self):\n # Spawn apple somewhere on board, except on snake's head/body\n flag = 0\n while not flag:\n tmp = np.random.randint(self.dim) + 1,\\\n np.random.randint(self.dim) + 1\n if tmp in self._snakeCoords.values() or tmp == self.position:\n pass\n else:\n flag = 1\n \n self.applePosition = tmp\n\n"
},
{
"alpha_fraction": 0.5353448390960693,
"alphanum_fraction": 0.5517241358757019,
"avg_line_length": 23.29787254333496,
"blob_id": "537497e856ea5e769c22591faeca78c66c670d46",
"content_id": "10ef35cf34c136c8ad955a7db5bf10923fcd27bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 47,
"path": "/play.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 22:19:23 2018\n\n@author: wfarah\n\"\"\"\n\nimport numpy as np\nfrom snake.game import Snake,GameLostException,GameWonException\nfrom snake.gui import gameGUI\nfrom snake.game.utils import numToLetter\n\ndef play():\n snake = Snake(dimension=10,start_size=5,survivalmode=True)\n gui = gameGUI()\n \n board = snake.returnBoard(trimmed = True)\n\n gui.updateFrame(board)\n gameover = 0\n\n while not gameover: \n #inp = raw_input(\"How to move? (u,d,l,r):\")\n r = np.random.randint(4)\n inp = numToLetter(r)\n #time.sleep(1)\n# if inp not in [\"u\",\"d\",\"l\",\"r\"]:\n# print \"Error, input u,d,l or r only\"\n# continue\n try:\n snake.move(inp)\n except GameLostException as e:\n raise e\n except GameWonException as e:\n raise e\n \n board = snake.returnBoard(trimmed = True)\n gui.updateFrame(board)\n\nif __name__ == \"__main__\":\n while True:\n try:\n play()\n except GameLostException as e:\n print e\n pass\n \n \n"
},
{
"alpha_fraction": 0.6413043737411499,
"alphanum_fraction": 0.7119565010070801,
"avg_line_length": 19.55555534362793,
"blob_id": "cce4d32d8b80f1e1c6f51ebe25279f520d62a88e",
"content_id": "b829f9bc885c8dd4fbe56b9d206e0660518eb907",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 9,
"path": "/snake/game/__init__.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 21:16:11 2018\n\n@author: wfarah\n\"\"\"\nimport numpy as np\nfrom game import Snake,GameLostException,GameWonException"
},
{
"alpha_fraction": 0.5157342553138733,
"alphanum_fraction": 0.5428321957588196,
"avg_line_length": 25.929410934448242,
"blob_id": "657a671d81aa04f2bf324608e6f206d023b80815",
"content_id": "6ee8a961c49c5a95a822191d3aa00cb19004771e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2288,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 85,
"path": "/snake/game/utils.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 4 12:46:49 2018\n\n@author: wfarah\n\"\"\"\n\nimport numpy as np\n\ndef oneHotVector(letter):\n if not isinstance(letter,str):\n raise TypeError(\"Letter should be string, %s (%s) given\"\n %(letter, type(letter)))\n if letter == \"d\":\n return np.array([0,0,1,0])\n elif letter == \"u\":\n return np.array([1,0,0,0])\n elif letter == \"l\":\n return np.array([0,0,0,1])\n elif letter == \"r\":\n return np.array([0,1,0,0])\n else:\n raise ValueError(\"Letter %s couldn't be encoded to vector\" %letter)\n \ndef vectorToLetter(vector):\n# if not isinstance(vector,(np.ndarray,list)):\n# raise TypeError(\"Vector should be np.ndarray (or eq), %s (%s) given\"\n# %(vector, type(vector)))\n if np.array_equal(vector,[1,0,0,0]):\n return \"u\"\n elif np.array_equal(vector,[0,1,0,0]):\n return \"r\"\n elif np.array_equal(vector,[0,0,1,0]):\n return \"d\"\n elif np.array_equal(vector,[0,0,0,1]):\n return \"l\"\n else:\n raise ValueError(\"Vector %s couldn't be decoded to letter\" %vector)\n\ndef numToLetter(number):\n if number == 0:\n return \"u\"\n elif number == 1:\n return \"r\"\n elif number == 2:\n return \"d\"\n elif number == 3:\n return \"l\"\n else:\n raise ValueError(\"Number %s couldn't be translated to letter\" %number)\n\ndef numToVector(number):\n if number == 0:\n return oneHotVector(\"u\")\n elif number == 1:\n return oneHotVector(\"r\")\n elif number == 2:\n return oneHotVector(\"d\")\n elif number == 3:\n return oneHotVector(\"l\")\n else:\n raise ValueError(\"Number %s couldn't be translated to vector\" %number)\n \ndef mapLetterToPosition(old,letter,inc=1):\n new = None\n if letter == \"u\":\n new = old[0] - inc, old[1]\n elif letter == \"d\":\n new = old[0] + inc, old[1]\n elif letter == \"l\":\n new = old[0], old[1] - inc\n elif letter == \"r\":\n new = old[0], old[1] + inc\n return new\n \ndef getOppositeDir(letter):\n if letter == \"d\":\n return \"u\"\n elif letter == \"u\":\n return \"d\"\n elif letter == \"l\":\n return \"r\"\n elif letter == \"r\":\n return \"l\""
},
{
"alpha_fraction": 0.6036036014556885,
"alphanum_fraction": 0.6325272917747498,
"avg_line_length": 23.85714340209961,
"blob_id": "b341eec5ff860f4599815db7f9e01de6dd028c19",
"content_id": "5ac9a8110925f0862e04c9fc844378700eab84d1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2109,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 84,
"path": "/trainer.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 4 13:16:46 2018\n\n@author: wfarah\n\"\"\"\n\nimport time\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\nfrom snake.game import Snake,GameLostException,GameWonException\nfrom snake.gui import gameGUI\nfrom snake.game.utils import numToLetter,oneHotVector\n\n\nGUI_SLEEP_TIME = 0.1\n\n\nboard_dim = 50\n\ninput_num_units = board_dim * board_dim\nhidden_num_units = 100\nnum_actions = 4\n\nactions = tf.placeholder(tf.int32, [None])\nrewards = tf.placeholder(tf.float32, [None])\nobs = tf.placeholder(tf.float32, [None, input_num_units])\n\n\nepochs = 5\nbatch_size = 128\nlearning_rate = 0.01\ndecay = 0.99\n\nseed = 128\n\nY = tf.layers.dense(obs,hidden_num_units, activation=tf.nn.relu)\nYlogits = tf.layers.dense(Y, num_actions)\n\nsample_op = tf.multinomial(logits=tf.reshape(Ylogits, shape=(1, num_actions)), \n num_samples=1)\n\n\n\ncross_entropies = tf.losses.softmax_cross_entropy(\n onehot_labels=tf.one_hot(actions,num_actions),\n logits=Ylogits)\n\noptimiser = tf.train.RMSPropOptimizer(learning_rate, decay)\n\nloss = tf.reduce_sum(rewards * cross_entropies)\n\nsnake = Snake(dimension=board_dim,start_size=10,survivalmode=True)\nboard = snake.returnBoard(trimmed=True).astype(\"float32\")\n\ngui = gameGUI()\ngui.updateFrame(board)\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(100):\n _obs = []\n _rewards = []\n _actions = []\n gameover = 0\n niter = 0\n snake.resetSnake()\n while not gameover or niter > 600:\n out = sess.run(sample_op, feed_dict = {obs: [board.flatten()]})[0][0]\n letter = numToLetter(out)\n vector = oneHotVector(letter)\n _actions.append(vector)\n try:\n snake.move(letter)\n except GameLostException:\n gameover = 1\n continue\n board = snake.returnBoard(trimmed = True).astype(\"float32\")\n gui.updateFrame(board,GUI_SLEEP_TIME)\n niter += 1\n \n "
},
{
"alpha_fraction": 0.574999988079071,
"alphanum_fraction": 0.65625,
"avg_line_length": 15.100000381469727,
"blob_id": "b2c05ee1d051a5997506ade50b1d0ef3a6d0ff4e",
"content_id": "c6b3e28ce930ffa840673341eff6163683285b6a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 10,
"path": "/snake/__init__.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 20:42:16 2018\n\n@author: wfarah\n\"\"\"\n\nfrom game.game import *\nfrom gui.gui import gameGUI"
},
{
"alpha_fraction": 0.5253863334655762,
"alphanum_fraction": 0.5739514231681824,
"avg_line_length": 21.600000381469727,
"blob_id": "aa7c842f7b05d4d571cfce4d36eb731404375dd2",
"content_id": "156316bab0c8072fd4624ab486b3b72c4b2ac67e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 20,
"path": "/snake/gui/gui.py",
"repo_name": "wfarah/snake",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 4 16:13:37 2018\n\n@author: wfarah\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nclass gameGUI(object):\n def __init__(self):\n self.fig = plt.figure(1,figsize=(10,14))\n plt.ion()\n plt.show()\n def updateFrame(self,board,pause=0.001):\n self.fig.clf()\n plt.imshow(board,interpolation='nearest',\n aspect='auto')\n plt.pause(pause)\n\n"
}
] | 7 |
pwkraft/nyt
|
https://github.com/pwkraft/nyt
|
ea2dc98b324468d936551478a9f6e4a665bbc753
|
310ced21640ac3b22ccf289db9d6e59a6f2fe091
|
bfc1842890619063cfad09b0a0a28d326ae33de6
|
refs/heads/master
| 2020-05-29T11:04:53.342918 | 2019-07-08T12:51:38 | 2019-07-08T12:51:38 | 39,524,123 | 2 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.540330171585083,
"alphanum_fraction": 0.5509433746337891,
"avg_line_length": 41.79798126220703,
"blob_id": "86201669bc14ee1b052c0529076601b4eaeea84a",
"content_id": "dcb6d120f8f799846dbfe53da7df16f3f209641b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4240,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 99,
"path": "/calc/scraping/get_texts.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "rm(list=ls())\nsetwd(\"/data/Uni/projects/2015/nyt/calc/scraping\")\nlibrary(rvest)\nlibrary(stringr)\nlibrary(magrittr)\nlibrary(xlsx)\n\n## reading in the xlsx file -> not run every time since it takes too long\nreadxlsx <- F\nif(readxlsx == T){\n src_shared <- read.xlsx(\"../in/NYTimes Shared Digital Front Page 0218-0428_check.xlsx\"\n , sheetName = \"Shared\")\n src_front <- read.xlsx(\"../in/NYTimes Shared Digital Front Page 0218-0428_check.xlsx\"\n , sheetName = \"FrontPage\")\n src_digital <- read.xlsx(\"../in/NYTimes Shared Digital Front Page 0218-0428_check.xlsx\"\n , sheetName = \"DigitalEdition\")\n save.image(\"../in/nyt_src.Rdata\")\n} else load(\"../in/nyt_src.Rdata\")\n\n## function to parse the article from nyt, maybe I could switch from a loop to apply/plyr\nget_text <- function(urlvec, printurl = F){\n urlvec <- as.character(urlvec)\n out <- NULL\n if(printurl == F) pb <- txtProgressBar(min = 0, max = length(urlvec), style = 3)\n for(i in 1:length(urlvec)){\n # print url in each iteration instead of progress bar\n if(printurl == T) print(paste0(i,\": \", urlvec[i]))\n if(!is.na(urlvec[i])){\n # parse html page\n page <- try(read_html(urlvec[i]), silent = T)\n if(class(page)[1] == \"try-error\"){\n warning(paste0(\"Error in url: \",urlvec[i]))\n out <- rbind(out, c(urlvec[i], NA, NA, NA, NA, NA))\n } else {\n ## get title information\n title <- page %>% html_nodes(\"title\") %>% html_text()\n title <- sub(\" - The New York Times\", \"\", title[1])\n title <- sub(\" - NYTimes.com\", \"\", title)\n\n ## get full text of article\n text <- page %>% html_nodes(\"p\") %>% html_text()\n text <- text[text != \"Advertisement\"]\n text <- paste(text, collapse = \" \")\n\n ## get keywords etc\n meta <- page %>% html_nodes(\"meta\")\n news_keywords <- meta[(xml_attr(meta, attr=\"name\") %in% c(\"news_keywords\",\"subj\"))]\n if(length(news_keywords) > 0){\n news_keywords <- xml_attr(news_keywords, attr=\"content\")[1]\n } else news_keywords <- NA\n keywords <- meta[(xml_attr(meta, attr=\"name\") %in% c(\"keywords\",\"subj\"))]\n if(length(keywords) > 0){\n keywords <- xml_attr(keywords, attr=\"content\")[1]\n } else keywords <- NA\n articleid <- meta[(xml_attr(meta, attr=\"name\") %in% \"articleid\")]\n if(length(articleid) > 0){\n articleid <- xml_attr(articleid, attr=\"content\")[1]\n } else articleid <- NA\n\n ## combine outout\n out <- rbind(out, c(urlvec[i], title, keywords, news_keywords, articleid, text))\n }\n } else out <- rbind(out, c(urlvec[i], NA, NA, NA, NA, NA))\n if(printurl == F) setTxtProgressBar(pb, i)\n }\n if(printurl == F) close(pb)\n out <- data.frame(out, stringsAsFactors = FALSE)\n colnames(out) <- c(\"link\", \"title\", \"keywords\", \"news_keywords\", \"articleid\" ,\"text\")\n return(out)\n}\n\n\n### download articles\n\n## combine urls in single string, delete dublicates\nurls <- unique(na.omit(c(as.character(src_shared$Most.Viewed.URL)\n , as.character(src_shared$Most.Facebook.URL)\n , as.character(src_shared$Most.Emailed.URL)\n , as.character(src_shared$Most.Tweeted.URL)\n , as.character(src_front$url)\n , as.character(src_digital$url))))\n\n## scrape text and meta info\nnyt_articles <- get_text(urls)\n\n## repeat scraping for missing texts\ntmp <- length(urls) - sum(is.na(nyt_articles$text))\niteration <- 1\nwhile(tmp > 0){\n urls_tmp <- urls[is.na(nyt_articles$text)]\n nyt_tmp <- get_text(urls_tmp)\n nyt_articles[is.na(nyt_articles$text), ] <- nyt_tmp\n tmp <- length(urls_tmp) - sum(is.na(nyt_articles$text))\n print(paste0(\"Iteration 
\",iteration,\", improvements: \",tmp))\n iteration <- iteration + 1\n}\n\n## save dataset\nsave(nyt_articles, file=\"../in/nyt_articles.Rdata\")\n\n\n\n"
},
{
"alpha_fraction": 0.5655737519264221,
"alphanum_fraction": 0.6107799410820007,
"avg_line_length": 43.14285659790039,
"blob_id": "f49b42cd0573a6c7f909ec69b1f446d98598b77c",
"content_id": "8a34721be636d7447730ca472cd24680ba163b75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4026,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 91,
"path": "/calc/prelim/stm_prelim.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Preliminary analyses of nytimes articles (stm)\n#################################################\n\n\nrm(list=ls())\nsetwd(\"/data/Uni/projects/2015/nyt/calc/scraping\")\nload(\"../in/nyt_combined.Rdata\")\nlibrary(stm)\n\n\n### reduce dataset to single observations for each article\n\nnyt_combined <- nyt_combined[order(nyt_combined$id),]\nnyt_combined$select <- c(1,diff(nyt_combined$id))\nnyt_reduced <- nyt_combined[nyt_combined$select == 1\n , !is.element(colnames(nyt_combined), c(\"uniqueid\",\"type\",\"select\"))]\n\n\n### add metadata to reduced dataset\n\nmeta <- data.frame(model.matrix(~ type, nyt_combined)[,-1])\ncolnames(meta) <- gsub(\"type\",\"\",colnames(meta))\nmeta$digital <- as.numeric(apply(meta,1,sum) == 0)\nnyt_combined <- cbind(nyt_combined, meta)\nmeta <- aggregate(cbind(emailed,facebook,front,tweeted,viewed,digital) ~ id\n , data = nyt_combined, function(x) as.numeric(sum(x) > 0))\nnyt_reduced <- merge(nyt_reduced, meta)\nrm(meta)\n\n\n### stm analyses of unique articles\n\nprocessed <- textProcessor(nyt_reduced$text, metadata = nyt_reduced[c(\"id\"\n ,\"emailed\",\"facebook\",\"front\",\"tweeted\",\"viewed\",\"digital\")])\nout <- prepDocuments(processed$documents, processed$vocab, processed$meta, lower.thresh = 15)\nlength(out$vocab) # vocabulary is almost too long for \"Spectral\" analyses\ntest <- stm(out$documents, out$vocab, K = 20\n , prevalence =~ emailed + facebook + front + tweeted + viewed + digital\n , max.em.its = 75, data = out$meta, init.type = \"Spectral\")\nsave.image(\"../in/test.Rdata\")\n#load(\"../in/test.Rdata\")\n\n\n### summarize results\n\n# explore words associated with each topic\nlabelTopics(test)\nplot.STM(test, type = \"summary\", xlim = c(0, .3))\npar(mfrow = c(4,5))\ncloud(test, topic = 1, scale = c(2,.25))\ncloud(test, topic = 2, scale = c(2,.25))\ncloud(test, topic = 3, scale = c(2,.25))\ncloud(test, topic = 4, scale = c(2,.25))\ncloud(test, topic = 5, scale = c(2,.25))\ncloud(test, topic = 6, scale = c(2,.25))\ncloud(test, topic = 7, scale = c(2,.25))\ncloud(test, topic = 8, scale = c(2,.25))\ncloud(test, topic = 9, scale = c(2,.25))\ncloud(test, topic = 10, scale = c(2,.25))\ncloud(test, topic = 11, scale = c(2,.25))\ncloud(test, topic = 12, scale = c(2,.25))\ncloud(test, topic = 13, scale = c(2,.25))\ncloud(test, topic = 14, scale = c(2,.25))\ncloud(test, topic = 15, scale = c(2,.25))\ncloud(test, topic = 16, scale = c(2,.25))\ncloud(test, topic = 17, scale = c(2,.25))\ncloud(test, topic = 18, scale = c(2,.25))\ncloud(test, topic = 19, scale = c(2,.25))\ncloud(test, topic = 20, scale = c(2,.25))\npar(mfrow = c(1,1))\n\n# topic correlations\nplot.topicCorr(topicCorr(test))\n\n\n# estimate effects\nprep <- estimateEffect(1:20 ~ emailed + facebook + front + tweeted + viewed + digital\n , test, meta = out$meta, uncertainty = \"Global\")\nplot.estimateEffect(prep, covariate = \"emailed\", topics = 1:20, model = test, xlim = c(-0.1,0.1)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0, main = \"emailed\")\nplot.estimateEffect(prep, covariate = \"facebook\", topics = 1:20, model = test, xlim = c(-0.1,0.1)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0, main = \"facebook\")\nplot.estimateEffect(prep, covariate = \"front\", topics = 1:20, model = test, xlim = c(-0.1,0.1)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0, main = \"front\")\nplot.estimateEffect(prep, covariate = \"tweeted\", topics = 1:20, model = test, xlim = c(-0.1,0.1)\n , 
method = \"difference\", cov.value1 = 1, cov.value2 = 0, main = \"tweeted\")\nplot.estimateEffect(prep, covariate = \"viewed\", topics = 1:20, model = test, xlim = c(-0.1,0.1)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0, main = \"viewed\")\nplot.estimateEffect(prep, covariate = \"digital\", topics = 1:20, model = test, xlim = c(-0.1,0.1)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0, main = \"digital\")\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5891945362091064,
"alphanum_fraction": 0.6050765514373779,
"avg_line_length": 44.477420806884766,
"blob_id": "49663f2839a731ca777f7c031e37a763f091148a",
"content_id": "45230065320d3ad460a7ab67fae6767762862a9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7052,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 155,
"path": "/calc/prelim/stm_topics_prelim.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Preliminary analyses of nytimes articles (stm)\n#################################################\n## Part I: Topic models, selection of articles, measure complexity\n\nrm(list=ls())\nsetwd(\"/data/Uni/projects/2015/nyt/calc\")\nload(\"in/nyt_articles.Rdata\")\nload(\"in/nyt_src.Rdata\")\nlibrary(stm)\nlibrary(dplyr)\nlibrary(car)\nlibrary(quanteda)\n\n\n### function taken from\n# improved list of objects\n.ls.objects <- function (pos = 1, pattern, order.by,\n decreasing=FALSE, head=FALSE, n=5) {\n napply <- function(names, fn) sapply(names, function(x)\n fn(get(x, pos = pos)))\n names <- ls(pos = pos, pattern = pattern)\n obj.class <- napply(names, function(x) as.character(class(x))[1])\n obj.mode <- napply(names, mode)\n obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)\n obj.prettysize <- napply(names, function(x) {\n capture.output(format(utils::object.size(x), units = \"auto\")) })\n obj.size <- napply(names, object.size)\n obj.dim <- t(napply(names, function(x)\n as.numeric(dim(x))[1:2]))\n vec <- is.na(obj.dim)[, 1] & (obj.type != \"function\")\n obj.dim[vec, 1] <- napply(names, length)[vec]\n out <- data.frame(obj.type, obj.size, obj.prettysize, obj.dim)\n names(out) <- c(\"Type\", \"Size\", \"PrettySize\", \"Rows\", \"Columns\")\n if (!missing(order.by))\n out <- out[order(out[[order.by]], decreasing=decreasing), ]\n if (head)\n out <- head(out, n)\n out\n}\n\n# shorthand\nlsos <- function(..., n=10) {\n .ls.objects(..., order.by=\"Size\", decreasing=TRUE, head=TRUE, n=n)\n}\n\nlsos()\n\n\n### merge original link lists with scraped articles\n\nnyt_viewed <- data.frame(link = src_shared$Most.Viewed.URL, date = src_shared$Date\n , time = src_shared$Time) %>% left_join(nyt_articles) %>%\n mutate(uniqueid = 200001:(200000 + nrow(src_shared)), type = \"viewed\")\nnyt_facebook <- data.frame(link = src_shared$Most.Facebook.URL, date = src_shared$Date\n , time = src_shared$Time) %>% left_join(nyt_articles) %>%\n mutate(uniqueid = 300001:(300000 + nrow(src_shared)), type = \"facebook\")\nnyt_emailed <- data.frame(link = src_shared$Most.Emailed.URL, date = src_shared$Date\n , time = src_shared$Time) %>% left_join(nyt_articles) %>%\n mutate(uniqueid = 400001:(400000 + nrow(src_shared)), type = \"emailed\")\nnyt_tweeted <- data.frame(link = src_shared$Most.Tweeted.URL, date = src_shared$Date\n , time = src_shared$Time) %>% left_join(nyt_articles) %>%\n mutate(uniqueid = 500001:(500000 + nrow(src_shared)), type = \"tweeted\")\nnyt_front <- data.frame(link = src_front$url, date = src_front$Date\n , time = src_front$Time, author = src_front$Author\n , srctitle = src_front$Title) %>% left_join(nyt_articles) %>%\n mutate(uniqueid = 600001:(600000 + nrow(src_front)), type = \"front\")\nnyt_digital <- data.frame(link = src_digital$url, date = src_digital$Date\n , time = src_digital$Time, author = src_digital$Author\n , srctitle = src_digital$Title, section = src_digital$Section\n , subsection = src_digital$Subsection) %>% left_join(nyt_articles) %>%\n mutate(uniqueid = 700001:(700000 + nrow(src_digital)))\nnyt_digital$type <- recode(as.numeric(nyt_digital$section)\n , \"1 = 'digital_opinion'; 2 = 'digital_bottom'; 3:5 = 'digital_topnews'\")\n\n\n### combine merged articles in single dataframe\n\nnyt_combined <- bind_rows(nyt_viewed, nyt_facebook, nyt_emailed\n , nyt_tweeted, nyt_front, nyt_digital)\nsave(nyt_combined, file = \"in/nyt_combined.Rdata\")\nrm(nyt_viewed, nyt_facebook, nyt_emailed, nyt_tweeted, nyt_front, nyt_digital, 
nyt_articles\n , src_shared, src_front, src_digital)\ngc()\n\n\n### reduce dataset to single observations for each article, add meta data\n\nnyt_reduced <- nyt_combined %>% filter(!duplicated(nyt_combined$title) & text != \"\") %>%\n select(link, author, title, keywords, news_keywords, text) \nmeta <- data.frame(model.matrix(~ type, nyt_combined)[,-1])\ncolnames(meta) <- gsub(\"type\",\"\",colnames(meta))\nmeta$digital_bottom <- as.numeric(apply(meta,1,sum) == 0)\nmeta$title <- nyt_combined$title\nmeta <- meta %>% group_by(title) %>% summarize_each(funs(max))\nnyt_reduced <- nyt_reduced %>% left_join(meta)\nsave(nyt_reduced, file = \"in/nyt_reduced.Rdata\")\nrm(nyt_combined)\ngc()\n\n\n### calculate readability\n\nnyt_readab <- readability(nyt_reduced$text)\nsave(nyt_readab, file = \"in/nyt_readab.Rdata\")\nrm(nyt_readab)\ngc()\n\n\n### stm analyses to select politics/econ articles\n\nprocessed_select <- textProcessor(nyt_reduced$text\n , metadata = nyt_reduced[c(\"title\",\"emailed\",\"facebook\",\"front\",\"tweeted\"\n ,\"viewed\",\"digital_opinion\",\"digital_topnews\"\n ,\"digital_bottom\")])\nout_select <- prepDocuments(processed_select$documents, processed_select$vocab\n , processed_select$meta, lower.thresh = 10)\nlength(out_select$vocab) # vocabulary < 10000 for \"Spectral\" analyses\nstm_select <- stm(out_select$documents, out_select$vocab, K = 5\n , prevalence =~ emailed + facebook + front + tweeted + viewed +\n digital_opinion + digital_topnews + digital_bottom\n , max.em.its = 75, data = out_select$meta, init.type = \"Spectral\")\n\n## explore words associated with each topic\nlabelTopics(stm_select)\ntopic_polecon <- apply(stm_select$theta, 1, function(x) which(x == max(x)))\ntopic_polecon <- topic_polecon == 1 | topic_polecon == 3 | topic_polecon == 5\n\n## reduce dataset to politics/econ topic\nnyt_polecon <- nyt_reduced[topic_polecon,]\nsave(nyt_polecon, file = \"in/nyt_polecon.Rdata\")\n\n## save and delete stm for selection\nsave(processed_select, out_select, stm_select, file = \"in/stm_select.Rdata\")\nrm(processed_select, out_select, stm_select)\ngc()\n\n\n### stm analysis on remaining articles\n\nprocessed_polecon <- textProcessor(nyt_polecon$text\n , metadata = nyt_polecon[c(\"title\",\"emailed\",\"facebook\",\"front\",\"tweeted\"\n ,\"viewed\",\"digital_opinion\",\"digital_topnews\"\n ,\"digital_bottom\")])\nout_polecon <- prepDocuments(processed_polecon$documents, processed_polecon$vocab\n , processed_polecon$meta, lower.thresh = 10)\nlength(out_polecon$vocab)\nstm_polecon <- stm(out_polecon$documents, out_polecon$vocab, K = 10\n , prevalence =~ emailed + facebook + front + tweeted + viewed +\n digital_opinion + digital_topnews + digital_bottom\n , max.em.its = 75, data = out_polecon$meta, init.type = \"Spectral\")\n\n\n### save results\nsave(processed_polecon, out_polecon, stm_polecon, file = \"in/stm_polecon.Rdata\")\n\n\n\n"
},
{
"alpha_fraction": 0.5829938650131226,
"alphanum_fraction": 0.5987780094146729,
"avg_line_length": 38.2599983215332,
"blob_id": "de235070315620407aa621109067ac4436ec9169",
"content_id": "b0133ae941dc3c04b61f01d350208a6560ffe50d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1964,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 50,
"path": "/calc/nyt_readability.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Readability of all articles (including non-political)\n#################################################\n## NOTE: this is just a quick check based on my old code, could be much more efficient...\n\nlibrary(tidyverse)\nlibrary(quanteda)\n\n## load data\nload(\"in/nyt_reduced.Rdata\")\nload(\"in/nyt_readab.Rdata\")\n\n## separate metadata\nnyt_part <- c(\"Front Page\",\"Opinion (Digital Edition)\"\n ,\"Top News (Digital Edition)\",\"Bottom Part (Digital Edition)\")\nnyt_share <- c(\"Most Viewed\",\"Shared on Facebook\",\"Most Emailed\",\"Tweeted\")\nnyt_part_var <- c(\"front\",\"digital_opinion\",\"digital_topnews\",\"digital_bottom\")\nnyt_share_var <- c(\"viewed\",\"facebook\",\"emailed\",\"tweeted\")\n\nci <- function(x){\n mu <- mean(x, na.rm = T)\n se <- sd(x, na.rm = T)/sqrt(length(na.omit(x)))\n ci_lo <- mu - 1.96 * se\n ci_hi <- mu + 1.96 * se\n out <- c(mu, ci_lo, ci_hi)\n return(out)\n}\n\nreadab <- nyt_reduced %>% \n mutate(readab = nyt_readab) %>%\n data.frame()\n\nreadab_summary <- data.frame(NULL)\nfor(i in c(nyt_part_var,nyt_share_var)){\n tmp <- data.frame(t(ci(readab[which(readab[i] == 1), \"readab\"])))\n tmp$variable = i\n readab_summary <- rbind(readab_summary, tmp)\n}\ncolnames(readab_summary)[1:3] <- c(\"mean\",\"cilo\",\"cihi\")\nreadab_summary$variable <- factor(readab_summary$variable\n , levels = c(nyt_share_var[c(1,3,2,4)],nyt_part_var[c(1,3,2,4)])\n , labels = c(nyt_share[c(1,3,2,4)],nyt_part[c(1,3,2,4)]))\nreadab_summary$group <- rep(c(\"Newspaper section\",\"Shared/Viewed\"),each=4)\n\np <- ggplot(readab_summary, aes(y = mean, ymin = cilo, ymax = cihi, x = variable)) + \n theme_classic() + geom_pointrange() + facet_wrap(~group, scales=\"free_x\") + \n ylab(\"Flesch-Kincaid Grade Level\") + xlab(NULL) + \n theme(axis.text.x = element_text(angle = 40, hjust = 1))\np + theme(panel.border = element_rect(fill=NA))\nggsave(\"fig/readability.png\",height=5,width=9)\n\n"
},
{
"alpha_fraction": 0.6480447053909302,
"alphanum_fraction": 0.6824953556060791,
"avg_line_length": 40.30769348144531,
"blob_id": "27759249f3f23ceba893eccbc3f9c5bbc3c0d06a",
"content_id": "6754ff3e42ae634fd805f1a4dd5e0ba2eb92776e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 26,
"path": "/calc/nyt_narticles.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "library(tidyverse)\n\n## load data\nload(\"out/nyt_top10.Rdata\")\nload(\"in/nyt_reduced.Rdata\")\n\n## unique articles in top10 data\nlength(unique(unlist(list(nyt_top10[,grep(\"Title\", colnames(nyt_top10))]))))\nlength(unique(unlist(list(nyt_top10[,grep(\"URL\", colnames(nyt_top10))]))))\n\n## compare duplicate titles and urls\ntitles_dup <- matrix(duplicated(unlist(list((nyt_top10[,grep(\"Title\", colnames(nyt_top10))])))), ncol = 4)\nurls_dup <- matrix(duplicated(unlist(list((nyt_top10[,grep(\"URL\", colnames(nyt_top10))])))), ncol = 4)\nView(nyt_top10[apply((titles_dup - urls_dup) != 0, 1, sum) > 0,])\n\n## different urls link to the same articles!\ncbind(select(nyt_top10, contains(\"Title\"))[(titles_dup - urls_dup) != 0],\n select(nyt_top10, contains(\"URL\"))[(titles_dup - urls_dup) != 0]) %>%\n View()\n\n## unique articles in nyt_reduced\nnyt_reduced %>%\n filter(emailed == 1 | facebook == 1 | tweeted == 1 | viewed == 1) %>%\n View()\n# not sure why the number of articles is lower in the redced dataset\n# probably because I removed empty articles, but I should investigate further\n"
},
{
"alpha_fraction": 0.582227349281311,
"alphanum_fraction": 0.6018465161323547,
"avg_line_length": 37.511112213134766,
"blob_id": "855468adadcf4c845550a50467811caa9e109356",
"content_id": "007cdd421305bae0ea5ca76f928ea5db89fb1985",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1733,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 45,
"path": "/calc/nyt_overlap.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Overlap between top 10 articles\n#################################################\n\nrm(list=ls())\nsetwd(\"~/Dropbox/Uni/Projects/2015/nyt/calc\")\nload(\"out/nyt_top10.Rdata\")\n\n\n## function to count percent of duplicates out of total number of unique articles\npercentDuplicates <- function(x, y){\n ## remove duplicates in each vector, save as character, combine both vectors (creating new duplicates)\n xy <- c(as.character(unique(x)), as.character(unique(y)))\n ## percent of duplicates out of total number of unique articles\n (length(xy) - length(unique(xy))) / length(unique(xy))\n}\n\n## check function\npercentDuplicates(c(\"A\",\"B\",\"C\"), c(\"D\", \"E\"))\npercentDuplicates(c(\"A\",\"B\",\"C\"), c(\"A\",\"B\",\"C\"))\npercentDuplicates(c(\"A\",\"B\",\"C\"), c(\"A\",\"B\",\"B\",\"D\"))\n\n## % overlap in unique articles (i.e., not counting how often or on what day they appear in each group)\ntitles <- c(\"Most.Emailed.Title\", \"Most.Facebook.Title\", \"Most.Tweeted.Title\", \"Most.Viewed.Title\")\noverlap_total <- matrix(NA, ncol = 4, nrow = 4, dimnames = list(titles, titles))\nfor(i in 1:4){\n for(j in i:4){\n overlap_total[i,j] <- percentDuplicates(nyt_top10[,titles[i]], nyt_top10[,titles[j]])\n }\n}\noverlap_total\n\n## average % overlap on the same day\ndates <- as.character(na.omit(unique(nyt_top10$Date)))\noverlap_day <- array(NA, dim = c(4, 4, length(dates)), \n dimnames = list(titles, titles, as.character(dates)))\nfor(t in dates){\n for(i in 1:4){\n for(j in i:4){\n overlap_day[i,j,t] <- percentDuplicates(nyt_top10[nyt_top10$Date == t, titles[i]], \n nyt_top10[nyt_top10$Date == t, titles[j]])\n }\n }\n}\napply(overlap_day, c(1,2), mean)\n"
},
{
"alpha_fraction": 0.5711340308189392,
"alphanum_fraction": 0.5826804041862488,
"avg_line_length": 34.661766052246094,
"blob_id": "9328997cd0eead8104a9d74b749591e6b3ba05fd",
"content_id": "609c747e5df4178e9c336a28a7ab6a0a99dd5678",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2425,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 68,
"path": "/calc/nyt_frontpage-sentiment.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Sentiment anlysis of front page articles\n#################################################\n\nlibrary(tidyverse)\nlibrary(quanteda)\n\n## load data\nload(\"in/nyt_reduced.Rdata\")\n\n## select front page only from reduced dataset\nfront <- nyt_reduced %>% filter(front == 1)\n\n## compare with full data to make sure nothing is missing\nload(\"in/nyt_combined.Rdata\")\ntmp <- nyt_combined %>% filter(type == \"front\")\ntable(front$title %in% tmp$title)\ntable(tmp$title %in% front$title)\ntmp[!tmp$title %in% front$title,]\nrm(tmp, nyt_combined, nyt_reduced)\n\n## pre-processing specific for lexicoder sentiment dictionary \nsource(\"LSDprep_jan2018.R\") # (code from lexicoder.com)\ntext <- front$text %>%\n LSDprep_contr() %>% # replace contractions\n LSDprep_dict_punct() %>% # remove false dict terms\n remove_punctuation_from_acronyms() %>% # optional\n remove_punctuation_from_abbreviations %>% # optional\n LSDprep_punctspace() %>% # insert spaces around punctuation\n LSDprep_negation() %>% # process negation\n LSDprep_dict() %>% # remove additional false dict terms\n mark_proper_nouns() # optional\n\n## sentiment dictionary count\nlsd <- dfm(text, \n remove_numbers = TRUE, \n remove_punct = TRUE,\n remove_symbols = TRUE,\n dictionary = data_dictionary_LSD2015) %>%\n convert(to = \"data.frame\") %>%\n mutate(total = ntoken(text,\n remove_numbers = TRUE, \n remove_punct = TRUE,\n remove_symbols = TRUE),\n tone = 100 * (positive - neg_positive - (negative - neg_negative))/total)\n\n## plots\nlsd %>% summarize(mean = mean(tone),\n se = sd(tone)/sqrt(n())) %>%\n ggplot(aes(y = mean, \n ymin = mean - 1.96*se, \n ymax = mean + 1.96*se, \n x = \"Front Page\")) +\n geom_point() + \n geom_pointrange() +\n theme_minimal() +\n scale_y_continuous(limits = c(-.8, -.1), \n breaks = seq(-.8, -.1, by = .1)) +\n labs(y = \"Mean Net Sentiment\", x = \"Platform\")\nggsave(\"fig/sent_mean.png\", height = 3, width = 4)\n \nggplot(lsd, aes(x = tone)) +\n geom_density() +\n theme_minimal() +\n geom_vline(xintercept = 0, lty = \"dashed\", color = \"darkgrey\") +\n geom_vline(xintercept = mean(lsd$tone), lty = \"dotted\") +\n labs(y = \"Density\", x = \"Negative <--> Positive\")\nggsave(\"fig/sent_density.png\", height = 3, width = 4)\n"
},
{
"alpha_fraction": 0.7035830616950989,
"alphanum_fraction": 0.7524430155754089,
"avg_line_length": 37.375,
"blob_id": "5b1a9af42b495f8d51890456d520d9c068aa8a06",
"content_id": "3cb883adb1f25bba6fc666ae558a01bca3983ef1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 8,
"path": "/calc/scraping/get_texts.py",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "from lxml import html\nimport requests\n\npage = requests.get('http://www.nytimes.com/interactive/2015/02/17/upshot/what-do-people-actually-order-at-chipotle.html?src=me&_r=1&abt=0002&abg=1')\ntree = html.fromstring(page.text)\n\ntextbody = tree.xpath('//p[@title=\"paragraph paragraph-1\"]/text()')\nprint textbody\n"
},
{
"alpha_fraction": 0.5917243957519531,
"alphanum_fraction": 0.610548734664917,
"avg_line_length": 47.52586364746094,
"blob_id": "5c29d07975c84f0039fa4724d6e4554e000c0296",
"content_id": "d670c3cbeca79d6dba13072c6749409c342ada72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 5631,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 116,
"path": "/calc/prelim/stm_analyses_prelim.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Preliminary analyses of nytimes articles (stm)\n#################################################\n\n\nrm(list=ls())\nsetwd(\"/data/Uni/projects/2015/nyt/calc\")\nlibrary(stm)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(car)\nlibrary(ggplot2)\n\n## load data\nload(\"in/nyt_combined.Rdata\")\nload(\"in/nyt_reduced.Rdata\")\nload(\"in/nyt_polecon.Rdata\")\nload(\"in/nyt_readab.Rdata\")\nload(\"in/stm_select.Rdata\")\nload(\"in/stm_polecon.Rdata\")\n\n\n### summarize selection\nlabelTopics(stm_select)\n\n### summarize results\nlabelTopics(stm_polecon)\ntopics <- c(\"Presidential Race\",\"Technology\",\"International\",\"Supreme Court/Laws\",\"Police\",\"Religion (unclear)\",\"Iran/Israel\",\"Health/Care\",\"Sport\",\"Economy\")\nplot.STM(stm_polecon, type = \"summary\", custom.labels = topics)\nplot.STM(stm_polecon, type = \"perspectives\", topics=c(1,3))\n\ntopic_polecon <- apply(stm_polecon$theta, 1, function(x) which(x == max(x)))\nView(nyt_polecon[topic_polecon == 6,])\n\n\n\n### topic proportions in each category\nprep <- estimateEffect(1:10 ~ emailed + facebook + front + tweeted + viewed +\n digital_opinion + digital_topnews + digital_bottom\n , stm_polecon, meta = out_polecon$meta, uncertainty = \"Global\")\npar(mfrow = c(2,4))\nplot.estimateEffect(prep, covariate = \"emailed\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"emailed\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"facebook\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"facebook\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"front\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"front\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"tweeted\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"tweeted\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"viewed\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"viewed\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"digital_opinion\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"digital_opinion\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"digital_topnews\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"digital_topnews\", labeltype = \"custom\", custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"digital_bottom\", topics = 1:10, model = stm_polecon\n , xlim = c(-.3,.3), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , main = \"digital_bottom\", labeltype = \"custom\", custom.labels = topics)\npar(mfrow = c(1,1))\n\n\n### complexity by category\nci <- function(x){\n mu <- mean(x, na.rm = T)\n se <- sd(x, na.rm = T)/sqrt(length(na.omit(x)))\n ci_lo <- mu - 1.96 * se\n ci_hi <- mu + 1.96 * se\n out <- c(mu, ci_lo, ci_hi)\n return(out)\n}\n\ntest <- nyt_reduced %>% mutate(readab = 
nyt_readab$ARI) %>% right_join(nyt_polecon) %>% data.frame()\nreadab_summary <- data.frame(NULL)\nfor(i in c(\"emailed\", \"facebook\", \"front\", \"tweeted\", \"viewed\", \"digital_opinion\"\n , \"digital_topnews\", \"digital_bottom\")){\n tmp <- data.frame(t(ci(test[which(test[i] == 1), \"readab\"])))\n tmp$variable = i\n readab_summary <- rbind(readab_summary, tmp)\n}\ncolnames(readab_summary)[1:3] <- c(\"mean\",\"cilo\",\"cihi\")\n\nggplot(readab_summary, aes(y = mean, ymin = cilo, ymax = cihi, x = variable)) + geom_pointrange()\n\n\n### look at topic proportions for each category over time\ntopic_select <- apply(stm_select$theta, 1, function(x) which(x == max(x)))\ntopic_select <- topic_select == 1 | topic_select == 3 | topic_select == 5\ntopic_select <- nyt_reduced$title[topic_select == T]\n\nnyt_series <- nyt_combined %>% filter(title %in% topic_select) %>% select(date, title, type) %>%\n left_join(bind_cols(select(nyt_polecon, title), data.frame(stm_polecon$theta))) %>%\n group_by(date, type) %>% select(-title) %>% summarize_each(funs(mean)) %>%\n gather(\"topic\",\"proportion\",3:12)\nnyt_series$topic <- factor(nyt_series$topic, labels = topics)\n\nggplot(nyt_series, aes(x = date, y = proportion, col = topic)) + geom_line() + facet_wrap(~type)\n\n\n### switches between categories\n\nnyt_switch <- nyt_combined %>% filter(title %in% topic_select) %>% select(date, title, type) %>%\n filter(title %in% unique(title[duplicated(title)])) %>% arrange(title, date) %>%\n group_by(title) %>% mutate(day = as.numeric(date - min(date))) %>% unique()\ntmp <- nyt_switch %>% group_by(title) %>% summarize(maxday = max(day)) %>% filter(maxday==0)\nnyt_switch <- nyt_switch %>% filter(!title %in% tmp$title) %>% count(day, type)\ntmp <- nyt_switch %>% group_by(day) %>% summarize(total = sum(n))\nnyt_switch <- nyt_switch %>% left_join(tmp) %>% mutate(prop = n/total) %>% filter(day <=5)\n\nggplot(nyt_switch, aes(x = day, y = prop, col = type)) + geom_line()\n\n\n"
},
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.628947377204895,
"avg_line_length": 39.05263137817383,
"blob_id": "7fae3b8a125d45215ee3f410f3266edf7dc8a111",
"content_id": "1d6ba0f8f2746e922ebf87c12ed2361dd74a2b32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 760,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 19,
"path": "/calc/nyt_top10.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "#################################################\n# Extract top 10 article \n#################################################\n## Part I: Topic models, selection of articles, measure complexity\n\nrm(list=ls())\nsetwd(\"/data/Dropbox/Uni/Projects/2015/nyt/calc\")\nload(\"in/nyt_articles.Rdata\")\nload(\"in/nyt_src.Rdata\")\n\n## reduce articles to the ones that are included in nyt_top10\ntitles <- unique(c(as.matrix(src_shared[,grep(\"Title\",colnames(src_shared))])))\nurls <- unique(c(as.matrix(src_shared[,grep(\"URL\",colnames(src_shared))])))\nnyt_articles <- nyt_articles[(nyt_articles$title %in% titles | nyt_articles$link %in% urls),]\n\n## save hazard data and articles in one data frame\nnyt_top10 <- src_shared\n\nsave(nyt_articles, nyt_top10, file=\"out/nyt_top10.Rdata\")"
},
{
"alpha_fraction": 0.5602150559425354,
"alphanum_fraction": 0.5897177457809448,
"avg_line_length": 49.427120208740234,
"blob_id": "21e5342f8df3627ca0bbdb3f88d216fb9cef0ebd",
"content_id": "a455e81bdd8aa7e37eb1538a21b12320e1d979e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 14880,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 295,
"path": "/calc/stm_analyses.R",
"repo_name": "pwkraft/nyt",
"src_encoding": "UTF-8",
"text": "##################################\n## Analyses for MPSA 2016 paper ##\n##################################\n\nsetwd(\"/data/Uni/projects/2015/nyt/calc\")\nrm(list=ls())\nlibrary(stm)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(car)\nlibrary(ggplot2)\nlibrary(gridExtra)\n\n## load data\nload(\"in/nyt_combined.Rdata\")\nload(\"in/nyt_reduced.Rdata\")\nload(\"in/nyt_polecon.Rdata\")\nload(\"in/nyt_readab.Rdata\")\nload(\"in/stm_res.Rdata\")\n\n## separate metadata\nnyt_part <- c(\"Front Page\",\"Opinion (Digital Edition)\"\n ,\"Top News (Digital Edition)\",\"Bottom Part (Digital Edition)\")\nnyt_share <- c(\"Most Viewed\",\"Shared on Facebook\",\"Most Emailed\",\"Tweeted\")\nnyt_part_var <- c(\"front\",\"digital_opinion\",\"digital_topnews\",\"digital_bottom\")\nnyt_share_var <- c(\"viewed\",\"facebook\",\"emailed\",\"tweeted\")\n\n\n### basic summaries / overview\n\n## indicative words for each topic (highest probability)\npdf(\"fig/words.pdf\")\npar(mfrow = c(2,2), cex = .5, mar=c(0,0,2,0))\nplot.STM(stm_res, type = \"labels\", topics = 1:5)\nplot.STM(stm_res, type = \"labels\", topics = 11:15)\npar(mar = c(2, 0, 0, 0))\nplot.STM(stm_res, type = \"labels\", topics = 6:10)\nplot.STM(stm_res, type = \"labels\", topics = 16:20)\ndev.off()\npar(mfcol = c(1,1), cex = 1, mar = c(5, 4, 4, 2) + 0.1, mgp = c(3,1,0))\n\npdf(\"fig/words_top.pdf\")\npar(mfrow = c(2,2), cex = .5, mar=c(2,0,2,0))\nplot.STM(stm_res, type = \"labels\", topics = 1:5)\nplot.STM(stm_res, type = \"labels\", topics = 6:10)\ndev.off()\n\npdf(\"fig/words_bottom.pdf\")\npar(mfrow = c(2,2), cex = .5, mar=c(2,0,2,0))\nplot.STM(stm_res, type = \"labels\", topics = 11:15)\nplot.STM(stm_res, type = \"labels\", topics = 16:20)\ndev.off()\n\n## proportions of topics (all)\npdf(\"fig/prop.pdf\", height = 4)\npar(mar = c(5, 2, 2, 2) + 0.1)\nplot.STM(stm_res, type = \"summary\", custom.labels = topics, text.cex=.7, main = NA)\ndev.off()\n\n## proportion of topics (polecon)\npdf(\"fig/prop_polecon.pdf\", height = 4)\npar(mar = c(5, 2, 2, 2) + 0.1)\nplot.STM(stm_res, type = \"summary\", topics = topics_polecon\n , custom.labels = topics[topics_polecon], text.cex=.7, main = NA)\ndev.off()\n\n## example for topic differences\npdf(\"fig/perspective.pdf\", height = 4)\npar(mar = c(1, 1, 1, 1) + 0.1)\nplot.STM(stm_res, type = \"perspective\", topics = c(1,3), custom.labels = topics[c(1,3)])\ndev.off()\n\n\n### estimate effect of meta-information (all topics)\n\n## model estimation\nprep <- estimateEffect(1:20 ~ emailed + facebook + front + tweeted + viewed +\n digital_opinion + digital_topnews + digital_bottom\n , stm_res, meta = out$meta, uncertainty = \"Global\")\n\n## plot results\npdf(\"fig/res_nyt.pdf\", height = 5)\npar(mfrow = c(2,2), mar = c(0, 3, 3, 1), mgp = c(1,1,0), cex=.6)\nplot.estimateEffect(prep, covariate = \"front\", model = stm_res, xlim = c(-.25,.25)\n , ylab = \"Front Page\", method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , labeltype = \"custom\", custom.labels = topics, xaxt=\"n\")\nplot.estimateEffect(prep, covariate = \"digital_opinion\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Opinion (Digital Edition)\", labeltype = \"custom\"\n , custom.labels = topics, xaxt=\"n\")\npar(mar = c(3, 3, 0, 1), mgp = c(1,1,0))\nplot.estimateEffect(prep, covariate = \"digital_topnews\", model = stm_res, xlim = c(-.25,.25)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Top News (Digital Edition)\", labeltype = \"custom\"\n , 
custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"digital_bottom\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Bottom Part (Digital Edition)\", labeltype = \"custom\"\n , custom.labels = topics)\ndev.off()\n\npdf(\"fig/res_share.pdf\", height = 5)\npar(mfrow = c(2,2), mar = c(0, 3, 3, 1), mgp = c(1,1,0), cex=.6)\nplot.estimateEffect(prep, covariate = \"viewed\", model = stm_res, xlim = c(-.25,.25)\n , ylab = \"Most Viewed\", method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , labeltype = \"custom\", custom.labels = topics, xaxt=\"n\")\nplot.estimateEffect(prep, covariate = \"facebook\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Shared on Facebook\", labeltype = \"custom\"\n , custom.labels = topics, xaxt=\"n\")\npar(mar = c(3, 3, 0, 1), mgp = c(1,1,0))\nplot.estimateEffect(prep, covariate = \"emailed\", model = stm_res, xlim = c(-.25,.25)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Emailed\", labeltype = \"custom\"\n , custom.labels = topics)\nplot.estimateEffect(prep, covariate = \"tweeted\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Tweeted\", labeltype = \"custom\"\n , custom.labels = topics)\ndev.off()\n\n\n### estimate effect of meta-information (polecon)\n\n## model estimation\nprep <- estimateEffect(topics_polecon ~ emailed + facebook + front + tweeted + viewed +\n digital_opinion + digital_topnews + digital_bottom\n , stm_res, meta = out$meta, uncertainty = \"Global\")\n\n## plot results\npdf(\"fig/res_nyt_polecon_empty.pdf\", height = 4)\npar(mfrow = c(2,2), mar = c(0, 3, 3, 1), mgp = c(1,1,0), cex=.6)\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Front Page\", xlab = NA, yaxt=\"n\",xaxt=\"n\")\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Opinion (Digital Edition)\", xlab = NA, yaxt=\"n\",xaxt=\"n\")\npar(mar = c(3, 3, 0, 1), mgp = c(1,1,0))\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Top News (Digital Edition)\", xlab = NA, yaxt=\"n\")\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Bottom Part (Digital Edition)\", xlab = NA, yaxt=\"n\")\ndev.off()\n\npdf(\"fig/res_nyt_polecon.pdf\", height = 4)\npar(mfrow = c(2,2), mar = c(0, 3, 3, 1), mgp = c(1,1,0), cex=.6)\nplot.estimateEffect(prep, covariate = \"front\", model = stm_res, xlim = c(-.25,.25)\n , ylab = \"Front Page\", method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , labeltype = \"custom\", custom.labels = topics[topics_polecon], xaxt=\"n\")\nplot.estimateEffect(prep, covariate = \"digital_opinion\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Opinion (Digital Edition)\", labeltype = \"custom\"\n , custom.labels = topics[topics_polecon], xaxt = \"n\")\npar(mar = c(3, 3, 0, 1), mgp = c(1,1,0))\nplot.estimateEffect(prep, covariate = \"digital_topnews\", model = stm_res, xlim = c(-.25,.25)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Top News (Digital Edition)\", labeltype = \"custom\"\n , custom.labels = topics[topics_polecon])\nplot.estimateEffect(prep, covariate = \"digital_bottom\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Bottom Part (Digital Edition)\", labeltype = \"custom\"\n , custom.labels = topics[topics_polecon])\ndev.off()\n\npdf(\"fig/res_share_polecon_empty.pdf\", height 
= 4)\npar(mfrow = c(2,2), mar = c(0, 3, 3, 1), mgp = c(1,1,0), cex=.6)\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Most Viewed\", xlab = NA, yaxt=\"n\",xaxt=\"n\")\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Shared on Facebook\", xlab = NA, yaxt=\"n\",xaxt=\"n\")\npar(mar = c(3, 3, 0, 1), mgp = c(1,1,0))\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Emailed\", xlab = NA, yaxt=\"n\")\nplot(0,0,type = \"n\", xlim = c(-.25,.25), ylab = \"Tweeted\", xlab = NA, yaxt=\"n\")\ndev.off()\n\npdf(\"fig/res_share_polecon.pdf\", height = 4)\npar(mfrow = c(2,2), mar = c(0, 3, 3, 1), mgp = c(1,1,0), cex=.6)\nplot.estimateEffect(prep, covariate = \"viewed\", model = stm_res, xlim = c(-.25,.25)\n , ylab = \"Most Viewed\", method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , labeltype = \"custom\", custom.labels = topics[topics_polecon], xaxt=\"n\")\nplot.estimateEffect(prep, covariate = \"facebook\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Shared on Facebook\", labeltype = \"custom\"\n , custom.labels = topics[topics_polecon], xaxt=\"n\")\npar(mar = c(3, 3, 0, 1), mgp = c(1,1,0))\nplot.estimateEffect(prep, covariate = \"emailed\", model = stm_res, xlim = c(-.25,.25)\n , method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Emailed\", labeltype = \"custom\"\n , custom.labels = topics[topics_polecon])\nplot.estimateEffect(prep, covariate = \"tweeted\", model = stm_res\n , xlim = c(-.25,.25), method = \"difference\", cov.value1 = 1, cov.value2 = 0\n , ylab = \"Tweeted\", labeltype = \"custom\"\n , custom.labels = topics[topics_polecon])\ndev.off()\n\n\n### topic proportions for each category over time\n\n## prepare data\nnyt_series <- nyt_combined %>% select(date, title, type) %>%\n left_join(bind_cols(select(nyt_reduced, title), data.frame(stm_res$theta))) %>%\n na.omit() %>% group_by(date, type) %>% select(-title) %>% summarize_each(funs(mean)) %>%\n gather(\"topic\",\"proportion\",3:ncol(.))\nnyt_series$type <- factor(nyt_series$type, levels = c(nyt_part_var,nyt_share_var)\n , labels = c(nyt_part,nyt_share))\nnyt_series$topic <- as.numeric(gsub(\"X\",\"\",nyt_series$topic))\nnyt_series$topic <- factor(nyt_series$topic, labels = topics)\n\n## polecon\nggplot(filter(nyt_series, topic %in% topics[topics_polecon] & type %in% nyt_part)\n , aes(x = date, y = proportion, col = topic)) + geom_line() + facet_wrap(~type) +\n theme_classic() + theme(panel.border = element_rect(fill=NA)) + theme(legend.position = \"bottom\") + \n ylab(\"Proportion\") + xlab(\"Date\") +\n theme(axis.text.x = element_text(angle = 50, hjust = 1)) + scale_color_discrete(name = \"Topic\")\nggsave(\"fig/series_nyt.pdf\", height = 5)\n\nggplot(filter(nyt_series, topic %in% topics[topics_polecon] & type %in% nyt_share)\n , aes(x = date, y = proportion, col = topic)) + geom_line() + facet_wrap(~type) + \n theme_classic() + theme(panel.border = element_rect(fill=NA)) +\n theme(legend.position = \"bottom\") + ylab(\"Proportion\") + xlab(\"Date\") +\n theme(axis.text.x = element_text(angle = 50, hjust = 1)) + scale_color_discrete(name = \"Topic\")\nggsave(\"fig/series_share.pdf\", height = 5)\n\n## 3 main topics\np <- ggplot(filter(nyt_series, topic %in% c(\"Presidential Race\", \"Legal/Court\", \"Police\") & \n type %in% nyt_part), aes(x = date, y = proportion, col = topic, lty = topic)) + \n geom_line() + facet_wrap(~type) + theme_classic() + theme(legend.position = \"bottom\") + \n ylab(\"Proportion\") + xlab(\"Date\") + 
theme(axis.text.x = element_text(angle = 50, hjust = 1)) + \n scale_color_discrete(name = \"Topic\") + scale_linetype_discrete(name = \"Topic\")\np + theme(panel.border = element_rect(fill=NA))\nggsave(\"fig/series_nyt_main.pdf\", height = 5)\np + theme(panel.border = element_rect())\nggsave(\"fig/series_nyt_main_empty.pdf\", height = 5)\n\np <- ggplot(filter(nyt_series, topic %in% c(\"Presidential Race\", \"Legal/Court\", \"Police\") & \n type %in% nyt_share), aes(x = date, y = proportion, col = topic, lty = topic)) + \n geom_line() + facet_wrap(~type) + theme_classic() + theme(legend.position = \"bottom\") + \n ylab(\"Proportion\") + xlab(\"Date\") + theme(axis.text.x = element_text(angle = 50, hjust = 1)) + \n scale_color_discrete(name = \"Topic\") + scale_linetype_discrete(name = \"Topic\")\np + theme(panel.border = element_rect(fill=NA))\nggsave(\"fig/series_share_main.pdf\", height = 5)\np + theme(panel.border = element_rect())\nggsave(\"fig/series_share_main_empty.pdf\", height = 5)\n\n\n### complexity by category\nci <- function(x){\n mu <- mean(x, na.rm = T)\n se <- sd(x, na.rm = T)/sqrt(length(na.omit(x)))\n ci_lo <- mu - 1.96 * se\n ci_hi <- mu + 1.96 * se\n out <- c(mu, ci_lo, ci_hi)\n return(out)\n}\n\nreadab <- nyt_reduced %>% mutate(readab = nyt_readab) %>% filter(topic_pred %in% topics_polecon) %>% \n data.frame()\nreadab_summary <- data.frame(NULL)\nfor(i in c(nyt_part_var,nyt_share_var)){\n tmp <- data.frame(t(ci(readab[which(readab[i] == 1), \"readab\"])))\n tmp$variable = i\n readab_summary <- rbind(readab_summary, tmp)\n}\ncolnames(readab_summary)[1:3] <- c(\"mean\",\"cilo\",\"cihi\")\nreadab_summary$variable <- factor(readab_summary$variable\n , levels = c(nyt_share_var[c(1,3,2,4)],nyt_part_var[c(1,3,2,4)])\n , labels = c(nyt_share[c(1,3,2,4)],nyt_part[c(1,3,2,4)]))\nreadab_summary$group <- rep(c(\"Newspaper section\",\"Shared/Viewed\"),each=4)\n\np <- ggplot(readab_summary, aes(y = mean, ymin = cilo, ymax = cihi, x = variable)) + \n theme_classic() + geom_pointrange() + facet_wrap(~group, scales=\"free_x\") + \n ylab(\"Flesch-Kincaid Grade Level\") + xlab(NULL) + \n theme(axis.text.x = element_text(angle = 40, hjust = 1))\np + theme(panel.border = element_rect(fill=NA))\nggsave(\"fig/readability.pdf\",height=5)\np + theme(panel.border = element_rect())\nggsave(\"fig/readability_empty.pdf\",height=5)\n\n\n### switches between categories (polecon)\n\n## prepare data\nnyt_switch <- nyt_combined %>% filter(title %in% nyt_polecon$title) %>% select(date, title, type) %>%\n filter(title %in% unique(title[duplicated(title)])) %>% arrange(title, date) %>%\n group_by(title) %>% mutate(day = as.numeric(date - min(date))) %>% unique()\ntmp <- nyt_switch %>% group_by(title) %>% summarize(maxday = max(day)) %>% filter(maxday==0)\nnyt_switch <- nyt_switch %>% filter(!title %in% tmp$title) %>% count(day, type)\ntmp <- nyt_switch %>% group_by(day) %>% summarize(total = sum(n))\nnyt_switch <- nyt_switch %>% left_join(tmp) %>% mutate(prop = n/total) %>% filter(day <=5)\nnyt_switch$type <- factor(nyt_switch$type, levels = c(nyt_part_var,nyt_share_var)\n , labels = c(nyt_part,nyt_share))\nnyt_switch$group <- factor(nyt_switch$type %in% nyt_share\n , labels = c(\"Newspaper section\",\"Shared/Viewed\"))\n \n## create plot\np <- ggplot(nyt_switch, aes(x = day, y = prop, col = type, lty=type)) + geom_line() + theme_classic() + \n theme(legend.position = \"bottom\") + scale_color_discrete(name = NULL) + \n scale_linetype_discrete(name = NULL) + ylab(\"Proportion\") + xlab(\"Day\") + 
facet_wrap(~group)\np + theme(panel.border = element_rect(fill=NA))\nggsave(\"fig/switch.pdf\")\np + theme(panel.border = element_rect())\nggsave(\"fig/switch_empty.pdf\") \n\n\n"
}
] | 11 |
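Both R scripts above lean on the same small `ci()` helper to summarize readability scores. It is the standard normal-approximation 95% confidence interval for a mean; restated as a formula (this is just what the code computes, not a new result):

$$ \bar{x} \pm 1.96\,\frac{s}{\sqrt{n}}, \qquad s = \sqrt{\frac{1}{n-1}\sum_{i=1}^{n}(x_i-\bar{x})^2}, $$

where $n$ counts the non-missing observations (R's `sd()` uses the $n-1$ denominator) and 1.96 is the 0.975 quantile of the standard normal distribution.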
asai28/Fake-News-Detection
|
https://github.com/asai28/Fake-News-Detection
|
b0a8f79f32660543dc1f54fed3b8e7cad0d31ed5
|
b4767ec052b4b758aecdcb6a6d4404118e025688
|
a295ec1b9e612f0df49ec0173cd450bf4b29afe8
|
refs/heads/master
| 2020-05-07T11:55:19.896290 | 2019-03-30T01:26:56 | 2019-03-30T01:26:56 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5868365168571472,
"alphanum_fraction": 0.5919320583343506,
"avg_line_length": 33.130435943603516,
"blob_id": "e61b6798737ca8b66ae37494b3adbc30ef0ea3a1",
"content_id": "01102721df4f863804246e826aa5b93c4195dd1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2355,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 69,
"path": "/code/adjacency_matrix.py",
"repo_name": "asai28/Fake-News-Detection",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\n\nclass AdjacencyMatrix:\n def __init__(self, base_path):\n self.base_path = base_path\n\n def get_folder_data(self, folder):\n news_df = pd.read_csv(self.base_path + folder + \"/News.txt\", header=None)\n news_list = list(news_df[0])\n\n users_df = pd.read_csv(self.base_path + folder + \"/User.txt\", header=None)\n users_list = list(users_df[0])\n\n news_user_df = pd.read_csv(self.base_path + folder + \"/\" + folder + \"NewsUser.txt\", header=None, sep=\"\\t\")\n result_list = []\n for index, row in news_user_df.iterrows():\n news_index = row[0]\n user_index = row[1]\n result_list.append([news_list[news_index - 1], users_list[user_index - 1]])\n\n result_df = pd.DataFrame(result_list, columns=[\"News\", \"Users\"])\n\n return result_df, news_df, users_df\n\n def get_all_data(self):\n print(\"Fetching BuzzFeed data\")\n bf_res_df, bf_news_df, bf_users_df = self.get_folder_data(\"BuzzFeed\")\n\n print(\"Fetching PolitiFact data\")\n pf_res_df, pf_news_df, pf_users_df = self.get_folder_data(\"PolitiFact\")\n\n news_user_df = pd.concat([bf_res_df, pf_res_df])\n news_df = pd.concat([bf_news_df, pf_news_df])\n users_df = pd.concat([bf_users_df, pf_users_df])\n\n return news_user_df, news_df, users_df\n\n def get_adjacency_matrix(self):\n news_user_df, news_df, users_df = self.get_all_data()\n news_list = list(news_df[0].unique())\n users_list = list(users_df[0].unique())\n\n nodes = news_list + users_list\n result = np.empty((len(nodes), len(nodes)))\n\n print(\"Generating Adjacency Matrix\")\n for index, row in news_user_df.iterrows():\n news = row[0]\n user = row[1]\n result[nodes.index(news)][nodes.index(user)] = 1\n result[nodes.index(user)][nodes.index(news)] = 1\n\n result_df = pd.DataFrame(result, columns=nodes, index=nodes)\n\n # print(\"Dumping the Adjacency Matrix to CSV\")\n # result_df.to_csv(self.base_path + \"adjacency_matrix.csv\")\n\n print(\"Done\")\n\n return result_df\n\n\nif __name__ == \"__main__\":\n base_path = \"/Users/jagde/Documents/ASU/SWM/Project/Fake-News-Detection/dataset/\"\n\n adj = AdjacencyMatrix(base_path)\n res = adj.get_adjacency_matrix()\n"
},
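The `get_adjacency_matrix` method above materializes a dense `len(nodes) × len(nodes)` array and performs two `nodes.index()` linear scans per edge. For larger news–user graphs the same bipartite adjacency can be built sparsely with constant-time lookups. This is a sketch only, not part of the repository; the function name and the use of `scipy.sparse` (which the sibling file already imports) are my own:

```python
import numpy as np
import scipy.sparse as sp

def build_sparse_adjacency(news_user_df, news_list, users_list):
    """Sketch: same symmetric news-user adjacency as above, but sparse."""
    nodes = news_list + users_list
    index_of = {node: i for i, node in enumerate(nodes)}  # O(1) lookups instead of list.index
    rows, cols = [], []
    for news, user in news_user_df.itertuples(index=False):
        i, j = index_of[news], index_of[user]
        rows += [i, j]  # add both directions so the matrix stays symmetric
        cols += [j, i]
    data = np.ones(len(rows))
    adj = sp.csr_matrix((data, (rows, cols)), shape=(len(nodes), len(nodes)))
    adj.data[:] = 1  # repeated (news, user) pairs sum during construction; clamp back to 1
    return adj
```

Clamping the stored entries back to 1 keeps repeated (news, user) pairs encoding a single edge, matching the dense version's behavior.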
{
"alpha_fraction": 0.5412460565567017,
"alphanum_fraction": 0.5523842573165894,
"avg_line_length": 33.62650680541992,
"blob_id": "9b86be011054b93d1c66b827414921cff04db7a9",
"content_id": "ecb22ab0a376152ca9f5d67f5eb6383478ee823b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2873,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 83,
"path": "/code/gat_adj_features.py",
"repo_name": "asai28/Fake-News-Detection",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport scipy.sparse as sp\nfrom code.adjacency_matrix import AdjacencyMatrix\nfrom code.feature_matrix import FeatureMatrix\n\nclass GATInputGenerator:\n def __init__(self):\n self.gat = \"cora\"\n self.AM = AdjacencyMatrix(base_path = \"/Users/jagde/Documents/ASU/SWM/Project/Fake-News-Detection/dataset/\")\n self.FM = FeatureMatrix(base_path = \"/Users/jagde/Documents/ASU/SWM/Project/Fake-News-Detection/dataset/\")\n self.label_zip = None\n\n def getAdj(self):\n adj_df = self.AM.get_adjacency_matrix()\n # feature_df = self.FM.get_feature_matrix()\n print(adj_df.shape)\n adj_np = adj_df.values\n print(type(adj_np))\n res = []\n for i in range(adj_np.shape[0]):\n #print(\"Adj \",i,\" Row \", adj_np[i])\n temp_res = np.nonzero(np.array(adj_np[i]))[0]\n #print(\"temp\", temp_res)\n # res = np.concatenate((res, temp_res), axis=0)\n res.append(list(temp_res))\n print(res)\n\n return sp.csr_matrix(res)\n\n def getFeatures(self):\n feature_df = self.FM.get_feature_matrix()\n # print(adj_df.shape)\n label = feature_df['label'].tolist()\n label_comp = [0 if each else 1 for each in label]\n self.label_zip = list(zip(label_comp, label))\n feature_df.drop(['label'], axis=1)\n feature_np = feature_df.values\n # print(type(feature_np))\n res = feature_np\n for i in range(feature_np.shape[0]):\n #print(\"Adj \",i,\" Row \", adj_np[i])\n temp_res = np.nonzero(np.array(feature_np[i]))[0]\n #print(\"temp\", temp_res)\n # res = np.concatenate((res, temp_res), axis=0)\n res.append(list(temp_res))\n print(res)\n\n return sp.csr_matrix(res)\n\n def getYs(self):\n yTrain = yVal = yTest = self.label_zip[:]\n train_mask, val_mask, test_mask = [False] * len(yTrain)\n n = len(yTrain)\n train_range = range(0, int(n * 0.5))\n val_range = range(int(n * 0.5), int(n * 0.75))\n test_range = range(int(n * 0.75), n)\n\n for i in train_range:\n yVal[i] = (0,0)\n yTest[i] = (0,0)\n train_mask[i] = True\n for i in val_range:\n yTrain[i] = (0,0)\n yTest[i] = (0,0)\n val_mask[i] = True\n for i in test_range:\n yVal[i] = (0,0)\n yTrain[i] = (0,0)\n test_mask[i] = True\n\n return yTrain, yVal, yTest, train_mask, val_mask, test_mask\n\n def getComps(self):\n adj = self.getAdj()\n features = self.getFeatures()\n yTrain, yVal, yTest, train_mask, val_mask, test_mask = self.getYs()\n\n return adj, features, yTrain, yVal, yTest, train_mask, val_mask, test_mask\n\nif __name__ == \"__main__\":\n obj = GATInputGenerator()\n obj.getComps()"
}
] | 2 |
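The `getYs` method above implements the masking convention common to GAT-style training inputs: every node carries a label vector, but outside a given split the label is zeroed out and the split's boolean mask is False at that position. A self-contained sketch of that convention under the same 50/25/25 split (names and the helper itself are illustrative, not from the repository):

```python
def make_split_masks(labels, train_frac=0.5, val_frac=0.25):
    """Zero labels outside each split and return one boolean mask per split."""
    n = len(labels)
    cut1, cut2 = int(n * train_frac), int(n * (train_frac + val_frac))
    train_mask = [i < cut1 for i in range(n)]
    val_mask = [cut1 <= i < cut2 for i in range(n)]
    test_mask = [i >= cut2 for i in range(n)]
    zero = (0, 0)  # two-class one-hot placeholder, matching label_zip above
    y_train = [y if m else zero for y, m in zip(labels, train_mask)]
    y_val = [y if m else zero for y, m in zip(labels, val_mask)]
    y_test = [y if m else zero for y, m in zip(labels, test_mask)]
    return y_train, y_val, y_test, train_mask, val_mask, test_mask
```

Keeping the three label lists as independent copies (as the fixed `getYs` does) is essential; with aliased lists, zeroing the validation labels would silently zero the training labels too.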
Jotaro2401/shoptoons
|
https://github.com/Jotaro2401/shoptoons
|
aa3c5e19bcefa539bf5bf1f5f6d78d90767cbcda
|
02926160be30080ee91d7e03e0857c35b1f66254
|
4854f56b2298a352d824606a95d7c06070794fed
|
refs/heads/master
| 2023-09-02T16:49:46.392653 | 2021-11-13T16:58:01 | 2021-11-13T16:58:01 | 425,927,606 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.7298245429992676,
"avg_line_length": 19.35714340209961,
"blob_id": "cb9aa51b62c634c0bf7377bde320cd5b2fd93332",
"content_id": "1ffedc877706d5cff0031878f802e89a848040cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 14,
"path": "/README.md",
"repo_name": "Jotaro2401/shoptoons",
"src_encoding": "UTF-8",
"text": "# SHOPTOONS \nSIMPLE ECOMMERCE WEBSITE : SHOPTOONS (ANME & WEBSERIES MERCHANDISE SHOPPING WEBSITE)\n \n## Dependencies ##\n1. Python3\n2. Flask\n3. Sqlite\n4. Bootstrap\n5. Php\n\n## How to run ##\n1. Set up database by running database.py\n2. Run main.py\n3. Enter localhost:5000 in the browser.\n"
},
{
"alpha_fraction": 0.6235754489898682,
"alphanum_fraction": 0.6284310817718506,
"avg_line_length": 40.530250549316406,
"blob_id": "5dcc7f8e59df7e62da4351ec361fa66ff2a2f0e4",
"content_id": "633e4e3ff5049cdc8185f518e68b8bb5de4f3f39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 35011,
"license_type": "no_license",
"max_line_length": 318,
"num_lines": 843,
"path": "/main.py",
"repo_name": "Jotaro2401/shoptoons",
"src_encoding": "UTF-8",
"text": "from flask import *\nimport sqlite3, hashlib, os\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\napp.secret_key = 'random string'\nUPLOAD_FOLDER = 'static/uploads'\nALLOWED_EXTENSIONS = set(['jpeg', 'jpg', 'png', 'gif'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef getLoginDetails():\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n if 'email' not in session:\n loggedIn = False\n firstName = ''\n noOfItems = 0\n else:\n loggedIn = True\n cur.execute(\"SELECT userId, firstName FROM users WHERE email = '\" + session['email'] + \"'\")\n userId, firstName = cur.fetchone()\n cur.execute(\"SELECT count(productId) FROM kart WHERE userId = \" + str(userId))\n noOfItems = cur.fetchone()[0]\n conn.close()\n return (loggedIn, firstName, noOfItems)\n\ndef getsellerLoginDetails():\n with sqlite3.connect('seller.db') as conn:\n cur = conn.cursor()\n if 'email' not in session:\n loggedIn = False\n firstName = ''\n noOfItems = 0\n else:\n loggedIn = True\n cur.execute(\"SELECT userId, firstName FROM users WHERE email = '\" + session['email'] + \"'\")\n userId, firstName = cur.fetchone()\n cur.execute(\"SELECT count(productId) FROM kart WHERE userId = \" + str(userId))\n noOfItems = cur.fetchone()[0]\n conn.close()\n return (loggedIn, firstName, noOfItems)\n\[email protected](\"/test\")\ndef test():\n\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('test.html', itemData=itemData)\n\n#login \n\[email protected](\"/customer\")\ndef rootc():\n loggedIn, firstName, noOfItems = getLoginDetails()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products')\n itemData = cur.fetchall()\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('hello.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\n\[email protected](\"/loginForm\")\ndef loginForm():\n if 'email' in session:\n return redirect(url_for('rootc'))\n else:\n return render_template('login.html', error='')\n\n\[email protected](\"/seller\")\ndef roots():\n loggedIn, firstName, noOfItems = getsellerLoginDetails()\n with sqlite3.connect('seller.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products')\n itemData = cur.fetchall()\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('sellerhome.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\n\[email protected](\"/sellerloginForm\")\ndef sellerloginForm():\n if 'email' in session:\n return redirect(url_for('roots'))\n else:\n return render_template('sellerlogin.html', error='')\n\[email protected](\"/login\", methods = ['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n email = request.form['email']\n password = request.form['password']\n if is_valid(email, password):\n session['email'] = email\n return redirect(url_for('rootc'))\n else:\n error = 'Invalid UserId / Password'\n return render_template('login.html', error=error)\n\[email protected](\"/sellerlogin\", methods = ['POST', 'GET'])\ndef 
sellerlogin():\n if request.method == 'POST':\n email = request.form['email']\n password = request.form['password']\n if seller_valid(email, password):\n session['email'] = email\n return redirect(url_for('roots'))\n else:\n error = 'Invalid UserId / Password'\n return render_template('sellerlogin.html', error=error)\n\[email protected](\"/sellerregister\", methods = ['GET', 'POST'])\ndef sellerregister():\n if request.method == 'POST':\n #Parse form data \n password = request.form['password']\n email = request.form['email']\n firstName = request.form['firstName']\n lastName = request.form['lastName']\n address1 = request.form['address1']\n address2 = request.form['address2']\n zipcode = request.form['zipcode']\n city = request.form['city']\n state = request.form['state']\n country = request.form['country']\n phone = request.form['phone']\n\n with sqlite3.connect('seller.db') as con:\n try:\n cur = con.cursor()\n cur.execute('INSERT INTO users (password, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (hashlib.md5(password.encode()).hexdigest(), email, firstName, lastName, address1, address2, zipcode, city, state, country, phone))\n\n con.commit()\n\n msg = \"Registered Successfully\"\n except:\n con.rollback()\n msg = \"Error occured\"\n con.close()\n return render_template(\"sellerlogin.html\", error=msg)\n\[email protected](\"/sellerregisterationForm\")\ndef sellerregistrationForm():\n return render_template(\"sellerregister.html\")\n\[email protected](\"/register\", methods = ['GET', 'POST'])\ndef register():\n if request.method == 'POST':\n #Parse form data \n password = request.form['password']\n email = request.form['email']\n firstName = request.form['firstName']\n lastName = request.form['lastName']\n address1 = request.form['address1']\n address2 = request.form['address2']\n zipcode = request.form['zipcode']\n city = request.form['city']\n state = request.form['state']\n country = request.form['country']\n phone = request.form['phone']\n\n with sqlite3.connect('main.db') as con:\n try:\n cur = con.cursor()\n cur.execute('INSERT INTO users (password, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (hashlib.md5(password.encode()).hexdigest(), email, firstName, lastName, address1, address2, zipcode, city, state, country, phone))\n\n con.commit()\n\n msg = \"Registered Successfully\"\n except:\n con.rollback()\n msg = \"Error occured\"\n con.close()\n return render_template(\"login.html\", error=msg)\n\[email protected](\"/registerationForm\")\ndef registrationForm():\n return render_template(\"register.html\")\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\ndef parse(data):\n ans = []\n i = 0\n while i < len(data):\n curr = []\n for j in range(7):\n if i >= len(data):\n break\n curr.append(data[i])\n i += 1\n ans.append(curr)\n return ans\n\n#login\n\[email protected](\"/webseries\")\ndef root2():\n loggedIn, firstName, noOfItems = getLoginDetails()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2')\n itemData = cur.fetchall()\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('webseries.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\[email protected](\"/sellerpage\")\ndef seller():\n loggedIn, firstName, noOfItems = getsellerLoginDetails()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1')\n itemData = cur.fetchmany(size=7)\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('spage.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\n\[email protected](\"/add\")\ndef admin():\n if 'email' not in session:\n return redirect(url_for('roots'))\n loggedIn, firstName, noOfItems = getsellerLoginDetails()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT categoryId, name FROM categories\")\n categories = cur.fetchall()\n cur.execute(\"SELECT mainId, main FROM main\")\n main = cur.fetchall()\n conn.close()\n return render_template('add2.html', loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categories=categories , main=main)\n\[email protected](\"/addItem\", methods=[\"GET\", \"POST\"])\ndef addItem():\n if request.method == \"POST\":\n name = request.form['name']\n price = float(request.form['price'])\n description = request.form['description']\n stock = int(request.form['stock'])\n categoryId = int(request.form['category'])\n mainId = int(request.form['main'])\n\n #Uploading image procedure\n image = request.files['image']\n if image and allowed_file(image.filename):\n filename = secure_filename(image.filename)\n image.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n imagename = filename\n with sqlite3.connect('main.db') as conn:\n try:\n cur = conn.cursor()\n cur.execute('''INSERT INTO products (name, price, description, image, stock, categoryId, mainId) VALUES (?, ?, ?, ?, ?, ?, ?)''', (name, price, description, imagename, stock, categoryId, mainId))\n conn.commit()\n msg=\"added successfully\"\n except:\n msg=\"error occured\"\n conn.rollback()\n conn.close()\n print(msg)\n return redirect(url_for('roots'))\n\[email protected](\"/remove\")\ndef remove():\n if 'email' not in session:\n return redirect(url_for('roots'))\n loggedIn, firstName, noOfItems = getsellerLoginDetails()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products')\n data = cur.fetchall()\n conn.close()\n return render_template('remove.html', data=data, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems,)\n\[email protected](\"/removeItem\")\ndef removeItem():\n productId = 
request.args.get('productId')\n    with sqlite3.connect('main.db') as conn:\n        try:\n            cur = conn.cursor()\n            cur.execute('DELETE FROM products WHERE productID = ' + productId)\n            conn.commit()\n            msg = \"Deleted successfully\"\n        except:\n            conn.rollback()\n            msg = \"Error occurred\"\n        conn.close()\n        print(msg)\n    return redirect(url_for('remove'))\n\n#separation one#\n\[email protected](\"/animehoodie\")\ndef animehoodie():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 1')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/animetshirt\")\ndef animetshirt():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 2')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/animekeychain\")\ndef animekeychain():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 3')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/animecosplay\")\ndef animecosplay():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 4')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/animemug\")\ndef animemug():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 5')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/animeposter\")\ndef animeposter():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 6')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/animebag\")\ndef animebag():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 7')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return 
render_template('shop.html', itemData=itemData)\n\[email protected](\"/animemask\")\ndef animemask():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 8')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\n\[email protected](\"/animesticker\")\ndef animesticker():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 1 AND categoryId = 9')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webhoodie\")\ndef webhoodie():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 1')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webtshirt\")\ndef webtshirt():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 2')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webkeychain\")\ndef webkeychain():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId =2 AND categoryId = 3')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webcosplay\")\ndef webcosplay():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 4')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webmug\")\ndef webemug():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 5')\n itemData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webposter\")\ndef webposter():\n loggedIn, firstName, noOfItems = getLoginDetails()\n categoryId = request.args.get(\"categoryId\")\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, 
stock FROM products WHERE mainId = 2 AND categoryId = 6')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webbag\")\ndef webbag():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 7')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\[email protected](\"/webmask\")\ndef webmask():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 8')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\n\[email protected](\"/websticker\")\ndef websticker():\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    categoryId = request.args.get(\"categoryId\")\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE mainId = 2 AND categoryId = 9')\n        itemData = cur.fetchall()\n    itemData = parse(itemData) \n    return render_template('shop.html', itemData=itemData)\n\n#separation one#\n\[email protected](\"/account/profile\")\ndef profileHome():\n    if 'email' not in session:\n        return redirect(url_for('loginForm'))  # 'root1' is not a defined endpoint; send unauthenticated users to the login form\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    return render_template(\"profileHome.html\", loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\[email protected](\"/account\")\ndef account():\n    if 'email' not in session:\n        return redirect(url_for('loginForm'))\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute(\"SELECT userId, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone FROM users WHERE email = ?\", (session['email'], ))\n        profileData = cur.fetchone()\n    conn.close()\n    return render_template(\"accout.html\", profileData=profileData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\[email protected](\"/account/profile/edit\")\ndef editProfile():\n    if 'email' not in session:\n        return redirect(url_for('loginForm'))\n    loggedIn, firstName, noOfItems = getLoginDetails()\n    with sqlite3.connect('main.db') as conn:\n        cur = conn.cursor()\n        cur.execute(\"SELECT userId, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone FROM users WHERE email = '\" + session['email'] + \"'\")\n        profileData = cur.fetchone()\n    conn.close()\n    return render_template(\"editProfile.html\", profileData=profileData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\[email protected](\"/account/profile/changePassword\", methods=[\"GET\", \"POST\"])\ndef changePassword():\n    if 'email' not in session:\n        return redirect(url_for('loginForm'))\n    if request.method == \"POST\":\n        oldPassword = request.form['oldpassword']\n        oldPassword = hashlib.md5(oldPassword.encode()).hexdigest()\n        newPassword = request.form['newpassword']\n        newPassword = hashlib.md5(newPassword.encode()).hexdigest()\n        with sqlite3.connect('main.db') as conn:\n            cur = conn.cursor()\n            cur.execute(\"SELECT userId, 
password FROM users WHERE email = '\" + session['email'] + \"'\")\n userId, password = cur.fetchone()\n if (password == oldPassword):\n try:\n cur.execute(\"UPDATE users SET password = ? WHERE userId = ?\", (newPassword, userId))\n conn.commit()\n msg=\"Changed successfully\"\n except:\n conn.rollback()\n msg = \"Failed\"\n return render_template(\"changePassword.html\", msg=msg)\n else:\n msg = \"Wrong password\"\n conn.close()\n return render_template(\"changePassword.html\", msg=msg)\n else:\n return render_template(\"changePassword.html\")\n\[email protected](\"/updateProfile\", methods=[\"GET\", \"POST\"])\ndef updateProfile():\n if request.method == 'POST':\n email = request.form['email']\n firstName = request.form['firstName']\n lastName = request.form['lastName']\n address1 = request.form['address1']\n address2 = request.form['address2']\n zipcode = request.form['zipcode']\n city = request.form['city']\n state = request.form['state']\n country = request.form['country']\n phone = request.form['phone']\n with sqlite3.connect('main.db') as con:\n try:\n cur = con.cursor()\n cur.execute('UPDATE users SET firstName = ?, lastName = ?, address1 = ?, address2 = ?, zipcode = ?, city = ?, state = ?, country = ?, phone = ? WHERE email = ?', (firstName, lastName, address1, address2, zipcode, city, state, country, phone, email))\n\n con.commit()\n msg = \"Saved Successfully\"\n except:\n con.rollback()\n msg = \"Error occured\"\n con.close()\n return redirect(url_for('editProfile'))\n\[email protected](\"/productDescription\")\ndef productDescription():\n loggedIn, firstName, noOfItems = getLoginDetails()\n productId = request.args.get('productId')\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE productId = ' + productId)\n productData = cur.fetchone()\n conn.close()\n return render_template(\"productDescription1.html\", data=productData, loggedIn = loggedIn, firstName = firstName, noOfItems = noOfItems)\n\[email protected](\"/addToCart\")\ndef addToCart():\n if 'email' not in session:\n return redirect(url_for('loginForm'))\n else:\n productId = int(request.args.get('productId'))\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId FROM users WHERE email = '\" + session['email'] + \"'\")\n userId = cur.fetchone()[0]\n try:\n cur.execute(\"INSERT INTO kart (userId, productId) VALUES (?, ?)\", (userId, productId))\n conn.commit()\n msg = \"Added successfully\"\n except:\n conn.rollback()\n msg = \"Error occured\"\n conn.close()\n return redirect(url_for('cart1'))\n\[email protected](\"/cart\")\ndef cart():\n if 'email' not in session:\n return redirect(url_for('loginForm'))\n loggedIn, firstName, noOfItems = getLoginDetails()\n email = session['email']\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId FROM users WHERE email = '\" + email + \"'\")\n userId = cur.fetchone()[0]\n cur.execute(\"SELECT products.productId, products.name, products.price, products.image FROM products, kart WHERE products.productId = kart.productId AND kart.userId = \" + str(userId))\n products = cur.fetchall()\n totalPrice = 0\n for row in products:\n totalPrice += row[2]\n return render_template(\"cart.html\", products = products, totalPrice=totalPrice, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\[email protected](\"/cart1\")\ndef cart1():\n if 'email' not in session:\n return 
redirect(url_for('loginForm'))\n loggedIn, firstName, noOfItems = getLoginDetails()\n email = session['email']\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId FROM users WHERE email = '\" + email + \"'\")\n userId = cur.fetchone()[0]\n cur.execute(\"SELECT products.productId, products.name, products.price, products.image FROM products, kart WHERE products.productId = kart.productId AND kart.userId = \" + str(userId))\n products = cur.fetchall()\n totalPrice = 0\n for row in products:\n totalPrice += row[2]\n return render_template(\"cart1.html\", products = products, totalPrice=totalPrice, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\n\[email protected](\"/removeFromCart\")\ndef removeFromCart():\n if 'email' not in session:\n return redirect(url_for('loginForm'))\n email = session['email']\n productId = int(request.args.get('productId'))\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId FROM users WHERE email = '\" + email + \"'\")\n userId = cur.fetchone()[0]\n try:\n cur.execute(\"DELETE FROM kart WHERE userId = \" + str(userId) + \" AND productId = \" + str(productId))\n conn.commit()\n msg = \"removed successfully\"\n except:\n conn.rollback()\n msg = \"error occured\"\n conn.close()\n return redirect(url_for('cart1'))\n\[email protected](\"/logout\")\ndef logout():\n session.pop('email', None)\n return render_template(\"hello.html\")\n\ndef is_valid(email, password):\n con = sqlite3.connect('main.db')\n cur = con.cursor()\n cur.execute('SELECT email, password FROM users')\n data = cur.fetchall()\n for row in data:\n if row[0] == email and row[1] == hashlib.md5(password.encode()).hexdigest():\n return True\n return False\n\ndef seller_valid(email, password):\n con = sqlite3.connect('seller.db')\n cur = con.cursor()\n cur.execute('SELECT email, password FROM users')\n data = cur.fetchall()\n for row in data:\n if row[0] == email and row[1] == hashlib.md5(password.encode()).hexdigest():\n return True\n return False\n\n\[email protected](\"/checkout\", methods=['GET','POST'])\ndef payment():\n if 'email' not in session:\n return redirect(url_for('loginForm'))\n loggedIn, firstName, noOfItems = getLoginDetails()\n email = session['email']\n\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId FROM users WHERE email = '\" + email + \"'\")\n userId = cur.fetchone()[0]\n cur.execute(\"SELECT products.productId, products.name, products.price, products.image FROM products, kart WHERE products.productId = kart.productId AND kart.userId = \" + str(userId))\n products = cur.fetchall()\n totalPrice = 0\n for row in products:\n totalPrice += row[2]\n print(row)\n cur.execute(\"INSERT INTO Orders (userId, productId) VALUES (?, ?)\", (userId, row[0]))\n cur.execute(\"DELETE FROM kart WHERE userId = \" + str(userId))\n conn.commit()\n\n \n\n return render_template(\"checkout.html\", products = products, totalPrice=totalPrice, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\[email protected](\"/\")\ndef home():\n return render_template('hello.html')\n\[email protected](\"/decider\")\ndef decider():\n return render_template('decider.html')\n\[email protected](\"/aboutus\")\ndef aboutus():\n return render_template('aboutus.html')\n\[email protected](\"/anime\")\ndef trial():\n loggedIn, firstName, noOfItems = getLoginDetails()\n with sqlite3.connect('database.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, 
description, image, stock FROM products')\n itemData = cur.fetchall()\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('anime.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\n\[email protected](\"/thanks\")\ndef thanks():\n loggedIn, firstName, noOfItems = getLoginDetails()\n with sqlite3.connect('database.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products')\n itemData = cur.fetchall()\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('thanks.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\n\[email protected](\"/trial2\")\ndef test2():\n loggedIn, firstName, noOfItems = getLoginDetails()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT productId, name, price, description, image, stock FROM products')\n itemData = cur.fetchall()\n cur.execute('SELECT categoryId, name FROM categories')\n categoryData = cur.fetchall()\n itemData = parse(itemData) \n return render_template('shop.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)\n\[email protected](\"/payout\")\ndef payout():\n if 'email' not in session:\n return redirect(url_for('loginForm'))\n loggedIn, firstName, noOfItems = getLoginDetails()\n email = session['email']\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone FROM users WHERE email = ?\", (session['email'], ))\n profileData = cur.fetchone()\n conn.close()\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT userId FROM users WHERE email = '\" + email + \"'\")\n userId = cur.fetchone()[0]\n cur.execute(\"SELECT products.productId, products.name, products.price, products.image FROM products, kart WHERE products.productId = kart.productId AND kart.userId = \" + str(userId))\n products = cur.fetchall()\n totalPrice = 0\n for row in products:\n totalPrice += row[2]\n return render_template(\"payout.html\", products = products, profileData=profileData, totalPrice=totalPrice, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)\n\[email protected](\"/search\", methods=['GET', 'POST'])\ndef search():\n loggedIn, firstName, noOfItems = getLoginDetails()\n if request.method == 'POST':\n name = request.form['sname']\n print(name)\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT products.productId, products.name, products.price, products.image, categories.name FROM products, categories WHERE products.categoryId = categories.categoryId AND products.name like ? 
', ('%{}%'.format(name), ))\n data = cur.fetchall()\n print(data)\n conn.close()\n categoryName = data[0][3]\n print(categoryName)\n data = parse(data)\n return render_template('shopsearch.html', data=data, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems,\n categoryName=categoryName)\n else:\n return render_template('shoperror.html')\n\[email protected](\"/searchweb\", methods=['GET', 'POST'])\ndef searchweb():\n loggedIn, firstName, noOfItems = getLoginDetails()\n if request.method == 'POST':\n name = request.form['sname']\n print(name)\n with sqlite3.connect('main.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT products.productId, products.name, products.price, products.image, categories.name FROM products, categories WHERE products.categoryId = categories.categoryId AND products.name like ? ', ('%{}%'.format(name), ))\n data = cur.fetchall()\n print(data)\n conn.close()\n categoryName = data[0][3]\n print(categoryName)\n data = parse(data)\n return render_template('shopsearch.html', data=data, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems,\n categoryName=categoryName)\n else:\n return render_template('shopweb.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n"
}
] | 2 |
shansenault/Misc | https://github.com/shansenault/Misc | 986299476e8f458460da6a2b6ac8e333f8435fe2 | e524946930555d19295fd1c08c8bf88c83bee8ad | f2f5aa7b1c65318fc14ad499702cd4516a4da2df | refs/heads/master | 2021-09-01T06:22:32.309166 | 2017-12-25T09:47:07 | 2017-12-25T09:47:07 | 77,104,673 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47931304574012756,
"alphanum_fraction": 0.48868072032928467,
"avg_line_length": 33.64864730834961,
"blob_id": "267ff9a6bcd80c6c365b53a4f47b1c5c74154ce3",
"content_id": "e0600b89f1428f65fb303aabdb2c9616e8cb754a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "JavaScript",
"length_bytes": 1281,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 37,
"path": "/Javascript/react-typing-test/src/Tester.js",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\n\nexport default class Tester extends Component {\n\n constructor(props)\n {\n super(props);\n this.state = {userInput: \"\"};\n }\n\n handleTextChanged(event) {\n this.setState({userInput: event.target.value});\n }\n\n// {e => this.handleTextChanged(e)} binds \"this\" by creating a lamda.\n\n render() {\n return (\n <div>\n <h3>Letters written: {this.state.userInput.length}</h3>\n <h2>{this.props.sentence.split(\"\").map((value, index) => {\n if(index > this.state.userInput.length - 1) {\n return <span>{value}</span>;\n }\n if(this.state.userInput[index] === value) {\n return <span style={{ backgroundColor: 'green'}}>{value}</span>\n }\n return <span style={{ backgroundColor: 'red'}}>{value}</span>\n })}</h2>\n <textarea onChange={e => this.handleTextChanged(e)}\n style={{fontFamily: 'Helvetica', fontWeight: 'bold', fontSize: '1.5em',\n width: this.props.sentence.length * 15, height: '150px', resize: 'none'}}\n />\n </div>\n )\n }\n}"
},
{
"alpha_fraction": 0.5763598084449768,
"alphanum_fraction": 0.5800209045410156,
"avg_line_length": 31.982759475708008,
"blob_id": "91db0772c8d918bf6c6b779c7d55feaf6a500b01",
"content_id": "617080dfe7361f91cf4a7bf0e285c6ba1bbc931e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1912,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 58,
"path": "/Python/rps.py",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "import random\nimport sys\n\nclass InputHandler():\n\n VALID_MOVES = ('rock', 'paper', 'scissors')\n VALID_OPTIONS = ['quit']\n\n def capture(self, text=\"\", prompt='> '):\n\n if text:\n return raw_input(\"{0}\\n{1}\".format(text, prompt))\n else:\n return raw_input(prompt)\n\n def validate(self, user_input):\n\n for option in self.VALID_OPTIONS:\n if user_input.lower() in option:\n if option == \"quit\":\n print \"Goodbye.\"\n sys.exit()\n\n for move in self.VALID_MOVES:\n if user_input.lower() in move:\n return True, move\n\n return False, None\n\n def check_winner(self, user_choice):\n\n computer_choice = random.choice(self.VALID_MOVES)\n \n if user_choice == computer_choice:\n return \"The computer chose {0}. It's a draw.\".format(computer_choice)\n elif user_choice == 'rock' and computer_choice == 'paper':\n return \"The computer chose {0}. You lose.\".format(computer_choice)\n elif user_choice == 'paper' and computer_choice == 'scissors':\n return \"The computer chose {0}. You lose.\".format(computer_choice)\n elif user_choice == 'scissors' and computer_choice == 'rock':\n return \"The computer chose {0}. You lose.\".format(computer_choice)\n else:\n return \"The computer chose {0}. You win.\".format(computer_choice)\n\ndef main():\n\n ih = InputHandler()\n\n success, user_choice = ih.validate(ih.capture(\"\\nWelcome to Rock, Paper, Scissors. Make your move:\"))\n\n while True:\n if success:\n success, user_choice = ih.validate(ih.capture(ih.check_winner(user_choice)))\n else:\n success, user_choice = ih.validate(ih.capture(\"Invalid input. Please select either rock, paper or scissors. Use 'q' to quit.\"))\n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.48962655663490295,
"alphanum_fraction": 0.48962655663490295,
"avg_line_length": 34.24390411376953,
"blob_id": "7a966f9c56cec0cae22962ce73deb4bb1646bd93",
"content_id": "40472278b5dbe11bd151680a8deb7ea9e53dcaf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1446,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 41,
"path": "/Python/table/README.md",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "# table.py\n\nFormat plaintext or (eventually) CSVs into an ASCII table that expands/contracts to the input data. Expected input from plaintext files:\n\n```\nFirst line (first column header)\nSecond line (first row of first column)\nThird line (second row of first column)\n...\n(newline)\nnth line (second column header)\n...\n(final line must be a newline, this indcates the final column)\n```\n\nSee `sample-table.txt` for an example.\n\n## Usage\n\nIn your terminal emulator of choice, within the directory of `table.py` and the input file:\n\n`python table.py <sample-table.txt>`\n\nYou should see something akin to this:\n\n```\n+-----------------------------+-----------------------------------+--------------------------------+\n| .text | .data | .bss |\n+-----------------------------+-----------------------------------+--------------------------------+\n| read-only | writable | writable |\n| holds program instructions | contains static initialized data | contains uninitialized data |\n| | reserved for global variables | reserved for global variables |\n+-----------------------------+-----------------------------------+--------------------------------+\n```\n\n\n## TODO\n\n* Turn this into a proper CLI tool\n* Add feature to customize the style of the separators\n* Add support for CSVs\n\n"
},
{
"alpha_fraction": 0.6016877889633179,
"alphanum_fraction": 0.607172966003418,
"avg_line_length": 31.452054977416992,
"blob_id": "ed0556fa99efe0efa4c0b340b55d31826aa2d656",
"content_id": "247bf8d1e3683e895abd11963f2506a5f66f55af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2370,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 73,
"path": "/Python/table/table.py",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTakes plaintext files and converts them to ascii tables.\nFirst line of each column is treated as the column label, columns\nare split by newlines. Any other line is a row of its corresponding column.\n\"\"\"\n\ndef create_column(column_data):\n result = ''\n\n padding = len(max(column_data, key=len)) + 2\n \n # write header\n result += '+' + '-' * (padding + 1) + '+\\n' \n result += '|{:^{pad}}'.format(column_data[0], pad=padding + 1) + '|\\n'\n result += '+' + '-' * (padding + 1) + '+\\n' \n\n # write rows\n for row in column_data[1:]:\n result += '| {:{pad}}'.format(row, pad=padding) + '|\\n'\n\n # write footer\n result += '+' + '-' * (padding + 1) + '+' \n\n return result\n\ndef parse_file(text_file):\n table = []\n lines = []\n\n with open(text_file, 'r') as f:\n for line in f:\n if line == '\\n': # this requires the text file to end with a newline\n table.append(list(lines)) # pass by value instead of by reference - just copy lines\n lines[:] = []\n else:\n lines.append(line.replace('\\n', ''))\n\n return table\n\ncolumn_list = []\nresult = ''\n\ncolumns = parse_file('sample-table.txt')\nlongest_column_length = len(max(columns, key=len))\n\n# any columns that are shorter than the longest column, add blank lines to make sure the height is uniform\nfor column in columns:\n if len(column) < longest_column_length:\n for x in range(0, longest_column_length - len(column)):\n column.append('')\n\n# create columns\nfor column in columns:\n column_list.append(create_column(column))\n\n# determine the height of the lines by finding how long vertically a column is by counting newline characters\nmax_lines = len(column_list[0].split('\\n'))\n\n# for every line that needs to be written\nfor x in range(0, max_lines):\n # go through each column\n for y in range(0, len(column_list)):\n # then get the line for the column and append it to the result\n # if the column isn't the first one, cut out the first character of each newline\n # to avoid duplicate characters\n if y != 0:\n result += column_list[y].split('\\n')[x][1:]\n else:\n result += column_list[y].split('\\n')[x]\n # once every x line of every column has been added to the result, add a newline and continue\n result += '\\n'\n\nprint result\n\n"
},
{
"alpha_fraction": 0.5007824897766113,
"alphanum_fraction": 0.5735524296760559,
"avg_line_length": 31.769229888916016,
"blob_id": "88bcda87fd7205584d2d6576d1352b863c753bea",
"content_id": "0f6cd30b8cfc3690da754aac482bec9133e40486",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1278,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 39,
"path": "/Challenges/Daily Programmer/344_Easy_Baum-Sweet_Sequence.py",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "# [2017-12-11] Challenge #344 [Easy] Baum-Sweet Sequence\n# https://redd.it/7j33iv\n# Your challenge today is to write a program that generates the Baum-Sweet sequence from 0 to some number n.\n# For example, given \"20\" your program would emit:\n# 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0\n\n\ndef generate_baum_sweet_sequence(n1, n2):\n\n sequence = []\n\n for n in range(n1, n2 + 1):\n blocks = list(('{0:b}'.format(n)).split('1'))\n odd_blocks = False\n\n for block in blocks:\n if len(block) % 2 is not 0:\n odd_blocks = True\n\n sequence.append(1 if not odd_blocks else 0)\n # 1 if the binary representation of n contains no block of consecutive 0s of odd length; otherwise 0\n\n if n1 == 0: # handle the case where the first n in the sequence is 0\n sequence.remove(0), sequence.insert(0, 1)\n\n return sequence\n\n\nif __name__ == '__main__':\n\n n1 = 0\n n2 = 20\n expected_result = [1, 1, 0, 1, 1, 0, 0, 1,\n 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0]\n sequence = generate_baum_sweet_sequence(n1, n2)\n\n if sequence == expected_result:\n print(\"Baum-Sweet sequence from {n1} to {n2}:\\n{0}\".format(\n ', '.join(str(x) for x in sequence), n1=n1, n2=n2))\n"
},
{
"alpha_fraction": 0.7764706015586853,
"alphanum_fraction": 0.7764706015586853,
"avg_line_length": 27.33333396911621,
"blob_id": "e83932909c7a35f9657fb060125383a37372c604",
"content_id": "d0a2fe404775afe5da7442b1e7020ac770474655",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 3,
"path": "/README.md",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "# Misc\n\nWhere I dump small things and experiments that don't deserve their own repo.\n"
},
{
"alpha_fraction": 0.6063183546066284,
"alphanum_fraction": 0.6130012273788452,
"avg_line_length": 27.379310607910156,
"blob_id": "ec63d4b5b2f66ba4023b04aeed424d640221c252",
"content_id": "92be01539979f49f8f1d5051b5ba25e7ebf825b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1646,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 58,
"path": "/Javascript/typing-test/typing.js",
"repo_name": "shansenault/Misc",
"src_encoding": "UTF-8",
"text": "var tb = document.getElementById(\"tb\");\nvar sentence = \"Dogs are nice.\";\n// document.write(s);\n\nfunction renderSentence(value) {\n for (var i = 0; i < value.length; ++i) {\n // loop through chars in value string\n // and create a span for each char\n // each span uses id of index of char\n // with default class\n var span = document.createElement(\"span\");\n span.setAttribute(\"id\", \"s{}\".replace(/{}/, i));\n span.setAttribute(\"class\", \"default\");\n document.getElementById(\"s\").appendChild(span);\n span.innerHTML = value[i];\n }\n\n // var sentence = document.createElement('span');\n // sentence.setAttribute('class', 'default');\n // document.getElementById('s').appendChild(sentence);\n // sentence.innerHTML = value;\n}\n\nrenderSentence(sentence);\n\ntb.addEventListener(\"input\", function(e) {\n var correct = verifyInput(tb.value);\n console.log(tb.value);\n console.log(correct);\n\n if (correct) {\n for (var i = 0; i < tb.value.length; ++i) {\n document.getElementById(\"s{}\".replace(/{}/, i)).className = \"green\";\n }\n //console.log(s.innerHTML.substr(0, tb.value.length));\n //s.innerHTML.substr(0, tb.value.length).style.color = '#00ff00';\n //console.log(s);\n //s.style.color('green');\n } else {\n for (var i = 0; i < tb.value.length; ++i) {\n document.getElementById(\"s{}\".replace(/{}/, i)).className = \"red\";\n }\n\n //s.innerHTML.substr(0, tb.value.length);\n //s.style.color('red');\n }\n});\n\nfunction verifyInput(input) {\n var result = true;\n for (var i = 0; i < input.length; ++i) {\n if (input[i] != sentence[i]) {\n result = false;\n break;\n }\n }\n return result;\n}\n"
}
] | 7 |
jarmoj/notesapp-react-redux-boilerplate | https://github.com/jarmoj/notesapp-react-redux-boilerplate | 8970006a4c36627bc73ae5ac17d83b592ef32a9c | 509329cc403789736ba250b5449b6f42298bb154 | 00c35829be0d4f6252007962e1448eb98f5710a1 | refs/heads/master | 2021-05-03T11:26:03.968786 | 2016-09-11T02:29:54 | 2016-09-11T02:29:54 | 64,979,335 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6016964316368103,
"alphanum_fraction": 0.6054072976112366,
"avg_line_length": 23.08085060119629,
"blob_id": "d4292c11310c978328b4c2667d9da3061041ecbb",
"content_id": "57a8816d12803ee899c484c2ffb710ec0e38f7ae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5659,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 235,
"path": "/src/components/NotesSearch.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\n// import PureRenderMixin from 'react-addons-pure-render-mixin';\nimport _ from 'lodash';\n\nconst BACKSPACE = 8;\nconst ENTER = 13;\nconst ESCAPE = 27;\nconst LEFT_ARROW = 37;\nconst RIGHT_ARROW = 39;\n\nexport default class NotesSearch extends React.Component {\n\n static get propTypes() {\n return {\n query: React.PropTypes.string.isRequired,\n selected: React.PropTypes.string,\n search: React.PropTypes.func.isRequired,\n returnPressed: React.PropTypes.func.isRequired,\n };\n }\n\n constructor(props) {\n super(props);\n // this.shouldComponentUpdate = PureRenderMixin.shouldComponentUpdate.bind(this);\n this.state = {\n value: props.query,\n query: props.query,\n lastQuery: '',\n sinceLastType: Number(new Date()),\n };\n\n this.search = this.search.bind(this);\n this.search = _.debounce(this.search, 50);\n\n this.complete = this.complete.bind(this);\n // this.complete = _.debounce(this.complete, 100);\n\n this.onChangeCallback = this.onChangeCallback.bind(this);\n this.onKeyDownCallback = this.onKeyDownCallback.bind(this);\n this.onKeyUpCallback = this.onKeyUpCallback.bind(this);\n }\n\n componentWillReceiveProps(newProps) {\n if (newProps.query === this.state.query\n && newProps.selected === this.state.value) {\n return;\n }\n if (this.millisecondsSinceLastType() < 500) {\n this.complete(newProps);\n }\n }\n\n shouldComponentUpdate(newProps, newState) {\n console.log(newProps);\n console.log(newState);\n if (newProps.query === newProps.lastQuery) {\n return false;\n }\n if (newState.value !== this.state.value) {\n return true;\n }\n if (newState.query !== this.state.value) {\n return true;\n }\n // return false;\n // }\n // if (Number(new Date()) - this.state.sinceLastType < 100) {\n // return false;\n // }\n // if ((newProps.query !== newState.query\n // || newProps.selected !== newState.selected)) {\n // return true;\n // }\n // return false;\n return true;\n }\n\n componentDidUpdate() {\n if (document.activeElement === this.input\n && this.props.selected\n && this.state.query.length !== this.state.value.length) {\n this.input.selectionStart = this.state.query.length;\n this.input.selectionEnd = this.state.value.length;\n }\n }\n\n onKeyUpCallback(e) {\n switch (e.keyCode) {\n case ENTER:\n this.returnPressed();\n break;\n case ESCAPE:\n this.escapePressed();\n break;\n default:\n break;\n }\n }\n\n onKeyDownCallback(e) {\n this.setState({ sinceLastType: Number(new Date()) });\n switch (e.keyCode) {\n case BACKSPACE:\n this.backspacePressed(e);\n break;\n case LEFT_ARROW:\n this.leftArrowPressed(e);\n break;\n case RIGHT_ARROW:\n this.rightArrowPressed(e);\n break;\n default:\n break;\n }\n }\n\n onChangeCallback(e) {\n const query = e.target.value;\n if (this.state.query !== query\n && this.state.value.startsWith(query)) {\n this.setState({\n query,\n });\n } else {\n this.setState({\n value: query,\n query,\n });\n }\n this.search(query);\n }\n\n millisecondsSinceLastType() {\n return Number(new Date()) - this.state.sinceLastType;\n }\n\n complete(newProps) {\n const query = newProps.query;\n const selected = newProps.selected ? 
newProps.selected : query;\n let value = query;\n if (selected.startsWith(query)\n && query.startsWith(this.state.query)) {\n value = selected;\n }\n this.setState({\n value,\n query,\n });\n }\n\n backspacePressed() {\n let selectionStart = this.input.selectionStart;\n if (selectionStart == this.input.selectionEnd) {\n selectionStart--;\n }\n const left = this.state.value.substring(0, selectionStart);\n const right = this.state.value.substring(this.input.selectionEnd, this.state.value.length);\n const newValue = left + right;\n this.setState({\n value: newValue,\n query: newValue,\n });\n this.input.selectionEnd = selectionStart;\n e.preventDefault();\n }\n\n rightArrowPressed(e) {\n if (e.shiftKey\n || this.input.selectionStart === this.input.selectionEnd) {\n return;\n }\n this.setState({\n value: this.state.value,\n query: this.state.value,\n });\n this.input.selectionStart = this.input.selectionEnd;\n e.preventDefault();\n }\n\n leftArrowPressed(e) {\n if (e.shiftKey\n || this.input.selectionStart === this.input.selectionEnd) {\n return;\n }\n this.setState({\n value: this.state.value,\n query: this.state.value,\n });\n this.input.selectionEnd = this.input.selectionStart;\n e.preventDefault();\n }\n\n returnPressed() {\n this.setState({\n value: this.state.value,\n query: this.state.value,\n });\n this.search(this.state.value);\n this.props.returnPressed(this.state.value);\n }\n\n escapePressed() {\n this.onChangeCallback({ target: { value: '' } });\n }\n\n focus() {\n this.input.focus();\n }\n\n search(query) {\n if (this.state.lastQuery !== query\n || this.state.query !== query) {\n this.props.search(query);\n }\n this.setState({\n lastQuery: query,\n });\n }\n\n render() {\n return (\n <div className=\"notes-search-border\">\n <input\n type=\"text\"\n className=\"notes-search\"\n ref={c => this.input = c} // eslint-disable-line\n value={this.state.value}\n onChange={this.onChangeCallback}\n onKeyUp={this.onKeyUpCallback}\n onKeyDown={this.onKeyDownCallback}\n />\n </div>\n );\n }\n}\n"
},
{
"alpha_fraction": 0.6190037131309509,
"alphanum_fraction": 0.6199262142181396,
"avg_line_length": 26.443037033081055,
"blob_id": "ab9d940d7e17e796eae18f38a41639b8c23b97de",
"content_id": "714c75c00cce2dabe95e651c702c53c2de42c56e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2168,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 79,
"path": "/src/components/NotesListItem.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\n// import PureRenderMixin from 'react-addons-pure-render-mixin'\nimport dateFormat from 'dateformat';\n\nexport default class NotesListItem extends React.Component {\n\n static get propTypes() {\n return {\n title: React.PropTypes.string.isRequired,\n text: React.PropTypes.string.isRequired,\n timestamp: React.PropTypes.object.isRequired,\n orderBy: React.PropTypes.string.isRequired,\n isSelected: React.PropTypes.bool.isRequired,\n rowClicked: React.PropTypes.func.isRequired,\n deleteClicked: React.PropTypes.func.isRequired,\n };\n }\n\n constructor(props) {\n super(props);\n // this.shouldComponentUpdate = PureRenderMixin.shouldComponentUpdate.bind(this);\n this.onClickRowCallback = this.onClickRowCallback.bind(this);\n this.onClickDeleteCallback = this.onClickDeleteCallback.bind(this);\n }\n\n onClickRowCallback() {\n this.props.rowClicked(this.props.title);\n }\n\n onClickDeleteCallback() {\n this.props.deleteClicked(this.props.title);\n }\n\n timestamp() {\n if (this.props.timestamp && this.props.orderBy) {\n const parts = this.props.orderBy.split(' ');\n const which = parts[0] === 'title' ? 'modified' : parts[0];\n return dateFormat(this.props.timestamp.get(which));\n }\n return '';\n }\n\n isSelected() {\n if (!this.props.isSelected) {\n return '';\n }\n\n if (this.props.isSelected) {\n return ' selected';\n }\n return '';\n }\n\n render() {\n return (\n <tr\n className={'notes-list-row' + this.isSelected()} // eslint-disable-line\n onClick={this.onClickRowCallback}\n ref={c => this.row = c} // eslint-disable-line\n >\n <td className=\"notes-list-item-title\">\n {this.props.title}\n <span className=\"notes-list-item-text\">\n {' '} - {this.props.text}\n </span>\n </td>\n <td className=\"notes-list-item-date\">\n {this.timestamp()}\n </td>\n <td className=\"notes-list-item-destroy\">\n <button\n className=\"destroy\"\n onClick={this.onClickDeleteCallback}\n >X</button>\n </td>\n </tr>\n );\n }\n}\n"
},
{
"alpha_fraction": 0.5747663378715515,
"alphanum_fraction": 0.5872274041175842,
"avg_line_length": 25.75,
"blob_id": "de396db99219130ce02460188960f23b7145f0db",
"content_id": "2862802c80ece57a7c07216797498dcb7510bcac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 24,
"path": "/server/python_server/setup.py",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "\"\"\"NotesApp setup.py file.\"\"\"\nimport os\nfrom setuptools import setup\n\n\ndef read(fname):\n \"\"\"Read file in as string.\"\"\"\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(name=\"NotesApp python backend\",\n version=\"0.0.1\",\n author=\"Jarmo Jomppanen\",\n author_email=\"[email protected]\",\n description=\"A NotesApp backend with simple REST API\",\n license=\"MIT\",\n keywords=\"notes app backend\",\n packages=['notes_app_server', 'test'],\n long_description=read('README.txt'),\n install_requires=[\n \"requests\",\n \"tornado>=4.3\",\n \"tornado-cors>=0.6.0\",\n \"pytest\",\n ],)\n"
},
{
"alpha_fraction": 0.5934878587722778,
"alphanum_fraction": 0.5967581272125244,
"avg_line_length": 26.47265625,
"blob_id": "a1e480a35bf18405aa0c4e835864d4352e7520ab",
"content_id": "16bd21e8b2d971e6f9a72d2dbf7dc11011dd0132",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 7033,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 256,
"path": "/test/actions/index_spec.js",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import TestUtils from 'react-addons-test-utils';\nimport { describe, it } from 'mocha';\nimport { expect } from 'chai';\nimport { List, Set, is } from 'immutable';\nimport diff from 'immutablediff';\nimport * as tk from 'timekeeper';\nimport urlencode from 'urlencode';\nimport configureStore from 'redux-mock-store';\nimport thunk from 'redux-thunk';\nimport _state from '../test_data';\nimport * as types from '../../src/types.js';\nimport * as actions from '../../src/actions/index';\n\nconst middlewares = [thunk];\nconst mockStore = configureStore(middlewares);\n\ndescribe('actions', () => {\n it('should create an action to set the complete state of the app', () => {\n const state = _state;\n const expectedAction = {\n type: types.SET_STATE,\n state,\n };\n expect(actions.setState(state)).to.deep.equal(expectedAction);\n });\n\n it('should give action to set query and set notes to empty with difficult query', () => {\n global.mockAxios.reset();\n const encoded = urlencode('test query');\n global.mockAxios.onGet(`${actions.SEARCH_URL}${encoded}`).reply(200, {\n notes: [],\n });\n\n const query = 'test query';\n const notes = List.of();\n const expectedActions = [\n {\n type: types.SET_QUERY,\n query,\n },\n {\n type: types.SET_NOTES,\n notes,\n },\n {\n type: types.SELECT_NOTE,\n title: null,\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n return store.dispatch(actions.search(query)).then(() => {\n const actionsGot = store.getActions();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n });\n\n it('should give action to set query and set notes to all with empty query', () => {\n global.mockAxios.reset();\n global.mockAxios.onGet(`${actions.SEARCH_URL}`).reply(200, {\n notes: _state.get('notes').toJS(),\n });\n\n const query = '';\n const notes = _state.get('notes');\n const expectedActions = [\n {\n type: types.SET_QUERY,\n query,\n },\n {\n type: types.SET_NOTES,\n notes,\n },\n {\n type: types.SELECT_NOTE,\n title: null,\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n return store.dispatch(actions.search(query)).then(() => {\n const actionsGot = store.getActions();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n });\n\n it('should create an action to add new note with given title and text', () => {\n global.mockAxios.reset();\n global.mockAxios.onPut('http://localhost:3456/notes').reply(200);\n\n const timestamp = (new Date()).toISOString();\n tk.freeze(timestamp);\n const title = 'test title';\n const text = 'test content for note text';\n const expectedActions = [\n {\n type: types.ADD_NOTE,\n title,\n text,\n timestamp,\n },\n {\n type: types.SELECT_NOTE,\n title,\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n return store.dispatch(actions.addNote(title, text)).then(() => {\n const actionsGot = store.getActions();\n tk.reset();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n });\n\n it('should create an action to select note by title', () => {\n const title = 'react';\n const notes = List.of(_state.get('notes').get(0));\n const expectedActions = [\n {\n type: types.SET_QUERY,\n query: title,\n },\n {\n type: types.SELECT_NOTE,\n title,\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n store.dispatch(actions.selectNote(title));\n const actionsGot = store.getActions();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n\n it('clearSelection should empty query, set notes to all and selected to null', () => {\n 
global.mockAxios.reset();\n global.mockAxios.onGet(`${actions.SEARCH_URL}`).reply(200, {\n notes: _state.get('notes').toJS(),\n });\n\n const expectedActions = [\n {\n type: types.SELECT_NOTE,\n title: null,\n },\n {\n type: types.SET_QUERY,\n query: '',\n },\n {\n type: types.SET_NOTES,\n notes: _state.get('notes'),\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n return store.dispatch(actions.clearSelection()).then(() => {\n const actionsGot = store.getActions();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n });\n\n it('should create an action to edit the note having title with given text', () => {\n const url = `${actions.ADD_URL}`;\n global.mockAxios.reset();\n global.mockAxios.onPut(url).reply(200);\n\n const timestamp = (new Date()).toISOString();\n tk.freeze(timestamp);\n const selected = 'test title';\n const title = 'test title new';\n const text = 'test content for note text';\n const expectedActions = [\n {\n type: types.EDIT_NOTE,\n selected,\n title,\n text,\n timestamp,\n },\n {\n type: types.SELECT_NOTE,\n title,\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n return store.dispatch(actions.editNote(selected, title, text)).then(() => {\n const actionsGot = store.getActions();\n tk.reset();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n });\n\n it('should create an action to delete note with given title', () => {\n const selected = 'test title';\n const encoded = urlencode(selected);\n const url = `${actions.DELETE_URL}/${encoded}`;\n global.mockAxios.reset();\n global.mockAxios.onDelete(url).reply(200);\n\n const expectedActions = [\n {\n type: types.DELETE_NOTE,\n selected,\n },\n {\n type: types.SELECT_NOTE,\n title: null,\n },\n ];\n\n const getState = _state;\n const store = mockStore(getState);\n return store.dispatch(actions.deleteNote(selected)).then(() => {\n const actionsGot = store.getActions();\n expect(actionsGot).to.deep.equal(expectedActions);\n });\n });\n\n it('should create an action to set list order by to title', () => {\n const expectedAction = {\n type: types.ORDER_BY_TITLE,\n };\n expect(actions.orderByTitle()).to.deep.equal(expectedAction);\n });\n\n it('should create an action to set list order by to modified', () => {\n const expectedAction = {\n type: types.ORDER_BY_MODIFIED,\n };\n expect(actions.orderByModified()).to.deep.equal(expectedAction);\n });\n\n it('should create an action to set list order by to created', () => {\n const expectedAction = {\n type: types.ORDER_BY_CREATED,\n };\n expect(actions.orderByCreated()).to.deep.equal(expectedAction);\n });\n\n it('should create an action to toggle list order between ascending / descending', () => {\n const expectedAction = {\n type: types.TOGGLE_ASCENDING_DESCENDING,\n };\n expect(actions.toggleAscendingDescending()).to.deep.equal(expectedAction);\n });\n});\n"
},
{
"alpha_fraction": 0.6986607313156128,
"alphanum_fraction": 0.6986607313156128,
"avg_line_length": 24.600000381469727,
"blob_id": "e3273c1868bbc4846a9c848e2dc596a1b1d6ac47",
"content_id": "fe6aa61eca6232490bbc4bc364292e7231b637c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 896,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 35,
"path": "/src/index.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport ReactDOM from 'react-dom';\nimport { compose, createStore, applyMiddleware } from 'redux';\nimport { Provider } from 'react-redux';\nimport thunk from 'redux-thunk';\nimport reducer from './reducers/index';\nimport * as actions from './actions/index';\nimport { NotesAppContainer } from './containers/NotesApp';\nimport _state from '../test/test_data';\n\nfunction createToolsStore(rootReducer) {\n return createStore(\n rootReducer,\n _state,\n compose(\n applyMiddleware(thunk),\n window.devToolsExtension ? window.devToolsExtension() : f => f\n )\n );\n}\n\nconst store = createToolsStore(reducer);\n\nstore.dispatch(actions.search(''));\n\nif (typeof window !== 'undefined') {\n require('./styles/index.scss'); // eslint-disable-line\n}\n\nReactDOM.render(\n <Provider store={store}>\n <NotesAppContainer />\n </Provider>,\n document.getElementById('app')\n);\n"
},
{
"alpha_fraction": 0.6530612111091614,
"alphanum_fraction": 0.7551020383834839,
"avg_line_length": 11.25,
"blob_id": "3064ce2011e99f55d6ff871ea501f224ac5ab8eb",
"content_id": "fe95f8e61dcccfcb0982b4f0bab91787014daebb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 49,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 4,
"path": "/server/python_server/requirements.txt",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "requests\ntornado>=4.3\ntornado-cors==0.6.0\npytest\n"
},
{
"alpha_fraction": 0.6627907156944275,
"alphanum_fraction": 0.6627907156944275,
"avg_line_length": 33.400001525878906,
"blob_id": "023f3a22c4b262def2379099e75a481abfa1f96e",
"content_id": "7852c429662362093cfcf23512d4132cb7b47bba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 20,
"path": "/server/python_server/conftest.py",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "\"\"\"Configure pytest to start, and stop the server around tests.\"\"\"\nimport pytest\nimport subprocess\nimport time\nimport sys\n\nsys.path.append(\"test\")\nimport test_server\n\[email protected](scope=\"session\", autouse=True)\ndef start_server(request):\n \"\"\"Start the server and add finalizer to stop it.\"\"\"\n test_server.start_server()\n test_server.wait_server()\n test_server.check_using_test_db()\n request.addfinalizer(test_server.stop_server)\n # proc = subprocess.Popen([\"python\", \"notes_app_server/server.py\",\n # \"--use-test-db\"], stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE)\n # request.addfinalizer(proc.terminate)\n"
},
{
"alpha_fraction": 0.6384544372558594,
"alphanum_fraction": 0.6399263739585876,
"avg_line_length": 31.544910430908203,
"blob_id": "66740a72efdf384300ebc534fb2b7594f7c95d36",
"content_id": "4e607ae26b7aab61ecc551ed753cbc82d7381bc8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5435,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 167,
"path": "/test/reducers/index_spec.js",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import { List, Map, fromJS, Set } from 'immutable';\nimport { describe, it } from 'mocha';\nimport { expect } from 'chai';\nimport reducer from '../../src/reducers/index';\nimport _state from '../test_data';\nimport * as types from '../../src/types.js';\n\ndescribe('reducer', () => {\n it('handles SET_STATE', () => {\n const initialState = new Map();\n const action = {\n type: types.SET_STATE,\n state: _state,\n };\n\n const nextState = reducer(initialState, action);\n expect(nextState).to.equal(_state);\n });\n\n it('handles SET_QUERY by changing the query from previous to new', () => {\n const initialState = _state;\n const query = 'test query string';\n const action = {\n type: types.SET_QUERY,\n query,\n };\n const expectedState = _state.setIn(['query'], query);\n const nextState = reducer(initialState, action);\n expect(nextState).to.equal(expectedState);\n });\n\n it('handles ADD_NOTE by adding note to notes of the state', () => {\n const timestamp = (new Date()).toISOString();\n const initialState = _state;\n const initialNotes = _state.get('notes');\n const title = 'test title string';\n const text = 'test content for note text';\n const action = {\n type: types.ADD_NOTE,\n title,\n text,\n timestamp,\n };\n const nextState = reducer(initialState, action);\n const nextNotes = nextState.get('notes');\n\n expect(nextNotes.count()).to.equal(initialNotes.count() + 1);\n\n const newNote = nextNotes.find(note => note.get('title') === title);\n expect(newNote.get('text') === text);\n\n const modified = newNote.getIn(['timestamp', 'modified']);\n const created = newNote.getIn(['timestamp', 'created']);\n\n const modifiedDate = new Date(modified);\n const createdDate = new Date(created);\n\n expect(modifiedDate.toISOString()).to.equal(createdDate.toISOString());\n });\n\n it('handles SELECT_NOTE by changing selected state', () => {\n const initialState = _state;\n const title = 'redux';\n const action = {\n type: types.SELECT_NOTE,\n title,\n };\n const nextState = reducer(initialState, action);\n\n expect(nextState.get('selected')).to.equal(title);\n });\n\n it('handles EDIT_NOTE by changing the selected note contents', () => {\n const timestamp = (new Date()).toISOString();\n const initialState = _state;\n const initialNotes = _state.get('notes');\n const selected = 'redux';\n const title = 'some new title';\n const text = 'some new content text';\n const action = {\n type: types.EDIT_NOTE,\n selected,\n title,\n text,\n timestamp,\n };\n const initialNote = initialNotes.find(note => note.get('title') === selected);\n\n const initialModified = initialNote.getIn(['timestamp', 'modified']);\n\n const nextState = reducer(initialState, action);\n const nextNotes = nextState.get('notes');\n\n expect(nextNotes.count()).to.equal(initialNotes.count());\n\n const newNote = nextNotes.find(note => note.get('title') === title);\n\n const modified = newNote.getIn(['timestamp', 'modified']);\n const expected = (new Date(initialModified)).toISOString();\n expect((new Date(modified)).toISOString()).to.not.equal(expected);\n\n expect(newNote.get('title')).to.equal(title);\n expect(newNote.get('text')).to.equal(text);\n });\n\n it('handles DELETE_NOTE by deleting the note from notes', () => {\n const initialState = _state;\n const selected = 'redux';\n const action = {\n type: types.DELETE_NOTE,\n selected,\n };\n const nextState = reducer(initialState, action);\n expect(nextState.get('notes').count()).to.equal(initialState.get('notes').count() - 1);\n\n const deletedIndex = 
nextState.get('notes').findIndex(note => note.get('title') === selected);\n expect(deletedIndex).to.equal(-1);\n });\n\n it('handles ORDER_BY_TITLE by setting list order be by title', () => {\n const initialState = _state.set('orderBy', 'modified ascending');\n const action = {\n type: types.ORDER_BY_TITLE,\n };\n expect(initialState.get('orderBy')).to.not.contain('title');\n\n const nextState = reducer(initialState, action);\n expect(nextState.get('orderBy')).to.contain('title');\n });\n\n it('handles ORDER_BY_MODIFIED by setting list order be by modified', () => {\n const initialState = _state;\n const action0 = {\n type: types.ORDER_BY_TITLE,\n };\n const action = {\n type: types.ORDER_BY_MODIFIED,\n };\n const nextState0 = reducer(initialState, action0);\n expect(nextState0.get('orderBy')).to.not.contain('modified');\n\n const nextState = reducer(nextState0, action);\n expect(nextState.get('orderBy')).to.contain('modified');\n });\n\n it('handles ORDER_BY_CREATED by setting list order be by created', () => {\n const initialState = _state;\n const action = {\n type: types.ORDER_BY_CREATED,\n };\n expect(initialState.get('orderBy')).to.not.contain('created');\n\n const nextState = reducer(initialState, action);\n expect(nextState.get('orderBy')).to.contain('created');\n });\n\n it('handles TOGGLE_ASCENDING_DESCENDING to get list orderder between (a/de)scending', () => {\n const initialState = _state;\n const action = {\n type: types.TOGGLE_ASCENDING_DESCENDING,\n };\n expect(initialState.get('orderBy')).to.contain('ascending');\n\n const nextState = reducer(initialState, action);\n expect(nextState.get('orderBy')).to.contain('descending');\n });\n});\n"
},
{
"alpha_fraction": 0.7007092237472534,
"alphanum_fraction": 0.7120567560195923,
"avg_line_length": 16.407407760620117,
"blob_id": "2a2a70aaf6a91862c255fae62d5a4ac02eada6a4",
"content_id": "25cc27d1664a8576865aa6eadc7e18b079b500c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1410,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 81,
"path": "/README.md",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "# Notes App - React - Redux\n\n(under work)\n\nThis is a Notes App that got its inspiration from Notational Velocity, nvALT,\nSimplenote, and other such note taking tools. I used the project as a climbing\ntree to latest web stack and especially to React and Redux.\n\n## Some of the used technology so far\n\n- Babel\n- Webpack\n\n- React\n- Redux\n- ImmutableJS\n- Axios\n\n- Css-loader\n- Sass-loader\n- Style-loader\n\n- Mocha\n- Chai\n- Enzyme\n- Jsdom\n\n- Python backend\n\n\n\n## Used References\n\nAt first I started Following the following tutorial(s) somewhat:\n\n\"Getting Started with React, Redux and Immutable: a Test-Driven Tutorial\n(Part 1)\" - Nicolas Goutay :\n\nhttp://www.theodo.fr/blog/2016/03/getting-started-with-react-redux-and-immutable-a-test-driven-tutorial-part-1/\n\nWho in turn followed :\n\n\"A Comprehensive Guide to Test-First Development with Redux, React, and\nImmutable\" - Tero Parviainen :\n\nhttp://teropa.info/blog/2015/09/10/full-stack-redux-tutorial.html\n\nAt some point I felt confident about my setup and the development diverged from\nthose examples.\n\n\n\n## Building a State Tree\n\nOur app is composed of notes:\n\nstate\n notes\n item\n id\n title\n text\n timestamp\n added\n modified\n item\n ...\n filter (search)\n selected\n\n## UI for the app\n\nWe split the app into following components:\n\n- NotesApp\n - NotesHeader\n - NotesSearch\n - NotesList\n - NotesListItem\n - NotesEdit\n - NotesFooter\n"
},
{
"alpha_fraction": 0.6118068099021912,
"alphanum_fraction": 0.6118068099021912,
"avg_line_length": 21.66216278076172,
"blob_id": "8b0e2d731d92a24af9094738a0c2bbe4de6fd261",
"content_id": "1ddd3660a5e4ecb56254252c2371cb68e2dd38a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1677,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 74,
"path": "/src/components/NotesEdit.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\n// import PureRenderMixin from 'react-addons-pure-render-mixin'\n\nexport default class NotesEdit extends React.Component {\n\n static get propTypes() {\n return {\n noteEdited: React.PropTypes.func.isRequired,\n selected: React.PropTypes.string,\n };\n }\n\n constructor(props) {\n super(props);\n // this.shouldComponentUpdate = PureRenderMixin.shouldComponentUpdate.bind(this);\n this.state = {\n value: '',\n enabled: false,\n };\n\n this.onChangeCallback = this.onChangeCallback.bind(this);\n }\n\n componentWillReceiveProps(props) {\n this.setState(this.stateValueFromProps(props));\n }\n\n onChangeCallback(e) {\n this.setState({ value: e.target.value });\n this.props.noteEdited(e.target.value);\n }\n\n stateValueFromProps(props) {\n const note = this.findSelectedNote(props);\n if (!props.selected || !note) {\n return {\n value: '',\n };\n }\n return {\n value: note.get('text'),\n };\n }\n\n findSelectedNote(props) {\n const noteIndex = props.notes.findIndex(note => note.get('title') === props.selected);\n const note = props.notes.get(noteIndex);\n return note;\n }\n\n isDisabled() {\n if (this.props.selected || this.state.enabled) {\n return '';\n }\n return ' disabled';\n }\n\n focus() {\n this.textarea.focus();\n }\n\n render() {\n return (\n <div className=\"notes-edit-border\">\n <textarea\n className={'notes-edit' + this.isDisabled()} // eslint-disable-line\n ref={c => this.textarea = c} // eslint-disable-line\n value={this.state.value}\n onChange={this.onChangeCallback}\n />\n </div>\n );\n }\n}\n"
},
{
"alpha_fraction": 0.533207356929779,
"alphanum_fraction": 0.5441414713859558,
"avg_line_length": 26.036497116088867,
"blob_id": "d0a4aa1ed0ba4339c5838e643ed0b33063013ddb",
"content_id": "954ee6f860e02aa3df05385e5c88b99600f61674",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7408,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 274,
"path": "/server/python_server/test/test_server.py",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "\"\"\"Test NotesApp backend for its rest api.\"\"\"\nimport requests\nimport unittest\nimport subprocess\nimport time\nimport json\n\n\nPORT = 3456\nURL = \"http://localhost:\" + str(PORT)\n\n\ndef get(path):\n \"\"\"Perform Get request.\"\"\"\n return requests.get(URL + path)\n\n\ndef put(path, it):\n \"\"\"Perform Put request.\"\"\"\n return requests.put(URL + path, json=it)\n\n\ndef delete(path):\n \"\"\"Perform Delete request.\"\"\"\n return requests.delete(URL + path)\n\n\nclass TestNotes(unittest.TestCase):\n \"\"\"Test /notes/* .\n\n /notes\n /notes/titles\n \"\"\"\n\n def setUp(self):\n \"\"\"Set up test on server side.\"\"\"\n get(\"/test/begin\")\n\n def tearDown(self):\n \"\"\"Tear down test on server side.\"\"\"\n get(\"/test/end\")\n\n def test_notes_root_get(self):\n \"\"\"Test /notes .\"\"\"\n r = get(\"/notes\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('notes', r.json())\n notes = r.json()['notes']\n self.assertEqual(len(notes), 3)\n note = notes[0]\n for part in ['title', 'text']:\n self.assertIn(part, note)\n\n def test_notes_titles(self):\n \"\"\"Test /notes/titles .\"\"\"\n r = get(\"/notes/titles\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('note_titles', r.json())\n titles = r.json()['note_titles']\n self.assertEqual(len(titles), 3)\n\n def test_notes_root_put(self):\n \"\"\"Test /notes .\"\"\"\n r = put(\"/notes\", {\n 'title': 'add a note',\n 'text': 'with some novel content',\n 'timestamp': {\n 'created': 'EEST 1985-10-12 09:22',\n 'modified': 'EEST 1985-10-12 09:22'\n }\n })\n self.assertEqual(r.status_code in [200, 201], True)\n\n r = get(\"/notes\")\n self.assertIn('notes', r.json())\n notes = r.json()['notes']\n self.assertEqual(len(notes), 4)\n note = notes[0]\n for part in ['title', 'text']:\n self.assertIn(part, note)\n\n\nclass TestNote(unittest.TestCase):\n \"\"\"Test /note/* .\n\n /note/:title\n GET - return note that has title :title\n DELETE - delete note with title :title\n \"\"\"\n\n def setUp(self):\n \"\"\"Set up test on server side.\"\"\"\n get(\"/test/begin\")\n\n def tearDown(self):\n \"\"\"Tear down test on server side.\"\"\"\n get(\"/test/end\")\n\n def test_note_get_find(self):\n \"\"\"Return note with given title - success.\"\"\"\n r = get(\"/note/react\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('title', r.json())\n self.assertEqual(r.json()[\"title\"], \"react\")\n\n def test_note_get_find_not(self):\n \"\"\"Return note with given title - failure.\"\"\"\n r = get(\"/note/nothere\")\n self.assertEqual(r.status_code, 404)\n\n def test_note_delete(self):\n \"\"\"Delete note with given title.\"\"\"\n r = delete(\"/note/react\")\n self.assertEqual(r.status_code in [200, 204], True)\n\n\n\nclass TestVersion(unittest.TestCase):\n \"\"\"Test /version .\n\n /version\n \"\"\"\n\n def test_version_root(self):\n \"\"\"Test /version .\"\"\"\n r = get(\"/version\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('version', r.json())\n self.assertIn('api_version', r.json())\n self.assertIn('is_test_db', r.json())\n\n\nclass TestSearch(unittest.TestCase):\n \"\"\"Test /search?q=(.*) .\n\n /search?q=(.*)\n \"\"\"\n\n def setUp(self):\n \"\"\"Set up test on server side.\"\"\"\n get(\"/test/begin\")\n\n def tearDown(self):\n \"\"\"Tear down test on server 
side.\"\"\"\n get(\"/test/end\")\n\n def test_search_empty_return_all(self):\n \"\"\"Test /search?q= .\"\"\"\n r = get(\"/search?q=\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('notes', r.json())\n notes = r.json()['notes']\n self.assertEqual(len(notes), 3)\n note = notes[0]\n for part in ['title', 'text']:\n self.assertIn(part, note)\n\n def test_search_wrong_return_none(self):\n \"\"\"Test /search?q=nothing .\"\"\"\n r = get(\"/search?q=nothing\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('notes', r.json())\n notes = r.json()['notes']\n self.assertEqual(len(notes), 0)\n\n def test_search_to_get_one(self):\n \"\"\"Test /search?q=react .\"\"\"\n r = get(\"/search?q=react\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('notes', r.json())\n notes = r.json()['notes']\n self.assertEqual(len(notes), 1)\n note = notes[0]\n for part in ['title', 'text']:\n self.assertIn(part, note)\n self.assertEqual(note[\"title\"], \"react\")\n\n def test_search_prefix_to_get_two(self):\n \"\"\"Test /search?q=re .\"\"\"\n r = get(\"/search?q=re\")\n self.assertEqual(r.status_code, 200)\n self.assertIn('application/json', r.headers['content-type'])\n self.assertIn('notes', r.json())\n notes = r.json()['notes']\n self.assertEqual(len(notes), 2)\n note = notes[0]\n for part in ['title', 'text']:\n self.assertIn(part, note)\n\n\nclass TestTodo(unittest.TestCase):\n \"\"\"Test todoList .\"\"\"\n\n def setUp(self):\n \"\"\"Set up test on server side.\"\"\"\n get(\"/test/begin\")\n\n def tearDown(self):\n \"\"\"Tear down test on server side.\"\"\"\n get(\"/test/end\")\n\n def test_persist_to_disk(self):\n \"\"\"Add persisting on disk.\"\"\"\n pass\n\n\nclass NotTestDb(Exception):\n \"\"\"Exception raised if not using test database.\"\"\"\n\n pass\n\n\ndef check_using_test_db():\n \"\"\"Check that the db in use is for testing.\"\"\"\n e = NotTestDb(\"Not a test database! Start server with a test setup!\")\n try:\n if not get(\"/version\").json()[\"is_test_db\"]:\n raise e\n except:\n raise e\n\n\nserver_process = None\n\n\ndef start_server():\n \"\"\"Start NotesApp backend.\"\"\"\n global server_process\n server_process = subprocess.Popen([\"python\", \"notes_app_server/server.py\",\n \"--use-test-db\"])\n\n\ndef wait_server():\n \"\"\"Wait until server responding.\"\"\"\n ready = False\n while not ready:\n time.sleep(1)\n try:\n requests.get(URL + \"/version\")\n ready = True\n except requests.exceptions.ConnectionError:\n pass\n\n\ndef stop_server():\n \"\"\"Stop NotesApp backend.\"\"\"\n global server_process\n time.sleep(1)\n subprocess.check_output([\"kill\", \"-s\", \"SIGINT\", str(server_process.pid)])\n\n\ndef run_tests():\n \"\"\"Run tests and then stop server.\"\"\"\n check_using_test_db()\n unittest.main(exit=False)\n\n\ndef main():\n \"\"\"Run the whole thing.\"\"\"\n start_server()\n wait_server()\n run_tests()\n stop_server()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.592476487159729,
"alphanum_fraction": 0.5950580835342407,
"avg_line_length": 29.29608917236328,
"blob_id": "4035cc76f1c989574103b7713425e67f9958b4fe",
"content_id": "db9628c2803873ccb0e384287c68e63aca98bdd5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5423,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 179,
"path": "/src/components/NotesList.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\n// import PureRenderMixin from 'react-addons-pure-render-mixin'\nimport { List } from 'immutable';\nimport ImmutablePropTypes from 'react-immutable-proptypes';\nimport NotesListItem from './NotesListItem';\n\nexport const UP_POINTING = '\\u25B3';\nexport const DOWN_POINTING = '\\u25BD';\n\nexport class NotesList extends React.Component {\n\n static get propTypes() {\n return {\n notes: ImmutablePropTypes.list.isRequired,\n selected: React.PropTypes.string,\n orderBy: React.PropTypes.string.isRequired,\n titleHeaderClicked: React.PropTypes.func.isRequired,\n timestampHeaderClicked: React.PropTypes.func.isRequired,\n arrowHeaderClicked: React.PropTypes.func.isRequired,\n selectNote: React.PropTypes.func.isRequired,\n deleteNote: React.PropTypes.func.isRequired,\n };\n }\n\n constructor(props) {\n super(props);\n // this.shouldComponentUpdate = PureRenderMixin.shouldComponentUpdate.bind(this);\n this.titleHeaderOnClickCallback = this.titleHeaderOnClickCallback.bind(this);\n this.timestampHeaderOnClickCallback = this.timestampHeaderOnClickCallback.bind(this);\n this.arrowHeaderOnClickCallback = this.arrowHeaderOnClickCallback.bind(this);\n }\n\n titleHeaderOnClickCallback() {\n if (this.props.titleHeaderClicked !== undefined) {\n this.props.titleHeaderClicked();\n }\n }\n\n timestampHeaderOnClickCallback() {\n if (this.props.timestampHeaderClicked !== undefined) {\n this.props.timestampHeaderClicked();\n }\n }\n\n arrowHeaderOnClickCallback() {\n if (this.props.arrowHeaderClicked !== undefined) {\n this.props.arrowHeaderClicked();\n }\n }\n\n orderBy(notes) {\n if (this.props.orderBy === undefined) {\n return this.orderByDefault(notes);\n }\n\n if (this.props.orderBy.indexOf('title') !== -1) {\n return this.orderByTitle(notes);\n } else if (this.props.orderBy.indexOf('modified') !== -1) {\n return this.orderByModified(notes);\n } else if (this.props.orderBy.indexOf('created') !== -1) {\n return this.orderByCreated(notes);\n }\n return this.orderByDefault(notes);\n }\n\n orderByDefault(notes) {\n return this.orderByModified(notes);\n }\n\n orderByTitle(notes) {\n if (notes === undefined) {\n return List.of();\n }\n return notes.sortBy(note => note.get('title'));\n }\n\n orderByModified(notes) {\n if (notes === undefined) {\n return List.of();\n }\n return notes.sortBy(note => new Date(note.getIn(['timestamp', 'modified'])));\n }\n\n orderByCreated(notes) {\n if (notes === undefined) {\n return List.of();\n }\n return notes.sortBy(note => new Date(note.getIn(['timestamp', 'created'])));\n }\n\n aDeScending(notes) {\n if (this.props.orderBy === undefined) {\n return notes.reverse();\n }\n\n if (this.props.orderBy.indexOf('descending') !== -1) {\n return notes.reverse();\n } else if (this.props.orderBy.indexOf('ascending') !== -1) {\n return notes;\n }\n return notes.reverse();\n }\n\n timestampHeaderText() {\n if (this.props.orderBy === undefined) {\n return 'Modified';\n }\n\n const parts = this.props.orderBy.split(' ');\n\n if (parts[0] === 'modified') {\n return 'Modified';\n } else if (parts[0] === 'created') {\n return 'Created';\n }\n return 'Modified';\n }\n\n arrowHeaderText() {\n if (this.props.orderBy === undefined) {\n return DOWN_POINTING;\n }\n\n if (this.props.orderBy.indexOf('descending') !== -1) {\n return DOWN_POINTING;\n } else if (this.props.orderBy.indexOf('ascending') !== -1) {\n return UP_POINTING;\n }\n return DOWN_POINTING;\n }\n\n render() {\n return (\n <div className=\"notes-list-border\">\n <table className=\"notes-list-rows\">\n 
<thead>\n <tr>\n <th\n className=\"notes-list-header-title\"\n ref={c => this.titleHeader = c} // eslint-disable-line\n onClick={this.titleHeaderOnClickCallback}\n >Title</th>\n <th\n className=\"notes-list-header-date\"\n ref={c => this.timestampHeader = c} // eslint-disable-line\n onClick={this.timestampHeaderOnClickCallback}\n >{this.timestampHeaderText()}</th>\n <th\n className=\"notes-list-header-destroy\"\n ref={c => this.arrowHeader = c} // eslint-disable-line\n onClick={this.arrowHeaderOnClickCallback}\n >{this.arrowHeaderText()}</th>\n </tr>\n </thead>\n </table>\n <div className=\"notes-list-rows-view scrollable\">\n <table className=\"notes-list-rows\">\n <tbody>\n {this.aDeScending(this.orderBy(this.props.notes)).map(item =>\n <NotesListItem\n key={item.get('title')}\n ref={c => this.items = (this.items ? this.items.concat([c]) : [c])} // eslint-disable-line\n title={item.get('title')}\n text={item.get('text')}\n timestamp={item.get('timestamp')}\n orderBy={this.props.orderBy}\n rowClicked={this.props.selectNote}\n deleteClicked={this.props.deleteNote}\n isSelected={this.props.selected === item.get('title')}\n />\n )\n }\n </tbody>\n </table>\n </div>\n </div>\n );\n }\n}\n"
},
{
"alpha_fraction": 0.604764461517334,
"alphanum_fraction": 0.606930136680603,
"avg_line_length": 26.567163467407227,
"blob_id": "45893f40e804cfa1300146b86b4aa98b2bc12947",
"content_id": "fbd26c2f849580f0bb3c17409cdfdfb9d1ade045",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1851,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 67,
"path": "/test/components/NotesSearch_spec.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport TestUtils from 'react-addons-test-utils';\nimport { describe, it } from 'mocha';\nimport { expect } from 'chai';\nimport { mount, shallow } from 'enzyme';\nimport NotesSearch from '../../src/components/NotesSearch';\n\n\nconst { renderIntoDocument,\n Simulate } = TestUtils;\n\ndescribe('NotesSearch', () => {\n it('returns the query string in search() when edited', () => {\n let wasCalled = '';\n const inputStr = 'jkhiU/(F/&RU€%€DUF&Guihiughgdj)';\n function search(text) {\n wasCalled = text;\n }\n const component = renderIntoDocument(\n <NotesSearch\n search={search}\n query=\"\"\n selected=\"\"\n returnPressed={() => null}\n />\n );\n Simulate.change(component.input, { target: { value: inputStr } });\n component.search.flush();\n\n expect(wasCalled).to.equal(inputStr);\n });\n\n it('pressing return will call returnPressed()', () => {\n let wasCalled = false;\n function callback() {\n wasCalled = true;\n }\n const component = mount(\n <NotesSearch\n returnPressed={callback}\n search={() => null}\n query=\"\"\n selected=\"\"\n />\n );\n const search = component.find('input');\n search.simulate('keyUp', { keyCode: 13, which: 13, key: 'Enter' });\n expect(wasCalled).to.equal(true);\n });\n\n it.skip('TODO: test returnPressed will remove selection and make it also part of query', () => {\n expect(true).to.equal(false);\n });\n\n it.skip('TODO: test complete selected with highlight if it starts with the query part', () => {\n expect(true).to.equal(false);\n });\n\n it.skip('TODO: test arrow right include highlighted into query', () => {\n expect(true).to.equal(false);\n });\n\n it.skip('TODO: test arrow right include highlighted into query', () => {\n expect(true).to.equal(false);\n });\n\n});\n"
},
{
"alpha_fraction": 0.4706616699695587,
"alphanum_fraction": 0.4706616699695587,
"avg_line_length": 32.375,
"blob_id": "9d18d9ed8f41549c1f6d264995542cae400ed54d",
"content_id": "0176859a30d00ad8a7875ceffd157e2b851d9d60",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 801,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 24,
"path": "/server/python_server/README.txt",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "===================================================\n A Simple NotesApp Backend - Python Implementation\n===================================================\n\nA simple Tornado server that implements a tiny REST API for adding notes and\nsearching them.\n\n\n----------\n REST API\n----------\n\n /notes\n GET - returns all notes\n PUT - update/create new note in notes\n /notes/titles\n GET - returns titles of all notes\n /note/:title\n GET - return note that has title :title\n DELETE - delete note with title :title\n /search?q=:query\n GET - search notes with query and return the notes\n /version\n GET - return database version information\n"
},
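The README above documents the same REST API that the Tornado handlers in server.py (shown later in this record) implement and that the frontend's src/actions/index.js consumes. A minimal client sketch using Python `requests`, assuming the backend is running locally on port 3456 (the PORT constant in server.py); the note payload shape mirrors restAddUpdateNote in the frontend:

```python
# Minimal sketch of exercising the notes REST API described above.
# Assumes the backend is running locally on port 3456 (PORT in server.py);
# the payload shape mirrors restAddUpdateNote in src/actions/index.js.
import requests

BASE = 'http://localhost:3456'

note = {
    'title': 'tornado',
    'text': 'A Python web framework and asynchronous networking library.',
    'timestamp': {
        'created': '2019-01-01T00:00:00Z',
        'modified': '2019-01-01T00:00:00Z',
    },
}

# PUT /notes - creates the note, or updates it if the title already exists
resp = requests.put(f'{BASE}/notes', json=note)
print(resp.status_code, resp.text)

# GET /search?q=:query - returns notes containing every token in the query
print(requests.get(f'{BASE}/search', params={'q': 'python tornado'}).json())

# GET /note/:title, then DELETE /note/:title
print(requests.get(f'{BASE}/note/tornado').json())
requests.delete(f'{BASE}/note/tornado')
```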
{
"alpha_fraction": 0.6091814637184143,
"alphanum_fraction": 0.6106387972831726,
"avg_line_length": 20.221649169921875,
"blob_id": "285e24e21f5b47adaee3e8f95096c3dff3085b13",
"content_id": "294ada55b7e223464f5447dd12e68063a17f1bd9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4117,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 194,
"path": "/src/actions/index.js",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import { fromJS } from 'immutable';\nimport axios from 'axios';\nimport urlencode from 'urlencode';\nimport * as types from '../types';\n\nexport const URL_BASE = 'http://localhost:3456';\nexport const SEARCH_URL = `${URL_BASE}/search?q=`;\nexport const ADD_URL = `${URL_BASE}/notes`;\nexport const DELETE_URL = `${URL_BASE}/note`;\n\nexport function restSearchNotes(query) {\n const encoded = urlencode(query);\n const url = `${SEARCH_URL}${encoded}`;\n return axios.get(url);\n}\n\nexport function restAddUpdateNote(title, text, timestamp) {\n const url = ADD_URL;\n const note = {\n title,\n text,\n timestamp: {\n modified: timestamp,\n created: timestamp,\n },\n };\n return axios.put(url, note);\n}\n\nexport function restDeleteNote(title) {\n const encoded = urlencode(title);\n const url = `${DELETE_URL}/${encoded}`;\n return axios.delete(url);\n}\n\nexport function searchNotes(query) {\n return restSearchNotes(query);\n}\n\nexport function addNoteToNotes(title, text, timestamp) {\n return restAddUpdateNote(title, text, timestamp);\n}\n\nexport function updateNoteToNotes(title, text, timestamp) {\n return restAddUpdateNote(title, text, timestamp);\n}\n\nexport function deleteNoteInNotes(title) {\n return restDeleteNote(title);\n}\n\nexport function setState(state) {\n return {\n type: types.SET_STATE,\n state,\n };\n}\n\nexport function setQuery(query) {\n return {\n type: types.SET_QUERY,\n query,\n };\n}\n\nexport function setNotes(notes) {\n return {\n type: types.SET_NOTES,\n notes,\n };\n}\n\nexport function search(query) {\n return dispatch =>\n searchNotes(query).then((response) => {\n const notes = response.data;\n const immutableNotes = fromJS(notes.notes);\n\n dispatch(setQuery(query));\n dispatch(setNotes(immutableNotes));\n if (immutableNotes.count() === 0 || query === '') {\n dispatch({\n type: types.SELECT_NOTE,\n title: null,\n });\n } else {\n const notesSorted = immutableNotes.sortBy(note => note.get('title'));\n dispatch({\n type: types.SELECT_NOTE,\n title: notesSorted.get(0).get('title'),\n });\n }\n });\n}\n\nexport function addNote(title, text) {\n const timestamp = (new Date()).toISOString();\n return dispatch =>\n addNoteToNotes(title, text, timestamp).then(() => {\n dispatch({\n type: types.ADD_NOTE,\n title,\n text,\n timestamp,\n });\n dispatch({\n type: types.SELECT_NOTE,\n title,\n });\n });\n}\n\nexport function selectNote(title) {\n return dispatch => {\n dispatch(setQuery(title));\n dispatch({\n type: types.SELECT_NOTE,\n title,\n });\n };\n}\n\nexport function clearSelection() {\n return dispatch =>\n searchNotes('').then((response) => {\n const notes = response.data;\n const immutableNotes = fromJS(notes.notes);\n\n dispatch({\n type: types.SELECT_NOTE,\n title: null,\n });\n dispatch(setQuery(''));\n dispatch(setNotes(immutableNotes));\n });\n}\n\nexport function editNote(selected, title, text) {\n const timestamp = (new Date()).toISOString();\n return dispatch => {\n const titleUpdate = { old: selected, new: title };\n return updateNoteToNotes(titleUpdate, text, timestamp).then(() => {\n dispatch({\n type: types.EDIT_NOTE,\n selected,\n title,\n text,\n timestamp,\n });\n dispatch({\n type: types.SELECT_NOTE,\n title,\n });\n });\n };\n}\n\nexport function deleteNote(selected) {\n return dispatch =>\n deleteNoteInNotes(selected).then(() => {\n dispatch({\n type: types.DELETE_NOTE,\n selected,\n });\n dispatch({\n type: types.SELECT_NOTE,\n title: null,\n });\n });\n}\n\nexport function orderByTitle() {\n return {\n type: types.ORDER_BY_TITLE,\n 
};\n}\n\nexport function orderByModified() {\n return {\n type: types.ORDER_BY_MODIFIED,\n };\n}\n\nexport function orderByCreated() {\n return {\n type: types.ORDER_BY_CREATED,\n };\n}\n\nexport function toggleAscendingDescending() {\n return {\n type: types.TOGGLE_ASCENDING_DESCENDING,\n };\n}\n"
},
{
"alpha_fraction": 0.6648757457733154,
"alphanum_fraction": 0.6667226552963257,
"avg_line_length": 30.85026741027832,
"blob_id": "c4ae3f3ca2bc9d2e9e6b8437a93765daf18d3e8d",
"content_id": "be109419a86e2a3da30c43b0870e69a206062d80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5956,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 187,
"path": "/src/containers/NotesApp.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { bindActionCreators } from 'redux';\nimport { connect } from 'react-redux';\nimport _ from 'lodash';\nimport ImmutablePropTypes from 'react-immutable-proptypes';\nimport * as actionCreators from '../actions/index';\n// import PureRenderMixin from 'react-addons-pure-render-mixin';\nimport SplitPane from '../../node_modules/react-split-pane/lib/SplitPane';\nimport NotesSearch from '../components/NotesSearch';\nimport { NotesList } from '../components/NotesList';\nimport NotesEdit from '../components/NotesEdit';\n\nexport class NotesApp extends React.Component {\n\n static get propTypes() {\n return {\n query: React.PropTypes.string.isRequired,\n notes: ImmutablePropTypes.list.isRequired,\n selected: React.PropTypes.string,\n orderBy: React.PropTypes.string.isRequired,\n toggleAscendingDescending: React.PropTypes.func.isRequired,\n clearSelection: React.PropTypes.func.isRequired,\n orderByTitle: React.PropTypes.func.isRequired,\n orderByModified: React.PropTypes.func.isRequired,\n orderByCreated: React.PropTypes.func.isRequired,\n addNote: React.PropTypes.func.isRequired,\n editNote: React.PropTypes.func.isRequired,\n search: React.PropTypes.func.isRequired,\n selectNote: React.PropTypes.func.isRequired,\n deleteNote: React.PropTypes.func.isRequired,\n };\n }\n\n constructor() {\n super();\n // this.shouldComponentUpdate = PureRenderMixin.shouldComponentUpdate.bind(this);\n this.setUpKeyboardHandling();\n\n this.returnPressed = this.returnPressed.bind(this);\n this.orderByTimestamp = this.orderByTimestamp.bind(this);\n this.noteEdited = this.noteEdited.bind(this);\n }\n\n componentWillMount() {\n this.noteEdited = _.debounce(this.noteEdited, 1000);\n }\n\n componentDidMount() {\n this.search.focus();\n }\n\n setUpKeyboardHandling() {\n window.onkeydown = (e) => {\n switch (e.keyCode) {\n case 27:\n this.escapePressed();\n break;\n default:\n break;\n }\n };\n }\n\n setOrderToDefault() {\n this.props.orderByTitle();\n if (this.props.orderBy.split(' ')[1] === 'descending') {\n this.props.toggleAscendingDescending();\n }\n }\n\n orderByTimestamp() {\n if (this.props.orderByCreated === undefined) {\n return;\n }\n\n if (this.props.orderByModified === undefined) {\n return;\n }\n\n if (this.props.orderBy.split(' ')[0] === 'modified') {\n this.props.orderByCreated();\n } else {\n this.props.orderByModified();\n }\n }\n\n escapePressed() {\n this.noteEdited.flush();\n\n if (this.props.query === '') {\n this.setOrderToDefault();\n }\n\n this.props.clearSelection();\n this.search.focus();\n }\n\n returnPressed(query) {\n if (this.props.selected !== query) {\n this.props.addNote(query, '');\n } else {\n this.props.selectNote(query);\n }\n\n if (this.edit) {\n this.edit.focus();\n }\n }\n\n noteEdited(text) {\n if (this.props.selected) {\n this.props.editNote(this.props.selected, this.props.selected, text);\n }\n }\n\n render() {\n return (\n <div className=\"notes-app\">\n <NotesSearch\n query={this.props.query}\n search={this.props.search}\n notes={this.props.notes}\n selected={this.props.selected}\n ref={c => this.search = c} // eslint-disable-line\n returnPressed={this.returnPressed}\n />\n <div className=\"contain-absolute\">\n <SplitPane split=\"horizontal\" defaultSize=\"50%\">\n <NotesList\n notes={this.props.notes}\n orderBy={this.props.orderBy}\n selected={this.props.selected}\n titleHeaderClicked={this.props.orderByTitle}\n timestampHeaderClicked={this.orderByTimestamp}\n arrowHeaderClicked={this.props.toggleAscendingDescending}\n 
selectNote={this.props.selectNote}\n deleteNote={this.props.deleteNote}\n ref={c => this.list = c} // eslint-disable-line\n />\n <NotesEdit\n notes={this.props.notes}\n selected={this.props.selected}\n noteEdited={this.noteEdited}\n ref={c => this.edit = c} // eslint-disable-line\n />\n </SplitPane>\n </div>\n </div>\n );\n }\n}\n\nfunction validateSelected(selected, notes) {\n const noteIndex = notes.findIndex(note => note.get('title') === selected);\n return noteIndex !== -1 ? selected : null;\n}\n\nexport function mapStateToProps(state) {\n const selected = validateSelected(state.get('selected'), state.get('notes'));\n return {\n query: state.get('query'),\n notes: state.get('notes'),\n selected,\n orderBy: state.get('orderBy'),\n };\n}\n\nexport function mapDispatchToProps(dispatch) {\n return {\n searchNotes: bindActionCreators(actionCreators.searchNotes, dispatch),\n setState: bindActionCreators(actionCreators.setState, dispatch),\n setQuery: bindActionCreators(actionCreators.setQuery, dispatch),\n setNotes: bindActionCreators(actionCreators.setNotes, dispatch),\n search: bindActionCreators(actionCreators.search, dispatch),\n addNote: bindActionCreators(actionCreators.addNote, dispatch),\n selectNote: bindActionCreators(actionCreators.selectNote, dispatch),\n clearSelection: bindActionCreators(actionCreators.clearSelection, dispatch),\n editNote: bindActionCreators(actionCreators.editNote, dispatch),\n deleteNote: bindActionCreators(actionCreators.deleteNote, dispatch),\n orderByTitle: bindActionCreators(actionCreators.orderByTitle, dispatch),\n orderByModified: bindActionCreators(actionCreators.orderByModified, dispatch),\n orderByCreated: bindActionCreators(actionCreators.orderByCreated, dispatch),\n toggleAscendingDescending: bindActionCreators(actionCreators.toggleAscendingDescending, dispatch),\n };\n}\n\nexport const NotesAppContainer = connect(mapStateToProps, mapDispatchToProps)(NotesApp);\n"
},
{
"alpha_fraction": 0.6049184799194336,
"alphanum_fraction": 0.6074133515357971,
"avg_line_length": 31.53043556213379,
"blob_id": "439b51bc8869b4f7348c90bed7ff1b7209dcfd0c",
"content_id": "957df883b9604c5b3765d52c2e34fd8db22e2a90",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 11224,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 345,
"path": "/test/components/NotesList_spec.jsx",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport TestUtils from 'react-addons-test-utils';\nimport { describe, it } from 'mocha';\nimport { expect } from 'chai';\nimport { List } from 'immutable';\nimport { shallow } from 'enzyme';\nimport { NotesList, UP_POINTING, DOWN_POINTING } from '../../src/components/NotesList';\nimport _state from '../test_data';\n\nconst { renderIntoDocument,\n Simulate } = TestUtils;\n\ndescribe('NotesList', () => {\n it('by default renders a list with notes in modified descending order', () => {\n const notes = _state.get('notes');\n const component = renderIntoDocument(\n <NotesList\n notes={notes}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n expect(component.items[0].props.title).to.equal('react');\n expect(component.items[1].props.title).to.equal('redux');\n expect(component.items[2].props.title).to.equal('immutable');\n });\n it('clicking note in list will call selectNote handler', () => {\n let wasCalled = '';\n const notes = _state.get('notes');\n function selectNote(title) {\n wasCalled = title;\n }\n const component = renderIntoDocument(\n <NotesList\n notes={notes}\n selectNote={selectNote}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n deleteNote={() => null}\n />\n );\n Simulate.click(component.items[1].row);\n\n expect(wasCalled).to.equal('redux');\n });\n it('clicking Title header will call titleHeaderClicked handler', () => {\n let wasCalled = false;\n function titleHeaderClicked() {\n wasCalled = true;\n }\n const component = renderIntoDocument(\n <NotesList\n notes={List.of()}\n titleHeaderClicked={titleHeaderClicked}\n selected=\"\"\n orderBy=\"\"\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n Simulate.click(component.titleHeader);\n\n expect(wasCalled).to.equal(true);\n });\n it('orderByTitle returns list of notes ordered by title ascending', () => {\n const notes = _state.get('notes');\n\n const component = renderIntoDocument(\n <NotesList\n notes={notes}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n const notesTitleAscending = component.orderByTitle(notes);\n expect(notesTitleAscending.get(0).get('title')).to.equal('immutable');\n expect(notesTitleAscending.get(1).get('title')).to.equal('react');\n expect(notesTitleAscending.get(2).get('title')).to.equal('redux');\n });\n it('orderByModified returns list of notes ordered by modified ascending', () => {\n const notes = _state.get('notes');\n\n const component = renderIntoDocument(\n <NotesList\n notes={notes}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n const notesModifiedAscending = component.orderByModified(notes);\n expect(notesModifiedAscending.get(0).get('title')).to.equal('immutable');\n expect(notesModifiedAscending.get(1).get('title')).to.equal('redux');\n expect(notesModifiedAscending.get(2).get('title')).to.equal('react');\n });\n it('list lists items according to orderBy = title ascending | descending', () => {\n const notes = 
_state.get('notes');\n\n const componentTitleAscending = renderIntoDocument(\n <NotesList\n notes={notes}\n orderBy=\"title ascending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentTitleAscending.items[0].props.title).to.equal('immutable');\n expect(componentTitleAscending.items[1].props.title).to.equal('react');\n expect(componentTitleAscending.items[2].props.title).to.equal('redux');\n\n const componentTitleDescending = renderIntoDocument(\n <NotesList\n notes={notes}\n orderBy=\"title descending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentTitleDescending.items[0].props.title).to.equal('redux');\n expect(componentTitleDescending.items[1].props.title).to.equal('react');\n expect(componentTitleDescending.items[2].props.title).to.equal('immutable');\n });\n it('clicking Modified / Created header will call timestampHeaderClicked handler', () => {\n let wasCalled = false;\n function timestampHeaderClicked() {\n wasCalled = true;\n }\n const component = renderIntoDocument(\n <NotesList\n notes={List.of()}\n timestampHeaderClicked={timestampHeaderClicked}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n Simulate.click(component.timestampHeader);\n\n expect(wasCalled).to.equal(true);\n });\n it('list list items according to orderBy = (modified | created) (ascending | descending)', () => {\n const notes = _state.get('notes');\n\n const componentModifiedAscending = renderIntoDocument(\n <NotesList\n notes={notes}\n orderBy=\"modified ascending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentModifiedAscending.items[0].props.title).to.equal('immutable');\n expect(componentModifiedAscending.items[1].props.title).to.equal('redux');\n expect(componentModifiedAscending.items[2].props.title).to.equal('react');\n\n const componentModifiedDescending = renderIntoDocument(\n <NotesList\n notes={notes}\n orderBy=\"modified descending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentModifiedDescending.items[0].props.title).to.equal('react');\n expect(componentModifiedDescending.items[1].props.title).to.equal('redux');\n expect(componentModifiedDescending.items[2].props.title).to.equal('immutable');\n\n const componentCreatedAscending = renderIntoDocument(\n <NotesList\n notes={notes}\n orderBy=\"created ascending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentCreatedAscending.items[0].props.title).to.equal('react');\n expect(componentCreatedAscending.items[1].props.title).to.equal('redux');\n expect(componentCreatedAscending.items[2].props.title).to.equal('immutable');\n\n const componentCreatedDescending = renderIntoDocument(\n <NotesList\n notes={notes}\n orderBy=\"created descending\"\n selected=\"\"\n 
titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentCreatedDescending.items[0].props.title).to.equal('immutable');\n expect(componentCreatedDescending.items[1].props.title).to.equal('redux');\n expect(componentCreatedDescending.items[2].props.title).to.equal('react');\n });\n it('orderby modify | created is printed in the timestamp header text', () => {\n const componentModified = renderIntoDocument(\n <NotesList\n notes={List.of()}\n orderBy=\"modified ascending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentModified.timestampHeader.textContent).to.contain('Modified');\n\n const componentCreated = renderIntoDocument(\n <NotesList\n notes={List.of()}\n orderBy=\"created ascending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentCreated.timestampHeader.textContent).to.contain('Created');\n });\n it('orderby ascending | descending is reflected in the header arrow', () => {\n const componentAscending = renderIntoDocument(\n <NotesList\n notes={List.of()}\n orderBy=\"modified ascending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentAscending.arrowHeader.textContent).to.contain(UP_POINTING);\n\n const componentDescending = renderIntoDocument(\n <NotesList\n notes={List.of()}\n orderBy=\"modified descending\"\n selected=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n arrowHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n\n expect(componentDescending.arrowHeader.textContent).to.contain(DOWN_POINTING);\n });\n it('clicking header arrow will call arrowHeaderClicked handler', () => {\n let wasCalled = false;\n function arrowHeaderClicked() {\n wasCalled = true;\n }\n const component = renderIntoDocument(\n <NotesList\n notes={List.of()}\n arrowHeaderClicked={arrowHeaderClicked}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n Simulate.click(component.arrowHeader);\n\n expect(wasCalled).to.equal(true);\n });\n it('list is scrollable (in class)', () => {\n const component = shallow(\n <NotesList\n notes={List.of()}\n arrowHeaderClicked={() => null}\n selected=\"\"\n orderBy=\"\"\n titleHeaderClicked={() => null}\n timestampHeaderClicked={() => null}\n selectNote={() => null}\n deleteNote={() => null}\n />\n );\n const scrollable = component.find('.scrollable');\n expect(scrollable).to.not.equal(undefined);\n });\n});\n"
},
{
"alpha_fraction": 0.7283950448036194,
"alphanum_fraction": 0.7530864477157593,
"avg_line_length": 12.5,
"blob_id": "bcf5b8e0a6aef88c959c0ae6c060ee67f01cac2e",
"content_id": "6a1ee428a5ef23b9fa4a367bbfcd41304dfb0bf7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 81,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 6,
"path": "/server/python_server/tox.ini",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist = py34\n[pytest]\n[testenv]\ndeps=-rrequirements.txt\ncommands=py.test\n"
},
{
"alpha_fraction": 0.5616871118545532,
"alphanum_fraction": 0.5663060545921326,
"avg_line_length": 23.854984283447266,
"blob_id": "5ce15410aea1d894be231cbd65bd55320b5564d7",
"content_id": "b0f8136046e4bf4054439937828e1f0d91f1a3a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8227,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 331,
"path": "/server/python_server/notes_app_server/server.py",
"repo_name": "jarmoj/notesapp-react-redux-boilerplate",
"src_encoding": "UTF-8",
"text": "\"\"\"A simple server with a REST API for the Notes App frontend.\"\"\"\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.web\nimport tornado.escape\n\nfrom tornado_cors import CorsMixin\n\nimport logging\nimport json\nimport os\nimport signal\nimport sys\n\nPORT = 3456\nDB_PATH = \"db.json\"\nTEST_DB_PATH = \"test/test_db.json\"\n\ndb = {\n 'version': {\n 'version': '0.0.1',\n 'api_version': '0.1',\n 'is_test_db': True\n },\n 'notes': [\n {\n 'title': 'some note title',\n 'text': 'some note text'\n },\n {\n 'title': 'other note title',\n 'text': 'other note text'\n }\n ]\n}\n\n\ndef tokenize(s):\n \"\"\"Split string into tokens.\"\"\"\n return [p.lower() for p in s.split(\" \") if p]\n\n\nclass NoteAlreadyExists(Exception):\n \"\"\"Raised if trying to add a new note with title that is already taken.\"\"\"\n\n def __init__(self, title):\n \"\"\"Show exception with the note title.\"\"\"\n super(NoteAlreadyExists, self).__init__(title)\n\n\nclass NoSuchNoteExists(Exception):\n \"\"\"Raised if trying to delete a note that doesn't exist.\"\"\"\n\n def __init__(self, title):\n \"\"\"Show exception with the note title.\"\"\"\n super(NoSuchNoteExists, self).__init__(title)\n\n\ndef add_note(note):\n \"\"\"Add note to notes.\"\"\"\n if find_note(note[\"title\"]):\n raise NoteAlreadyExists(note[\"title\"])\n db['notes'].append(note)\n\n\ndef delete_note(title):\n \"\"\"Delete note from notes.\"\"\"\n found = find_note(title)\n if not found:\n raise NoSuchNoteExists(title)\n del db['notes'][found[0]]\n\n\ndef update_note(title, note):\n \"\"\"Update an existing note with a given title, possibly retitling it.\"\"\"\n found = find_note(title)\n if not found:\n raise NoSuchNoteExists(title)\n note[\"timestamp\"][\"created\"] = found[1][\"timestamp\"][\"created\"]\n db['notes'][found[0]] = note\n\n\ndef find_note(title):\n \"\"\"Return (index, note) of note that has title or False if no such note.\"\"\"\n for i, note in enumerate(db['notes']):\n if note[\"title\"] == title:\n return i, note\n return False\n\n\ndef search_notes(query):\n \"\"\"Search notes by query.\"\"\"\n def match_token(note, tokens):\n \"\"\"Test if note contains any of the tokens.\n\n A very simple implementation still. 
Return False if any of the tokens\n is missing, True if any match.\n \"\"\"\n tokens_found = []\n for token in tokens:\n s = note[\"title\"] + \" \" + note[\"text\"]\n if token not in s.lower():\n return False\n tokens_found.append(token)\n return len(tokens_found) == len(tokens)\n\n notes = []\n query_tokens = tokenize(query)\n for note in db['notes']:\n if match_token(note, query_tokens):\n notes.append(note)\n return notes\n\n\nclass CorsBaseHandler(CorsMixin, tornado.web.RequestHandler):\n \"\"\"Set up CORS and allow separate origin for the client.\"\"\"\n\n CORS_ORIGIN = 'http://localhost:8080'\n CORS_METHODS = 'GET, PUT, DELETE'\n CORS_HEADERS = (\n 'Access-Control-Allow-Headers, '\n 'Origin, '\n 'Accept, '\n 'X-Requested-With, '\n 'Content-Type, '\n 'Access-Control-Request-Method, '\n 'Access-Control-Request-Headers'\n )\n\n\nclass VersionRootHandler(CorsBaseHandler):\n \"\"\"Handle /version .\"\"\"\n\n def get(self):\n \"\"\"Handle get and return version and api_version.\"\"\"\n response = {\n 'version': '0.0.1',\n 'api_version': '0.1',\n 'is_test_db': True\n }\n self.write(response)\n\n\nclass NotesRootHandler(CorsBaseHandler):\n \"\"\"Handle /notes .\"\"\"\n\n def get(self):\n \"\"\"Handle get and return all notes from database.\"\"\"\n response = {\n 'notes': db['notes']\n }\n self.write(response)\n\n def put(self, *args, **kwargs):\n \"\"\"Handle put and create / update given note.\"\"\"\n note = json.loads(self.request.body.decode('utf-8'))\n title_update = note[\"title\"]\n\n if isinstance(title_update, dict):\n find_title = title_update[\"old\"]\n new_title = title_update[\"new\"]\n else:\n find_title = title_update\n new_title = title_update\n\n _note = {\n 'title': new_title,\n 'text': note[\"text\"],\n 'timestamp': note[\"timestamp\"]\n }\n\n found = find_note(find_title)\n if not found:\n add_note(_note)\n self.clear()\n self.set_status(200)\n self.finish(\"Note '{}' added.\".format(find_title))\n else:\n update_note(find_title, _note)\n self.clear()\n # 200 rather than 204: a 204 response must not carry a body,\n # and Tornado asserts if one is written.\n self.set_status(200)\n self.finish(\"Note '{}' updated.\".format(new_title))\n\n\nclass NoteHandler(CorsBaseHandler):\n \"\"\"Handle /note/(.*) .\n\n /note/:title\n GET\n DELETE\n \"\"\"\n\n def get(self, title):\n \"\"\"Handle get and return note with given title from database.\"\"\"\n found = find_note(title)\n\n if not found:\n self.clear()\n self.set_status(404)\n self.finish(\"Note '{}' not found!\".format(title))\n return\n\n response = found[1]\n self.write(response)\n\n def delete(self, title):\n \"\"\"Handle delete and delete note with given title from database.\"\"\"\n try:\n delete_note(title)\n except NoSuchNoteExists:\n self.clear()\n self.set_status(404)\n self.finish(\"Note '{}' does not even exist.\".format(title))\n\n\nclass NotesTitlesHandler(CorsBaseHandler):\n \"\"\"Handle /notes/titles .\"\"\"\n\n def get(self):\n \"\"\"Handle get and return all note titles from database.\"\"\"\n response = {\n 'note_titles': [note[\"title\"] for note in db['notes']]\n }\n self.write(response)\n\n\nclass NotesSearchHandler(CorsBaseHandler):\n \"\"\"Handle /search?q=(.*) .\"\"\"\n\n def get(self):\n \"\"\"Handle get and return all notes matching search query.\"\"\"\n response = {\n 'notes': []\n }\n if self.get_argument('q') == \"\":\n response = {\n 'notes': db['notes']\n }\n else:\n response = {\n 'notes': search_notes(self.get_argument('q'))\n }\n self.write(response)\n\n\nclass TestBeginHandler(CorsBaseHandler):\n \"\"\"Handle /test/begin .\"\"\"\n\n def get(self):\n \"\"\"Setup test to have expected state.\"\"\"\n read_db()\n\n\nclass TestEndHandler(CorsBaseHandler):\n \"\"\"Handle /test/end .\"\"\"\n\n def get(self):\n \"\"\"Setup test to have end with expected state afterwards.\"\"\"\n read_db()\n\n\ndef is_using_test_db():\n \"\"\"Check if started with use test db flag.\"\"\"\n return \"--use-test-db\" in sys.argv\n\n\nroutes = [\n (r\"/version\", VersionRootHandler),\n (r\"/notes\", NotesRootHandler),\n (r\"/notes/titles\", NotesTitlesHandler),\n (r\"/note/(.*)\", NoteHandler),\n (r\"/search\", NotesSearchHandler),\n]\n\ntest_routes = [\n (r\"/test/begin\", TestBeginHandler),\n (r\"/test/end\", TestEndHandler)\n]\n\nif is_using_test_db():\n routes.extend(test_routes)\n\napplication = tornado.web.Application(routes)\n\n\ndef read_db():\n \"\"\"'Read in' database for use.\"\"\"\n global db\n db_path = DB_PATH\n if is_using_test_db():\n db_path = TEST_DB_PATH\n\n # logging uses %-style lazy formatting; passing extra positional args\n # without placeholders would raise a formatting error inside logging.\n logging.info(\"server path: %s\", os.path.abspath(__file__))\n logging.info(\"server: db_path: %s\", db_path)\n\n with open(db_path) as f:\n db = json.load(f)\n\n\nis_closing = False\n\n\ndef signal_handler(signum, frame):\n \"\"\"Signal handler for closing tornado.\"\"\"\n global is_closing\n logging.info('exiting...')\n is_closing = True\n\n\ndef try_exit():\n \"\"\"Try closing tornado.\"\"\"\n global is_closing\n if is_closing:\n # clean up here\n tornado.ioloop.IOLoop.instance().stop()\n logging.info('exit success')\n\n\ndef start():\n \"\"\"Start tornado.\"\"\"\n logging.info(\"Starting server...\")\n read_db()\n signal.signal(signal.SIGINT, signal_handler)\n application.listen(PORT)\n tornado.ioloop.PeriodicCallback(try_exit, 500).start()\n tornado.ioloop.IOLoop.instance().start()\n logging.info(\"Server stopped.\")\n\n\nif __name__ == \"__main__\":\n start()\n"
}
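Because server.py keeps its matching logic in plain module-level functions (tokenize, search_notes) that read the module-level db dict, the search behavior can be unit-tested without starting Tornado. A minimal pytest sketch, assuming the module is importable as notes_app_server.server per the path shown above:

```python
# Minimal pytest sketch for the pure search helpers in server.py.
# Assumes the package layout shown above (notes_app_server/server.py).
from notes_app_server import server


def test_tokenize_lowercases_and_splits():
    assert server.tokenize("Hello  Tornado") == ["hello", "tornado"]


def test_search_notes_requires_all_tokens(monkeypatch):
    notes = [
        {"title": "python web", "text": "tornado server"},
        {"title": "react", "text": "frontend notes"},
    ]
    # Patch the module-level db dict that search_notes reads from.
    monkeypatch.setitem(server.db, "notes", notes)
    hits = server.search_notes("python tornado")
    assert [n["title"] for n in hits] == ["python web"]
    # A note must contain every token, so mixed queries match nothing here.
    assert server.search_notes("python react") == []
```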
] | 19 |
SatvikDandale/Huffman-Data-Compression
|
https://github.com/SatvikDandale/Huffman-Data-Compression
|
030e44e5e93b16e7b197c9d7fcc347513467a317
|
2944ca3096eb8bf8160b1cba405ac88597d2b8d2
|
5b069d6956dcb622380fedbfe282a02d620006e1
|
refs/heads/master
| 2022-12-12T23:59:50.807096 | 2020-09-09T10:17:46 | 2020-09-09T10:17:46 | 221,185,013 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8158844709396362,
"alphanum_fraction": 0.8158844709396362,
"avg_line_length": 68.25,
"blob_id": "d21ac8515b24a5c18a872554e4ebde7974a0f71f",
"content_id": "61c3f0c208fdc7c34dd55e589b134b102fade78a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 4,
"path": "/README.md",
"repo_name": "SatvikDandale/Huffman-Data-Compression",
"src_encoding": "UTF-8",
"text": "# Huffman Text Compression\n\nA Python Code to encode a text file using dynamic Huffman Compression and save to an encoded file.\nAnother Python Code to use the metadata of the previous compression technique use and decode the encoded file to retireve the original piece of text.\n"
},
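For comparison with the implementation that follows, here is a minimal, self-contained sketch of Huffman code-table construction using heapq; the repo's HuffmanEncoding class builds the same tree by repeatedly sorting a list of nodes instead of using a heap:

```python
# Minimal sketch of Huffman code-table construction with a heap.
# The repo's HuffmanEncoding builds an equivalent tree from a sorted
# node list; this is the textbook heapq variant for comparison.
import heapq
from collections import Counter


def huffman_codes(text):
    # Each heap entry: (frequency, tie_breaker, {char: code_so_far}).
    # The integer tie_breaker keeps tuple comparison away from the dicts.
    heap = [(freq, i, {ch: ""}) for i, (ch, freq) in enumerate(Counter(text).items())]
    heapq.heapify(heap)
    tie = len(heap)
    while len(heap) > 1:
        f1, _, left = heapq.heappop(heap)
        f2, _, right = heapq.heappop(heap)
        # Prefix '0' onto the left subtree's codes and '1' onto the right's.
        merged = {ch: "0" + code for ch, code in left.items()}
        merged.update({ch: "1" + code for ch, code in right.items()})
        heapq.heappush(heap, (f1 + f2, tie, merged))
        tie += 1
    return heap[0][2] if heap else {}


codes = huffman_codes("abracadabra")
# The most frequent character ('a') gets the shortest code.
assert min(codes, key=lambda ch: len(codes[ch])) == "a"
```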
{
"alpha_fraction": 0.5571277737617493,
"alphanum_fraction": 0.5640971660614014,
"avg_line_length": 33.31159591674805,
"blob_id": "b2cb7509a0b2b86f3e11289263bcc02967a0094b",
"content_id": "ad07c6b5ec6dce71b3dd881ba0bb48863f6a5fff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4735,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 138,
"path": "/HuffmanEncoding.py",
"repo_name": "SatvikDandale/Huffman-Data-Compression",
"src_encoding": "UTF-8",
"text": "import json\n\nclass node:\n # This is just a structure\n def __init__(self):\n self.data = ''\n self.freq = 0\n self.left = None\n self.right = None\n# End of class\n\nclass HuffmanEncoding:\n def getKey(self, n):\n # Utility function for sorting the nodes according to their frequencies\n return n.freq\n\n def saveDict(self, node, s):\n try:\n self.charCodes\n except:\n self.charCodes = {}\n # This function will print the char, freq, corresponding code\n if node.left == None and node.right == None and node.data != '-':\n # We have a valid character at this node\n self.charCodes[node.data] = s\n # print(node.data, \": \", node.freq, \"->\", s)\n return\n self.saveDict(node.left, s + \"0\")\n self.saveDict(node.right, s + \"1\")\n\n def createDict(self, text):\n # This function will create a char: freq pair from the given text.\n tempDict = {}\n for char in text:\n if char in tempDict:\n tempDict[char] += 1\n else:\n tempDict[char] = 1\n return tempDict\n\n def createTree(self):\n # This function will create a Huffman Tree with the given char: freq combination.\n node_list = []\n # Creating a list of nodes\n for data in self.charDict:\n freq = self.charDict[data]\n n = node()\n n.data = data\n n.freq = freq\n node_list.append(n)\n # End of for loop\n node_list = sorted(node_list, key = self.getKey)\n\n # Start creating the tree\n self.root = node()\n \n while len(node_list) > 1:\n # Take two nodes of lowest frequency\n a = node_list[0]\n b = node_list[1]\n\n node_list.pop(0) # Remove a \n node_list.pop(0) # Remove b\n\n temp = node()\n # Create a new node with children being the previous two nodes.\n temp.data = '-'\n temp.freq = a.freq + b.freq # Add the frequencies\n temp.left = a\n temp.right = b\n\n self.root = temp\n\n node_list.append(temp)\n # The node with lower frequency should come first\n node_list = sorted(node_list, key = self.getKey) \n \n def convertTextToBinary(self, text):\n # This function will iterate over the text and for each character visited, the character will be replaced by its corresponding binary code\n self.binaryString = \"\"\n self.lenBinary = 0 # To store the length of binary string. (Verifying for multiple of 8)\n for char in text:\n self.lenBinary += len(self.charCodes[char])\n # For each character\n self.binaryString += str(self.charCodes[char]) # Adding each bit code to the binary stream\n # Aftet this loop, the entire text is converted into a string of binary numbers.\n # Make sure that the length of this binary string is a multiple of \"8\". 
(BYTE ADDRESSABLE MEMORY)\n # If not, add remaining zeros(0s) at the end and keep count of it.\n \n self.extraZerosAtEnd = 8 - self.lenBinary % 8\n for _ in range(self.extraZerosAtEnd):\n self.binaryString += \"0\" # Adding necessary zeroes at the end.\n\n def customEncoding(self):\n self.dividedBinary = [] # This will store binary values which are divided in 8 digits\n self.finalString = \"\" # Final Encoded String\n\n for i in range(0, len(self.binaryString), 8):\n temp = self.binaryString[i:i+8]\n self.dividedBinary.append(temp)\n ascii = int(temp, 2)\n self.finalString += chr(ascii)\n\n\n def __init__(self, text):\n self.charDict = self.createDict(text)\n self.createTree()\n self.saveDict(self.root, \"\")\n self.convertTextToBinary(text)\n self.customEncoding()\n\n\ndef main():\n # temp = {\n # \"b\" : 9,\n # \"a\" : 5,\n # \"f\" : 45,\n # \"c\" : 12,\n # \"d\" : 13,\n # \"e\" : 16\n # }\n with open('sampleText.txt', 'r') as file:\n text = file.read()\n\n h = HuffmanEncoding(text)\n # This object \"h\" has the encoded version of the given text with the huffman tree, char: freq pairs and char: huffman bit code pairs stored. Save the charCodes dictionary in a JSON file.\n \n with open('compressed.dat', 'w') as file:\n file.write(str(h.extraZerosAtEnd))\n file.write(h.finalString)\n \n with open('metadata.json', 'w') as file:\n json.dump(h.charDict, file, indent = 2)\n\n print(h.binaryString[:len(h.binaryString)-h.extraZerosAtEnd])\n\nif __name__ == '__main__':\n main()\n"
},
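HuffmanEncoding.convertTextToBinary pads the bit stream to a byte boundary and records the pad length as the first character of compressed.dat; customEncoding then packs each 8-bit group into one character. A minimal round-trip sketch of that scheme in isolation; the fixed-width format(..., '08b') on the way back preserves the leading zeros that bin() would drop (the repo's encoder always pads 1-8 bits, while the modulo here also allows zero):

```python
# Minimal sketch of the pad-and-pack scheme used by HuffmanEncoding:
# pad the bit string to a multiple of 8, record the pad count as the
# first character, then store each 8-bit group as one character.
bits = "010011010"                      # example Huffman bit stream, 9 bits
pad = (8 - len(bits) % 8) % 8           # zeros needed to reach a byte boundary
padded = bits + "0" * pad
packed = str(pad) + "".join(
    chr(int(padded[i:i + 8], 2)) for i in range(0, len(padded), 8)
)

# Unpack: fixed-width '08b' keeps leading zeros (bin() would drop them),
# then strip the pad count recorded in the first character.
n_pad = int(packed[0])
unpacked = "".join(format(ord(c), "08b") for c in packed[1:])
assert unpacked[:len(unpacked) - n_pad] == bits
```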
{
"alpha_fraction": 0.5231860280036926,
"alphanum_fraction": 0.5280960202217102,
"avg_line_length": 28.103174209594727,
"blob_id": "e841ef9841b325f9bbb79889c573a389a085e8f0",
"content_id": "19a0835033b597cc4cdf118a4cabfc7cdca642c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3666,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 126,
"path": "/HuffmanDecoding.py",
"repo_name": "SatvikDandale/Huffman-Data-Compression",
"src_encoding": "UTF-8",
"text": "import json\n\nclass node:\n # This is just a structure\n def __init__(self):\n self.data = ''\n self.freq = 0\n self.left = None\n self.right = None\n# End of class\n\n\nclass HuffmanDecoding:\n \n def getKey(self, n):\n # Utility function for sorting the nodes according to their frequencies\n return n.freq\n\n def createTree(self):\n # This function will create a Huffman Tree with the given char: freq combination.\n node_list = []\n # Creating a list of nodes\n for data in self.charDict:\n freq = self.charDict[data]\n n = node()\n n.data = data\n n.freq = freq\n node_list.append(n)\n # End of for loop\n node_list = sorted(node_list, key = self.getKey)\n\n # Start creating the tree\n self.root = node()\n \n while len(node_list) > 1:\n # Take two nodes of lowest frequency\n a = node_list[0]\n b = node_list[1]\n\n node_list.pop(0) # Remove a \n node_list.pop(0) # Remove b\n\n temp = node()\n # Create a new node with children being the previous two nodes.\n temp.data = '-'\n temp.freq = a.freq + b.freq # Add the frequencies\n temp.left = a\n temp.right = b\n\n self.root = temp\n\n node_list.append(temp)\n # The node with lower frequency should come first\n node_list = sorted(node_list, key = self.getKey) \n \n\n def extractText(self):\n # This function is used to extract text from encoded binary string\n # We will use the huffman tree to detect the end of the bits sequence for a character\n current = self.root\n self.finalString = \"\"\n i = 0\n while i < len(self.binaryString):\n bit = self.binaryString[i]\n # Check if current represents a character\n if current.data != '-':\n print(\"Found \", current.data)\n # then it is a character\n self.finalString += current.data\n current = self.root\n continue\n # Else it is an internal node\n print(\"Current bit is: \", bit)\n\n if bit == '0':\n print(\"Going left\\n\")\n current = current.left\n else:\n print(\"Going right\\n\")\n current = current.right\n i += 1\n \n if current.data != '-':\n print(\"Found \", current.data)\n # then it is a character\n self.finalString += current.data\n\n def extractBinary(self, encodedString):\n \n # function :\n # 1. convert char to ascii\n # 2. ascii to binary\n # 3. remove padding\n # 4. concatenate into 1 string\n self.binaryString = \"\"\n self.extraZeroes = int(encodedString[0])\n for i in range(1, len(encodedString)):\n temp = bin(ord(encodedString[i]))\n print(temp)\n self.binaryString += temp[2:]\n\n self.binaryString = self.binaryString[0: len(self.binaryString)-self.extraZeroes]\n print(self.binaryString)\n self.createTree()\n self.extractText()\n\n def __init__(self, data):\n self.charDict = data\n\n\n\ndef main():\n with open('metadata.json') as file:\n data = json.load(file)\n\n d = HuffmanDecoding(data)\n\n with open(\"compressed.dat\", \"r\") as file:\n text = file.read()\n d.extractBinary(text)\n\n with open(\"decompressed.txt\", 'w') as file:\n file.write(d.finalString)\n\nif __name__ == \"__main__\":\n main()"
}
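HuffmanDecoding.extractText recovers characters by walking the tree one bit at a time: left on '0', right on '1', emit a character on reaching a leaf, then restart from the root. A minimal sketch of that walk, with the tree represented as nested tuples instead of node objects:

```python
# Minimal sketch of the bit-by-bit tree walk used in extractText.
# Internal nodes are (left, right) tuples; leaves are plain characters.
tree = (("a", "b"), "c")     # codes: 'a' -> 00, 'b' -> 01, 'c' -> 1
bits = "00011"               # encodes "abc"

out, node = [], tree
for bit in bits:
    node = node[0] if bit == "0" else node[1]
    if isinstance(node, str):    # reached a leaf: emit and restart
        out.append(node)
        node = tree
assert "".join(out) == "abc"
```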
] | 3 |
work-ed/PSU_Capstone
|
https://github.com/work-ed/PSU_Capstone
|
eafd3dd8b7de014a6e705aeb956abb3603235e9b
|
45cd944979ef7f12b15811c4d5fec83be4313cf3
|
1103499839f401ba6e9faaf10fdf107de866183d
|
refs/heads/master
| 2020-05-22T02:14:22.916275 | 2019-08-03T04:10:45 | 2019-08-03T04:10:45 | 186,193,399 | 0 | 0 | null | 2019-05-12T00:07:35 | 2019-08-03T18:05:12 | 2021-03-29T20:04:10 |
HTML
|
[
{
"alpha_fraction": 0.6540084481239319,
"alphanum_fraction": 0.6540084481239319,
"avg_line_length": 15.928571701049805,
"blob_id": "e5afc152d88fc6c7d5553dc1e0fe96a8bd88d9ae",
"content_id": "026ccebbc5d9b62cce3f5f7572dc72dbf3cb9950",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 14,
"path": "/tmdb/apps.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nConfiguration file for the 'tmdb' application\n\"\"\"\nfrom django.apps import AppConfig\n\n\nclass TmdbConfig(AppConfig):\n \"\"\"\n Application specific settings\n \"\"\"\n name = 'tmdb'\n\n def ready(self):\n import tmdb.signals\n"
},
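TmdbConfig.ready imports tmdb.signals purely for its side effect of registering signal receivers at startup. That module is not included in this dump; the following is a hypothetical sketch of the usual pattern (the receiver name and behavior are illustrative, not taken from the repo):

```python
# Hypothetical tmdb/signals.py - illustrative only; the actual module is
# not included in this dump. Shows the pattern that ready() imports for
# its side effect of connecting receivers.
from django.db.models.signals import post_save
from django.dispatch import receiver

from users.models import CustomUser
from .models import Row


@receiver(post_save, sender=CustomUser)
def create_default_row(sender, instance, created, **kwargs):
    # Give every newly created user a starter row; all other Row fields
    # fall back to the model's defaults (Trending Movies).
    if created:
        Row.objects.create(user=instance)
```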
{
"alpha_fraction": 0.5122470855712891,
"alphanum_fraction": 0.55804044008255,
"avg_line_length": 27.454545974731445,
"blob_id": "5bba81301133e2da970583c2f64b7fe37f3065c7",
"content_id": "d3d903b905a98fe2f9105bd6f18ada9317292851",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 939,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 33,
"path": "/tmdb/migrations/0004_auto_20190713_1023.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-13 10:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tmdb', '0003_auto_20190713_1016'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='mediaitem',\n name='description',\n field=models.CharField(default='', max_length=500),\n ),\n migrations.AlterField(\n model_name='mediaitem',\n name='name',\n field=models.CharField(default='', max_length=100),\n ),\n migrations.AlterField(\n model_name='mediaitem',\n name='popularity',\n field=models.DecimalField(decimal_places=3, default=0.0, max_digits=5),\n ),\n migrations.AlterField(\n model_name='mediaitem',\n name='realease_date',\n field=models.CharField(default='', max_length=10),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5961538553237915,
"alphanum_fraction": 0.6105769276618958,
"avg_line_length": 23.959999084472656,
"blob_id": "a2bf014f1812dcb8dade752cbbd0617257a8354f",
"content_id": "19eb9c5da00bda8687758ec44a17576d68a666c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 624,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 25,
"path": "/users/tests/test_forms.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest cases for user forms\n\"\"\"\nimport pytest\nfrom users.forms import UserRegistrationForm\n\n\[email protected](\n 'username, password1, password2, valid',\n [('u$3</r', 'secret', 'not-secret', False),\n ('user', 'p@55w0rd!', 'p@55w0rd!', True),\n ('', '', '', False),\n ]\n)\ndef test_user_registration_form(db, username, password1, password2, valid):\n \"\"\"\n Verifies user registration form functionality\n \"\"\"\n form = UserRegistrationForm(data={\n 'username': username,\n 'password1': password1,\n 'password2': password2,\n })\n\n assert form.is_valid() is valid\n"
},
{
"alpha_fraction": 0.6214979290962219,
"alphanum_fraction": 0.6297577619552612,
"avg_line_length": 27.899999618530273,
"blob_id": "d7092a7e3b89dea995cc0ca04ee85d8acfaa6c64",
"content_id": "7fe0885e35926459383524de46aeeef28be5f521",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8959,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 310,
"path": "/users/tests/test_views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest cases designed to cover 'users/views.py'\n\"\"\"\n\nfrom django import urls\nimport pytest\n\nfrom ..models import CustomUser\n\n\ndef test_users_register_view_GET(client):\n \"\"\"\n Verify that the get register view renders as expected\n \"\"\"\n url = urls.reverse('register')\n resp = client.get(url)\n\n assert resp.status_code == 200\n assert b'Register' in resp.content\n\n\[email protected]('username, password1, password2, expected', [\n ('regtest1', 'TempP@ssw0rd!', 'TempP@ssw0rd!', True),\n ('regtest2', 'TempP@ssw0rd!', 'abc123', False),\n ('', '', '', False),\n])\ndef test_users_register_view_POST(db, client, username, password1, password2, expected): # pylint: disable=unused-argument\n \"\"\"\n Verify that the post register view creates an account\n \"\"\"\n # Test begins with no users\n assert CustomUser.objects.count() == 0\n\n url = urls.reverse('register')\n\n data = {\n 'username': username,\n 'password1': password1,\n 'password2': password2,\n }\n\n resp = client.post(url, data, follow_redirects=True)\n\n # Test should now have a registered user\n assert CustomUser.objects.count() == expected\n\n # Verify redirection to 'Login' page\n if expected:\n assert resp.status_code == 302\n assert resp.url == '/users/login/'\n else:\n assert 'form' in resp.context\n\n\ndef test_users_profile_view_GET(db, client): # pylint: disable=invalid-name, unused-argument\n \"\"\"\n Verify that the profile view renders as expected\n \"\"\"\n # Test begins with no users\n assert CustomUser.objects.count() == 0\n\n # Retrieve URL and peform GET on the target view (user not logged in)\n url = urls.reverse('profile')\n resp = client.get(url)\n\n # Ensure redirection to 'Login' page for users that are not logged in\n # - Should also handle ensure proper destination after login\n assert resp.status_code == 302\n assert resp.url == f'/users/login/?next={url}'\n\n # Create a test user\n CustomUser.objects.create_user('profile_test', password='test123')\n\n # Ensure we now have a user account\n assert CustomUser.objects.count() == 1\n\n login = client.login(username='profile_test', password='test123')\n\n # Verify successful user login\n assert login\n\n # Perform GET on target view (now with an authenticated user)\n resp = client.get(url)\n\n # Ensure page load successful\n assert resp.status_code == 200\n assert 'u_form' and 'p_form' in resp.context\n assert b'Profile' in resp.content\n\n\ndef test_users_profile_view_POST(client, db): # pylint: disable=invalid-name, unused-argument\n \"\"\"\n Verify that the profile view POST method properly processes form\n \"\"\"\n # Create a test user\n testuser = CustomUser.objects.create_user('profile_test2', password='321test')\n\n # Ensure we now have a user object\n assert CustomUser.objects.count() == 1\n\n # Ensure associated user profile exists\n assert str(testuser.profile) == f\"{testuser.username}'s Profile\"\n\n # Login so we can access profile page\n login = client.login(username='profile_test2', password='321test')\n\n # Verify successful user login\n assert login\n\n url = urls.reverse('profile')\n\n # Test invalid data\n data = {}\n\n resp = client.post(url, data, follow_redirects=True)\n\n # Test valid data\n data = {\n 'username': testuser.username,\n 'image': '',\n }\n\n resp = client.post(url, data, follow_redirects=True)\n\n assert resp.status_code == 302\n assert resp.url == '/users/'\n\n\n# @pytest.fixture\n# def testuser(db):\n# \"\"\"\n# Create test user for protected view tests\n# \"\"\"\n# user = 
CustomUser.objects.create_user('testuser', password='test123')\n# Row.objects.create(user=user)\n# return user\n\n\n# def test_users_row_list_view_noauth(client):\n# \"\"\"\n# Verify that the users row list view redirects as expected\n# - Should not be able to access without an authenticated user\n# \"\"\"\n# url = urls.reverse('row_list')\n# resp = client.get(url)\n\n# assert resp.status_code == 302, \"Expect redirect to login page\"\n# assert resp.url == f'/user/login/?next={url}'\n\n\n# def test_users_row_list_view_auth(testuser, client):\n# \"\"\"\n# Verify that the users row list view renders as expected\n# \"\"\"\n\n# login = client.login(username='testuser', password='test123')\n# assert login, 'Verify successful login'\n\n# url = urls.reverse('row_list')\n# resp = client.get(url)\n\n# assert resp.status_code == 200\n# assert b'CustomUser Rows' in resp.content\n\n\n# def test_users_row_detail_view_noauth(client):\n# \"\"\"\n# Verify that the users row detail view redirects as expected\n# - Should not be able to access without an authenticated user\n# \"\"\"\n# url = urls.reverse('row_detail', kwargs={'pk': 1})\n# resp = client.get(url)\n\n# assert resp.status_code == 302, \"Expect redirect to login page\"\n# assert resp.url == f'/user/login/?next={url}'\n\n\n# def test_users_row_detail_view_auth(testuser, client):\n# \"\"\"\n# Verify that the users row detail view renders as expected\n# \"\"\"\n\n# login = client.login(username='testuser', password='test123')\n# assert login, 'Verify successful login'\n\n# url = urls.reverse('row_detail', kwargs={'pk': 1})\n# resp = client.get(url)\n\n# assert resp.status_code == 200\n# assert b'Row Details' in resp.content\n\n\n# def test_users_row_create_view_noauth(client):\n# \"\"\"\n# Verify that the users row create view redirects as expected\n# - Should not be able to access without an authenticated user\n# \"\"\"\n\n# url = urls.reverse('row_create')\n# resp = client.get(url)\n\n# assert resp.status_code == 302, \"Expect redirect to login page\"\n# assert resp.url == f'/user/login/?next={url}'\n\n\n# @pytest.mark.parametrize('name, subtitle', [\n# ('testrow', 'Fake row for test coverage'),\n# ('', ''),\n# ])\n# def test_users_row_create_view_auth(testuser, client, name, subtitle):\n# \"\"\"\n# Verify that the users row create view renders as expected\n# \"\"\"\n\n# login = client.login(username='testuser', password='test123')\n# assert login, 'Verify successful login'\n\n# data = {\n# 'name': name,\n# 'subtitle': subtitle,\n# }\n\n# url = urls.reverse('row_create')\n# resp = client.post(url, data, follow_redirects=True)\n\n# form = RowForm(data)\n\n# if form.is_valid():\n# assert resp.status_code == 302\n# assert resp.url == '/user/rows/', 'Should redirect to rows list on success'\n# else:\n# assert resp.status_code == 200\n# assert b'Create/Update Row' in resp.content\n\n\n# def test_users_row_update_view_noauth(client):\n# \"\"\"\n# Verify that the users row update view redirects as expected\n# - Should not be able to access without an authenticated user\n# \"\"\"\n\n# url = urls.reverse('row_update', kwargs={'pk': 1})\n# resp = client.get(url)\n\n# assert resp.status_code == 302, \"Expect redirect to login page\"\n# assert resp.url == f'/user/login/?next={url}'\n\n\n# @pytest.mark.parametrize('name, subtitle', [\n# ('testrow', 'Fake row for test coverage'),\n# ('', ''),\n# ])\n# def test_users_row_update_view_auth(testuser, client, name, subtitle):\n# \"\"\"\n# Verify that the users row update view renders as expected\n# \"\"\"\n\n# login 
= client.login(username='testuser', password='test123')\n# assert login, 'Verify successful login'\n\n# data = {\n# 'name': name,\n# 'subtitle': subtitle,\n# }\n\n# url = urls.reverse('row_update', kwargs={'pk': 1})\n# resp = client.post(url, data, follow_redirects=True)\n\n# form = RowForm(data)\n\n# if form.is_valid():\n# assert resp.status_code == 302\n# assert resp.url == '/user/rows/', 'Should redirect to rows list on success'\n# else:\n# assert resp.status_code == 200\n# assert b'Create/Update Row' in resp.content\n\n\n# def test_users_row_delete_view_noauth(client):\n# \"\"\"\n# Verify that the users row delete view redirects as expected\n# - Should not be able to access without an authenticated user\n# \"\"\"\n\n# url = urls.reverse('row_update', kwargs={'pk': 1})\n# resp = client.get(url)\n\n# assert resp.status_code == 302, \"Expect redirect to login page\"\n# assert resp.url == f'/user/login/?next={url}'\n\n\n# def test_users_row_delete_view_auth(testuser, client):\n# \"\"\"\n# Verify that the users row delete view renders as expected\n# \"\"\"\n\n# login = client.login(username='testuser', password='test123')\n# assert login, 'Verify successful login'\n\n# url = urls.reverse('row_delete', kwargs={'pk': 1})\n\n# resp = client.get(url)\n\n# assert resp.status_code == 200\n# assert b'Delete Row' in resp.content\n\n# resp = client.post(url)\n\n# assert resp.status_code == 302\n# assert resp.url == '/user/rows/'\n"
},
{
"alpha_fraction": 0.5626652836799622,
"alphanum_fraction": 0.5719196200370789,
"avg_line_length": 28.317829132080078,
"blob_id": "716b769a2613c2e82167b8d68985379311905ab2",
"content_id": "882600ee6681cc7f8358cf9f52f483831268f75a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3782,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 129,
"path": "/tmdb/models.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.urls import reverse\n\nfrom users.models import CustomUser\n\n\nclass MediaItem(models.Model):\n\n media_id = models.IntegerField(primary_key=True)\n name = models.CharField(max_length=100, default='')\n release_date = models.CharField(max_length=10, default='')\n description = models.CharField(max_length=500, default='')\n poster_path = models.CharField(max_length=500, default='')\n popularity = models.DecimalField(\n default=0.00, decimal_places=3, max_digits=6)\n\n def __str__(self):\n \"\"\"\n Provide string representation of row instance\n \"\"\"\n return f\"{self.name}\"\n\n def get_absolute_url(self):\n \"\"\"\n Provide full path to row update page\n \"\"\"\n return reverse('tmdb-mediaitem-detail', kwargs={'pk': self.pk})\n\n\nclass Row(models.Model):\n \"\"\"\n Custom model for user rows\n \"\"\"\n TRENDING_MOVIES = 0\n POPULAR_MOVIES = 1\n RECENT_MOVIES = 2\n\n TRENDING_TVSHOWS = 3\n POPULAR_TVSHOWS = 4\n RECENT_TVSHOWS = 5\n\n TRENDING_ALL = 6\n\n CATEGORY_CHOICES = [\n (0, 'Trending Movies'),\n (1, 'Popular Movies'),\n (2, 'Recent Movies'),\n (3, 'Trending TV Shows'),\n (4, 'Popular TV Shows'),\n (5, 'Recent TV Shows'),\n (6, 'All Trending Media'),\n ]\n\n user = models.ForeignKey(\n CustomUser,\n on_delete=models.CASCADE,\n )\n media_list = models.ManyToManyField(MediaItem)\n media_type = models.CharField(\n verbose_name='media type',\n max_length=5,\n default='movie'\n )\n category = models.IntegerField(\n choices=CATEGORY_CHOICES,\n default=TRENDING_MOVIES,\n verbose_name='row category'\n )\n is_protected = models.BooleanField(\n default=False,\n verbose_name='protected'\n )\n name = models.CharField(\n verbose_name='row name',\n max_length=50,\n default='Trending Movies'\n )\n shortname = models.CharField(\n verbose_name='row name',\n max_length=50,\n default='tmovies'\n )\n\n class Meta:\n verbose_name = 'Custom User Row'\n verbose_name_plural = 'Custom User Rows'\n\n def __str__(self):\n \"\"\"\n Provide string representation of row instance\n \"\"\"\n return f\"{self.user.username}: {self.name} (id={self.id})\"\n\n def get_absolute_url(self):\n \"\"\"\n Provide full path to row update page\n \"\"\"\n return reverse('tmdb-row-detail', kwargs={'pk': self.pk})\n\n def save(self, *args, **kwargs):\n if self.category == Row.TRENDING_MOVIES:\n self.name = 'Trending Movies'\n self.shortname = 'tmovies'\n self.media_type = 'movie'\n elif self.category == Row.POPULAR_MOVIES:\n self.name = 'Popular Movies'\n self.shortname = 'pmovies'\n self.media_type = 'movie'\n elif self.category == Row.RECENT_MOVIES:\n self.name = 'Recent Movies'\n self.shortname = 'rmovies'\n self.media_type = 'movie'\n elif self.category == Row.TRENDING_TVSHOWS:\n self.name = 'Trending TV Shows'\n self.shortname = 'tshows'\n self.media_type = 'tv'\n elif self.category == Row.POPULAR_TVSHOWS:\n self.name = 'Popular TV Shows'\n self.shortname = 'pshows'\n self.media_type = 'tv'\n elif self.category == Row.RECENT_TVSHOWS:\n self.name = 'Recent Movies'\n self.shortname = 'rshows'\n self.media_type = 'tv'\n elif self.category == Row.TRENDING_ALL:\n self.name = 'All Trending Media'\n self.shortname = 'trending'\n self.media_type = 'all'\n super(Row, self).save(*args, **kwargs)\n"
},
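Row.save derives name, shortname and media_type from category through a chain of elif branches. A sketch of an equivalent table-driven mapping, which keeps the three derived values for each category on one line and makes adding a category a one-line change; inside the model, the dict lookup would replace the whole elif chain:

```python
# Sketch of a table-driven alternative to the elif chain in Row.save():
# category -> (name, shortname, media_type), using the model's integer
# constants. Behavior-equivalent for all seven categories.
CATEGORY_FIELDS = {
    0: ('Trending Movies', 'tmovies', 'movie'),      # TRENDING_MOVIES
    1: ('Popular Movies', 'pmovies', 'movie'),       # POPULAR_MOVIES
    2: ('Recent Movies', 'rmovies', 'movie'),        # RECENT_MOVIES
    3: ('Trending TV Shows', 'tshows', 'tv'),        # TRENDING_TVSHOWS
    4: ('Popular TV Shows', 'pshows', 'tv'),         # POPULAR_TVSHOWS
    5: ('Recent TV Shows', 'rshows', 'tv'),          # RECENT_TVSHOWS
    6: ('All Trending Media', 'trending', 'all'),    # TRENDING_ALL
}


def derive_row_fields(category):
    # Unknown categories fall back to the model's field defaults.
    return CATEGORY_FIELDS.get(category, ('Trending Movies', 'tmovies', 'movie'))


assert derive_row_fields(5) == ('Recent TV Shows', 'rshows', 'tv')
```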
{
"alpha_fraction": 0.6307000517845154,
"alphanum_fraction": 0.6332691311836243,
"avg_line_length": 22.590909957885742,
"blob_id": "67c2e40368685b47f7437b09b1d0338b0fcaa064",
"content_id": "b32433517bd47b08f316393249e4d1400fa1421d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1557,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 66,
"path": "/main/views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"Processing logic for application requests.\"\"\"\nfrom django.shortcuts import render\n\nfrom tmdb import api_wrapper\nfrom tmdb.models import Row\n\n\ndef index(request):\n \"\"\"\n Default homepage view\n\n Arguments:\n request {HTTPRequestObject} -- Contains information about the user\n request including the method (GET, POST, etc.)\n\n Returns:\n HTML Template -- Renders the default template for the view\n \"\"\"\n\n movies = api_wrapper.get_trending_movies()\n shows = api_wrapper.get_trending_shows()\n rows = None\n\n if request.user.is_authenticated:\n user = request.user\n rows = Row.objects.filter(user_id=user.id)\n\n context = {\n 'movies': movies['results'][:12],\n 'shows': shows['results'][:12],\n 'rows': rows,\n 'model': Row,\n }\n return render(request, 'main/index.html', context)\n\n\ndef about(request):\n \"\"\"\n About page view\n \"\"\"\n return render(request, 'main/about.html')\n\n\ndef contact(request):\n \"\"\"\n Contact page view\n \"\"\"\n return render(request, 'main/contact.html')\n\n\n# dont like that this view makes another api call\n# we already have the info from the index view\n# need to figure out a way to pass context data between views\ndef detail(request, media_type, media_id):\n \"\"\"\n Contact page view\n \"\"\"\n media = api_wrapper.get_media_selection(\n media_id=media_id, media_type=media_type)\n\n context = {\n 'media': media,\n 'media_type': media_type\n }\n\n return render(request, 'main/detail.html', context)\n"
},
{
"alpha_fraction": 0.5446934103965759,
"alphanum_fraction": 0.5446934103965759,
"avg_line_length": 38.42718505859375,
"blob_id": "2c46054d7ff1ff2cba1a82a78296e69838e472b0",
"content_id": "9d32c2f8760a77a96907ce7648f5bb611a008377",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4061,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 103,
"path": "/cms/ajax_views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "from django.http import JsonResponse\nfrom django import template\nfrom django.template.loader import render_to_string\n\n\nclass AjaxCreateUpdateView:\n def get(self, request, *args, **kwargs):\n if 'pk' not in self.kwargs:\n self.object = None\n else:\n self.object = self.get_object()\n context = self.get_context_data()\n self.ajax_partial = 'ajax/{}_form_partial.html'.format(\n self.model.__name__.lower())\n self.ajax_list = 'ajax/{}_list_partial.html'.format(\n self.model.__name__.lower())\n if request.is_ajax():\n try:\n html_form = render_to_string(\n self.ajax_partial, context, request)\n except template.TemplateDoesNotExist:\n html_form = render_to_string(\n 'cms/ajax/create_update_form_partial.html', context,\n request)\n return JsonResponse({'html_form': html_form})\n else:\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['object_list'] = self.model.objects.all()\n return context\n\n def form_valid(self, form):\n self.ajax_partial = 'ajax/{}_form_partial.html'.format(\n self.model.__name__.lower())\n self.ajax_list = 'ajax/{}_list_partial.html'.format(\n self.model.__name__.lower())\n data = dict()\n context = self.get_context_data()\n if form.is_valid():\n form.save()\n data['form_is_valid'] = True\n try:\n data['list'] = render_to_string(\n self.ajax_list, context, self.request)\n except template.TemplateDoesNotExist:\n data['list'] = render_to_string(\n 'cms/ajax/list_partial.html', context, self.request)\n else:\n data['form_is_valid'] = False\n try:\n data['html_form'] = render_to_string(\n self.ajax_partial, context, request=self.request)\n except template.TemplateDoesNotExist:\n data['html_form'] = render_to_string(\n 'cms/ajax/create_update_form_partial',\n context, request=self.request)\n\n if self.request.is_ajax():\n return JsonResponse(data)\n else:\n return super().form_valid(form)\n\n\nclass AjaxDeleteView:\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n context = self.get_context_data()\n self.ajax_partial = 'ajax/delete_partial.html'\n self.ajax_list = 'ajax/{}_list_partial.html'.format(\n self.model.__name__.lower())\n if request.is_ajax():\n try:\n html_form = render_to_string(\n self.ajax_partial, context, request)\n except template.TemplateDoesNotExist:\n html_form = render_to_string(\n 'cms/ajax/delete_partial.html', context, request)\n return JsonResponse({'html_form': html_form})\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, *args, **kwargs):\n self.ajax_partial = 'ajax/delete_partial.html'\n self.ajax_list = 'ajax/{}_list_partial.html'.format(\n self.model.__name__.lower())\n if self.request.is_ajax():\n self.object = self.get_object()\n self.object.delete()\n data = dict()\n data['form_is_valid'] = True\n context = self.get_context_data()\n context['object_list'] = self.model.objects.all()\n try:\n data['list'] = render_to_string(\n self.ajax_list, context, self.request)\n except template.TemplateDoesNotExist:\n data['list'] = render_to_string(\n 'cms/ajax/list_partial.html', context, self.request)\n return JsonResponse(data)\n else:\n return self.delete(*args, **kwargs)\n"
},
{
"alpha_fraction": 0.6390344500541687,
"alphanum_fraction": 0.642402708530426,
"avg_line_length": 28.36263656616211,
"blob_id": "2aa1140012cfcb73566f1804498898f13877d538",
"content_id": "8e25efe19ea77fab582407927193389ea135a75b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5344,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 182,
"path": "/tmdb/tests/test_api_wrapper.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# pylint: disable=redefined-outer-name\n\n\"\"\"\nTest cases for 'tmdb/api_wrapper.py'\n\"\"\"\n\nimport pytest\nfrom tmdb import api_wrapper\n\nMAX_LENGTH = 20\n\n# crude way to implement tests for api wrapper\n# could probably do better by mocking the actual json\n# i was lazy :( --chris\n\[email protected]\ndef mock_api_wrapper(mocker):\n \"\"\"\n Mock the TMDB API responses\n - Avoids having to hit the external endpoint to run tests\n \"\"\"\n mock_get = mocker.patch('tmdb.api_wrapper.requests.get', autospec=True)\n\n api_result = {\"page\":1}\n\n mock_response = mocker.Mock()\n mock_response.json.return_value = api_result\n\n mock_get.return_value = mock_response\n\n return api_result\n\[email protected]('media, search_term', [\n (0, 'captain'),\n (1, 'captain'),\n (2, 'captain'),\n (None, 'captain'),\n])\ndef test_search(mock_api_wrapper, media, search_term):\n \"\"\"\n Verifies API call for search successfully returns a response\n \"\"\"\n\n resp = api_wrapper.search(media=media, search_term=search_term)\n if media is None:\n assert resp is None\n else:\n assert resp == mock_api_wrapper\n\ndef test_get_trending_movies(mock_api_wrapper):\n \"\"\"\n Verifies API call for trending movies successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_trending_movies()\n assert resp == mock_api_wrapper\n\ndef test_get_trending_shows(mock_api_wrapper):\n \"\"\"\n Verifies API call for trending shows successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_trending_shows()\n assert resp == mock_api_wrapper\n\ndef test_get_popular_shows(mock_api_wrapper):\n \"\"\"\n Verifies API call for popular shows successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_popular_shows()\n assert resp == mock_api_wrapper\n\ndef test_get_airing_shows(mock_api_wrapper):\n \"\"\"\n Verifies API call for airing shows successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_airing_shows()\n assert resp == mock_api_wrapper\n\ndef test_get_now_playing_movies(mock_api_wrapper):\n \"\"\"\n Verifies API call for now playing movies successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_now_playing_movies()\n assert resp == mock_api_wrapper\n\ndef test_get_popular_movies(mock_api_wrapper):\n \"\"\"\n Verifies API call for popular movies successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_popular_movies()\n assert resp == mock_api_wrapper\n\ndef test_get_media_selection(mock_api_wrapper):\n \"\"\"\n Verifies API call for movie selection successfully returns a response\n \"\"\"\n\n resp = api_wrapper.get_media_selection(media_type='movie', media_id=15)\n assert resp == mock_api_wrapper\n\n\n# def test_movie_search_row():\n# \"\"\"\n# Verify able to add a search row with results from movies\n# \"\"\"\n# session = Session() # not sure if this is the right way to do this -cw\n# # Also tests add row\n# new_row_num = session.search_entries('Captain', SearchScope.MOVIE)\n# # Inspect row contents here\n# # movies = session.rows[new_row_num].contents\n# # for movie in movies:\n# # print(f'{movie.name}\\n')\n# assert new_row_num in session.rows\n\n# def test_tv_search_row():\n# \"\"\"\n# Verify able to add a search row with results from tv shows\n# \"\"\"\n# session = Session() # not sure if this is the right way to do this -cw\n# # Also tests add row\n# new_row_num = session.search_entries('Captain', SearchScope.TV)\n# # Inspect row contents here\n# # movies = session.rows[new_row_num].contents\n# # for movie in movies:\n# # 
print(f'{movie.name}\\n')\n# assert new_row_num in session.rows\n\n# def test_all_search_row():\n# \"\"\"\n# Verify able to add a row with results from both tv and movie\n# \"\"\"\n# # bad type list to keep types that are not allowed in the results\n# bad_type_list = ['person']\n# session = Session()\n# new_row_num = session.search_entries('Captain', SearchScope.ALL)\n\n# assert new_row_num in session.rows\n# # Verify that the search method properly parsed\n# # out all the bad entries and refilled the results page\n# assert len(session.rows[new_row_num]) == MAX_LENGTH\n\n# for item in session.rows[new_row_num]:\n# assert item['media_type'] not in bad_type_list\n\n# def test_delete_row():\n# \"\"\"\n# Verify able to delete a row\n# \"\"\"\n# session = Session() # not sure if this is the right way to do this -cw\n# new_row_num = session.search_movie('Delete')\n# assert new_row_num in session.rows\n# # Show rows before delete\n# for row, val in session.rows.items():\n# print(f'Row {row}: {val.name}\\n')\n# session.delete_row(new_row_num)\n# assert new_row_num not in session.rows\n# # Show rows after delete\n# for row, val in session.rows.items():\n# print(f'Row {row}: {val.name}\\n')\n\n# def test_filter_genre():\n# \"\"\"\n# Test that genre sort works and that it sorts correctly\n# \"\"\"\n# session = Session()\n# row = session.rows['popular']\n# # Filter popular movies by fantasy action\n# genre_set = {28, 14}\n# result = row.filter_genre(genre_set)\n# assert 28 in result[0].genre, 14 in result[0].genre\n\n\n\n# if __name__ == '__main__':\n# test_get_movie()\n# test_delete_row()\n# test_all_search_row()\n"
},
{
"alpha_fraction": 0.6229712963104248,
"alphanum_fraction": 0.6229712963104248,
"avg_line_length": 29.226415634155273,
"blob_id": "3eff1dbd6f5288c7a1a67a4d9832c91e67aef8c1",
"content_id": "0e0fb8361db2b622eb8091c46bb324acdc6c92c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1602,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 53,
"path": "/users/views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nViews for User account management\n\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\n\nfrom .forms import ProfileUpdateForm, UserRegistrationForm, UserUpdateForm\n\n\ndef register(request):\n \"\"\"\n View for new user registration.\n \"\"\"\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(\n request, f\"Account '{username}' has been created! Please login...\")\n return redirect('login')\n else:\n form = UserRegistrationForm()\n\n return render(request, 'users/register.html', {'form': form})\n\n\n@login_required\ndef profile(request):\n \"\"\"\n View for user profile (requires a login).\n \"\"\"\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(\n request.POST, request.FILES, instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(\n request, f\"Your account has been updated.\")\n return redirect('profile')\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form\n }\n\n return render(request, 'users/profile.html', context)\n"
},
{
"alpha_fraction": 0.6928306818008423,
"alphanum_fraction": 0.7163164615631104,
"avg_line_length": 46.617645263671875,
"blob_id": "efbaf10eb9864ee87cfe76ea3a7ca1ac012b05e1",
"content_id": "474573b4d77516cc3b887375503e6642c793403f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1618,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 34,
"path": "/notes.txt",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "Setup-Dev:\n----------------------\n- install python (i used: https://www.python.org/ftp/python/3.7.3/python-3.7.3-amd64.exe)\n- install git (i used: https://github.com/git-for-windows/git/releases/download/v2.21.0.windows.1/Git-2.21.0-64-bit.exe)\n- install vscode (i used: https://aka.ms/win32-x64-user-stable)\n- create local projects folder (ie. ~/sweng894/)\n- open powershell with admin privs and run:\n Set-ExecutionPolicy RemoteSigned\n- open terminal into local projects folder and run:\n python3 -m venv .env\n git clone https://github.com/work-ed/PSU_Capstone.git\n cd PSU_Capstone\n code .\n- inside vscode use ctrl+shift+P and type Python: Select Interpreter\n- choose the one that starts with .\\.env...\n- use ctrl+shift+` to open a terminal within vscode (this should take you directly into the virtual environment we created earlier '.env')\n- from within integrated terminal run:\n pip install -r requirements.txt\n- you should be able to run the dev site with:\n python manage.py runserver\n- then ctrl+click then link: http://127.0.0.1:8000\n- visit https://code.visualstudio.com/docs/python/tutorial-django#_create-a-debugger-launch-profile for help with debugger setup\n\n\nInfo:\n----------------------\n- basic git stuff:\n (1st time) git clone https://github.com/work-ed/PSU_Capstone.git\n git pull \n git add .\n git commit -m \"<briefly explain what changes you made>\"\n git push\n- we should discuss how we are going to do branching, or who is going to work on what features\n- i will periodically pull what is in the github repo and update the production site so we can see offical status"
},
{
"alpha_fraction": 0.51241534948349,
"alphanum_fraction": 0.542889416217804,
"avg_line_length": 25.84848403930664,
"blob_id": "c21b0499e6f18cc395496312831d7c187f0d2c47",
"content_id": "9846a9604e91909c992025d349e37cc29a657d52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 886,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 33,
"path": "/tmdb/migrations/0003_auto_20190713_1016.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-13 10:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tmdb', '0002_row_user'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mediaitem',\n name='description',\n field=models.CharField(default='', max_length=200),\n ),\n migrations.AddField(\n model_name='mediaitem',\n name='name',\n field=models.CharField(default='', max_length=50),\n ),\n migrations.AddField(\n model_name='mediaitem',\n name='popularity',\n field=models.IntegerField(default=0),\n ),\n migrations.AddField(\n model_name='mediaitem',\n name='realease_date',\n field=models.CharField(default='', max_length=50),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6653746962547302,
"alphanum_fraction": 0.6653746962547302,
"avg_line_length": 19.36842155456543,
"blob_id": "99fd759abdc905f50f04b57fbb5c6c5dd66e8b89",
"content_id": "0ae9256f83a70e12586f31c1bb44cb772a86b5d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 774,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 38,
"path": "/users/forms.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCustom Forms for 'users' application\n\"\"\"\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\nfrom .models import CustomUser, Profile\n\n\nclass UserRegistrationForm(UserCreationForm):\n \"\"\"\n User registration form for 'CustomUser' model\n \"\"\"\n\n class Meta:\n model = CustomUser\n fields = ('username',)\n\n\nclass UserUpdateForm(UserChangeForm):\n \"\"\"\n Enables user to update their username and email\n \"\"\"\n\n class Meta:\n model = CustomUser\n fields = ('username',)\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n \"\"\"\n Enables user to update their profile image\n \"\"\"\n\n class Meta:\n model = Profile\n fields = ('image',)\n"
},
{
"alpha_fraction": 0.6700984835624695,
"alphanum_fraction": 0.678155779838562,
"avg_line_length": 28.01298713684082,
"blob_id": "d8fd3ff8f9993c6f24210035ce5f93e4b104b4f3",
"content_id": "92740548888739124b7e3e5747fabb19b630d670",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2234,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 77,
"path": "/main/tests/test_views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest cases designed to cover 'main/views.py'\n\"\"\"\n\nfrom django import urls\nfrom django.contrib import auth\n\ndef test_main_index_view(mocker, client):\n \"\"\"\n Verify that the index view renders as expected\n \"\"\"\n mocker.patch('tmdb.api_wrapper.get_trending_movies', autospec=True)\n mocker.patch('tmdb.api_wrapper.get_trending_shows', autospec=True)\n url = urls.reverse('index')\n viewname = urls.resolve(url).view_name\n resp = client.get(url)\n\n assert viewname == 'index'\n assert resp.status_code == 200\n assert b'TAMA' in resp.content\n\ndef test_main_index_authenticated_view(mocker, client, django_user_model):\n \"\"\"\n Verify that the index view renders as expected with authenticated user\n \"\"\"\n mocker.patch('tmdb.api_wrapper.get_trending_movies', autospec=True)\n mocker.patch('tmdb.api_wrapper.get_trending_shows', autospec=True)\n\n username = \"foo\"\n password = \"bar\"\n\n django_user_model.objects.create_user(username=username, password=password)\n client.login(username=username, password=password)\n\n client.get(urls.reverse('index'))\n\n user = auth.get_user(client)\n\n assert user.is_authenticated\n\ndef test_main_about_view(client):\n \"\"\"\n Verify that the index view renders as expected\n \"\"\"\n url = urls.reverse('about')\n viewname = urls.resolve(url).view_name\n resp = client.get(url)\n\n assert viewname == 'about'\n assert resp.status_code == 200\n assert b'About' in resp.content\n\ndef test_main_contact_view(client):\n \"\"\"\n Verify that the index view renders as expected\n \"\"\"\n url = urls.reverse('contact')\n viewname = urls.resolve(url).view_name\n resp = client.get(url)\n\n assert viewname == 'contact'\n assert resp.status_code == 200\n assert b'Contact' in resp.content\n\ndef test_main_detail_view(mocker, client):\n \"\"\"\n Verify that the detail view renders as expected\n \"\"\"\n mocker.patch('tmdb.api_wrapper.get_media_selection', autospec=True)\n\n url = urls.reverse('detail', kwargs={'media_type': 'movie', 'media_id': 399579})\n viewname = urls.resolve(url).view_name\n resp = client.get(url)\n\n assert viewname == 'detail'\n assert resp.status_code == 200\n assert b'Detail' in resp.content\n"
},
{
"alpha_fraction": 0.6597510576248169,
"alphanum_fraction": 0.6597510576248169,
"avg_line_length": 16.214284896850586,
"blob_id": "60add53d1ef47a8a1d66ac01028877e9a798ef4b",
"content_id": "b2ef7f9b944ed5be5b17dac8aedffa837f421158",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/users/apps.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nConfiguration file for the 'users' application\n\"\"\"\nfrom django.apps import AppConfig\n\n\nclass UsersConfig(AppConfig):\n \"\"\"\n Application specific settings\n \"\"\"\n name = 'users'\n\n def ready(self):\n import users.signals\n"
},
{
"alpha_fraction": 0.6835165023803711,
"alphanum_fraction": 0.6835165023803711,
"avg_line_length": 24.27777862548828,
"blob_id": "d4e734dec3ca34736c282ff329e47a16c5ccaa0c",
"content_id": "dc57db89263477213971157c693ead377f8b6d7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 18,
"path": "/tama/views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"Processing logic for application requests.\"\"\"\nfrom django.shortcuts import render\nfrom django.conf import settings\n\n\ndef index(request):\n \"\"\"\n Default homepage view\n\n Arguments:\n request {HTTPRequestObject} -- Contains information about the user\n request including the method (GET, POST, etc.)\n\n Returns:\n HTML Template -- Renders the default template for the view\n \"\"\"\n\n return render(request, 'index.html')\n"
},
{
"alpha_fraction": 0.5484710335731506,
"alphanum_fraction": 0.5666883587837219,
"avg_line_length": 41.69444274902344,
"blob_id": "257dc5bd0ae2cdd250b1e05373beedafa0ec9a95",
"content_id": "21c8d5dfab0bd982bcc797aa47722410652309a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1537,
"license_type": "no_license",
"max_line_length": 271,
"num_lines": 36,
"path": "/tmdb/migrations/0001_initial.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-13 08:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='MediaItem',\n fields=[\n ('media_id', models.IntegerField(primary_key=True, serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='Row',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('media_type', models.CharField(default='movie', max_length=5, verbose_name='media type')),\n ('category', models.IntegerField(choices=[(0, 'Trending Movies'), (1, 'Popular Movies'), (2, 'Recent Movies'), (3, 'Trending TV Shows'), (4, 'Popular TV Shows'), (5, 'Recent TV Shows'), (6, 'All Trending Media')], default=0, verbose_name='row category')),\n ('is_protected', models.BooleanField(default=False, verbose_name='protected')),\n ('name', models.CharField(default='Trending Movies', max_length=50, verbose_name='row name')),\n ('shortname', models.CharField(default='tmovies', max_length=50, verbose_name='row name')),\n ('media_list', models.ManyToManyField(to='tmdb.MediaItem')),\n ],\n options={\n 'verbose_name': 'Custom User Row',\n 'verbose_name_plural': 'Custom User Rows',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.6493159532546997,
"alphanum_fraction": 0.6566566824913025,
"avg_line_length": 32.29999923706055,
"blob_id": "a7cf2c2d1f384960f1cd5fd49c36b800073c50d2",
"content_id": "264acaa1e5e8c47a94f1feba88c70692a045115d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2997,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 90,
"path": "/users/tests/test_models.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest cases for user models\n\"\"\"\nfrom mixer.backend.django import mixer\nimport pytest\n\nfrom ..models import Profile, CustomUser\n\npytestmark = pytest.mark.django_db\n\n\[email protected]\ndef testuser():\n \"\"\"\n Helper method to create a test user\n \"\"\"\n return mixer.blend(CustomUser)\n\n\ndef test_user_profile_str(testuser):\n \"\"\"\n Verify the __str__ method of the profile model correctly returns expected value\n \"\"\"\n expected = f\"{testuser.username}'s Profile\"\n\n assert str(testuser.profile) == expected\n\n\n# Parameterize the input to test various scenarios\n# - Expand as needed to better test edge cases or special circumstances\[email protected]('height, width, expected', [\n (300, 300, False),\n (0, 0, False),\n (301, 1, True),\n (1, 301, True),\n])\ndef test_user_profile_save_resize_conditional(mocker, height, width, expected):\n \"\"\"\n Verify that the model save function executes resize logic based on conditional\n \"\"\"\n ### ----- SETUP ----- ###\n\n # Patch the Profile model parenet class 'save' function\n # - Makes the call but does nothing\n #\n # - We do this because we can't actually save a fake object\n # - Also, we arent testing the db transactions or the external library\n # - This test is specific to the image resizing branch of the model\n mocker.patch('users.models.models.Model.save', autospec=True)\n\n # Patch the Image class\n # Allows us to:\n # - Overwrite the return value(s) of the 'open' method\n # - Disable the 'thumbnail' function\n #\n # - Specifically, we don't want to have to have a test file on hand\n # - Nor do we want to create extra files just to test with\n # - This technique provides us with just what is required for our test\n mock_image = mocker.patch('users.models.Image', autospec=True)\n\n # Set the attributes we care about for the conditional logic test\n mock_image.open.return_value.height = height\n mock_image.open.return_value.width = width\n\n ### ----- TEST ----- ###\n\n # Create a profile object with random data\n profile = mixer.blend(Profile)\n\n # Call the model save method\n # - This will use our patched model parent class 'save' method,\n # and the patched Image class 'open' method\n # - The instantiated patched Image object will (auto) use all of our\n # parameterized values in seperate tests\n profile.save()\n\n # // ----- NOTE ----- ]]\n #\n # The main thing to realize here is that the image field of Profile\n # (i.e. 'Profile.image') will never be the same as the patched image\n # settings.\n #\n # This is because in our patched version we never actually save\n # the changes made. All we do is set it up, then run the functionality\n # against the unit of code we are testing.\n #\n # [[ ---------------- ]]\n\n # Test the mocked image object attributes with the same conditional logic as model\n assert (height > 300 or width > 300) == expected\n"
},
{
"alpha_fraction": 0.6804056167602539,
"alphanum_fraction": 0.6804056167602539,
"avg_line_length": 25.128204345703125,
"blob_id": "ca2a3c3ce287fea3568d42e1b3140204c210e696",
"content_id": "e13956a1cd5da55bd30b9cd9d333cad9d2868ebe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3057,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 117,
"path": "/tmdb/views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nViews for TMDB CRUD Operations\n\"\"\"\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom cms.views import (CmsListView, CmsDetailView, CmsCreateView,\n CmsUpdateView, CmsDeleteView)\nfrom cms.ajax_views import (AjaxCreateUpdateView, AjaxDeleteView)\n\nfrom .forms import RowForm, MediaItemForm\nfrom .models import Row, MediaItem\n\n\nclass RowListView(LoginRequiredMixin, CmsListView):\n \"\"\"\n Class-Based Generic List View\n \"\"\"\n template_name_suffix = '/list'\n model = Row\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n\nclass RowDetailView(LoginRequiredMixin, CmsDetailView):\n \"\"\"\n Class-Based Generic Detail View\n \"\"\"\n template_name_suffix = '/detail'\n model = Row\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n\nclass RowCreateView(LoginRequiredMixin, AjaxCreateUpdateView, CmsCreateView):\n \"\"\"\n Class-Based Generic Create View\n \"\"\"\n model = Row\n form_class = RowForm\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)\n\n\nclass RowUpdateView(LoginRequiredMixin, AjaxCreateUpdateView, CmsUpdateView):\n \"\"\"\n Class-Based Generic Update View\n \"\"\"\n model = Row\n form_class = RowForm\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n\nclass RowDeleteView(LoginRequiredMixin, AjaxDeleteView, CmsDeleteView):\n \"\"\"\n Class-Based Generic Delete View\n \"\"\"\n model = Row\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\nclass MediaItemListView(LoginRequiredMixin, CmsListView):\n \"\"\"\n Class-Based Generic List View\n \"\"\"\n template_name_suffix = '/list'\n model = MediaItem\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n\nclass MediaItemDetailView(LoginRequiredMixin, CmsDetailView):\n \"\"\"\n Class-Based Generic Detail View\n \"\"\"\n template_name_suffix = '/detail'\n model = MediaItem\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n\nclass MediaItemCreateView(LoginRequiredMixin, AjaxCreateUpdateView, CmsCreateView):\n \"\"\"\n Class-Based Generic Create View\n \"\"\"\n model = MediaItem\n form_class = MediaItemForm\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n response = super(MediaItemCreateView, self).form_valid(form)\n form.instance.user = self.request.user\n return response\n\n\nclass MediaItemUpdateView(LoginRequiredMixin, AjaxCreateUpdateView, CmsUpdateView):\n \"\"\"\n Class-Based Generic Update View\n \"\"\"\n model = MediaItem\n form_class = MediaItemForm\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n\n\nclass MediaItemDeleteView(LoginRequiredMixin, AjaxDeleteView, CmsDeleteView):\n \"\"\"\n Class-Based Generic Delete View\n \"\"\"\n model = MediaItem\n login_url = '/users/login/'\n redirect_field_name = 'redirect_to'\n"
},
{
"alpha_fraction": 0.49468085169792175,
"alphanum_fraction": 0.5771276354789734,
"avg_line_length": 19.88888931274414,
"blob_id": "8e9b837bac39201a3805a79ed468c0f8e1a606de",
"content_id": "5960b6456cc81c81a726e49f05b784279f47038b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 376,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/tmdb/migrations/0006_auto_20190713_1037.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-13 10:37\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tmdb', '0005_auto_20190713_1025'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='mediaitem',\n old_name='realease_date',\n new_name='release_date',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5648535490036011,
"alphanum_fraction": 0.715481162071228,
"avg_line_length": 38.83333206176758,
"blob_id": "160ab1357899cd99864a77d46bf3b64e6892a995",
"content_id": "92bd6366b1603d59c0ee459733d1ad721afb23de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 6,
"path": "/README.md",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# PSU_Capstone\nPSU - Graduate Degree - SWENG 894 Capstone\n\n- Production Site: http://178.128.184.254/\n- Current Test Coverage: http://178.128.184.254/htmlcov/ [no longer current - tied to old site]\n- Admin Site: http://178.128.184.254/admin/\n"
},
{
"alpha_fraction": 0.5529914498329163,
"alphanum_fraction": 0.5529914498329163,
"avg_line_length": 26.85714340209961,
"blob_id": "7c7c70bc687b65e08f07e8be0b11ee0c6d6734a4",
"content_id": "41ee1dde945f59ffdbe473c48bc52d5026c1c069",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 42,
"path": "/tmdb/services.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nModule to map TMDB results to TAMA 'MediaItem' objects\n\"\"\"\nfrom . import api_wrapper as tmdb\nfrom .models import MediaItem\n\n\ndef create_media_items(results, media_type):\n \"\"\"\n Locally creates and stores media items in the TAMA database for use in other functions\n\n Returns a list of ids created\n \"\"\"\n media_list = []\n\n for item in results:\n media_id = item['id']\n description = item['overview']\n popularity = item['popularity']\n poster_path = item['poster_path']\n\n if media_type == 'movie' or media_type == 'all':\n name = item['title']\n release_date = item['release_date']\n elif media_type == 'tv':\n name = item['name']\n release_date = item['first_air_date']\n\n item, _ = MediaItem.objects.update_or_create(\n media_id=media_id,\n defaults={\n 'name': name,\n 'release_date': release_date,\n 'description': description,\n 'popularity': popularity,\n 'poster_path': poster_path\n }\n )\n\n media_list.append(item.media_id)\n\n return media_list\n"
},
{
"alpha_fraction": 0.6404586434364319,
"alphanum_fraction": 0.6404586434364319,
"avg_line_length": 32.91666793823242,
"blob_id": "856ca61a449215512e0daf0a943fc3e14a5f026f",
"content_id": "3da0321d0fbba237c5388f1c5f21ce7aca622612",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1221,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 36,
"path": "/tmdb/signals.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nEvent handling for 'tmdb' application\n\"\"\"\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom . import api_wrapper as tmdb\nfrom .services import create_media_items\nfrom .models import Row\n\n\n@receiver(post_save, sender=Row)\ndef populate_row(sender, instance, created, **kwargs):\n \"\"\"\n Populates a row with data specific to the category created\n \"\"\"\n if created:\n if instance.shortname == 'tmovies':\n response = tmdb.get_trending_movies()\n elif instance.shortname == 'pmovies':\n response = tmdb.get_popular_movies()\n elif instance.shortname == 'rmovies':\n response = tmdb.get_trending_movies()\n elif instance.shortname == 'tshows':\n response = tmdb.get_trending_shows()\n elif instance.shortname == 'pshows':\n response = tmdb.get_popular_shows()\n elif instance.shortname == 'rshows':\n response = tmdb.get_trending_movies()\n elif instance.shortname == 'trending':\n response = tmdb.get_trending_movies()\n\n media_list = create_media_items(\n response['results'], instance.media_type)\n\n instance.media_list.add(*media_list)\n"
},
{
"alpha_fraction": 0.7422680258750916,
"alphanum_fraction": 0.7422680258750916,
"avg_line_length": 18.399999618530273,
"blob_id": "7c86b84f9e40223a7ddd5944ed89d05e960d5e47",
"content_id": "b36eb85410c25897bf322808f86da2b2d8a0eda9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 15,
"path": "/tmdb/admin.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAdmin page registration for 'tmdb' application models\n\"\"\"\nfrom django.contrib import admin\n\nfrom .models import Row, MediaItem\n\n\nclass RowAdmin(admin.ModelAdmin):\n model = Row\n filter_horizontal = ('media_list',)\n\n\nadmin.site.register(Row, RowAdmin)\nadmin.site.register(MediaItem)\n"
},
{
"alpha_fraction": 0.6541666388511658,
"alphanum_fraction": 0.6541666388511658,
"avg_line_length": 15,
"blob_id": "42decac8cf7457504f78d8d83e9470243da3e5b8",
"content_id": "83ab3ef9d1da7cdb0d59e9866040a0e1ddffc2f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 15,
"path": "/tmdb/urls.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nURL patterns for the 'tmdb' application\n\"\"\"\nfrom cms.patterns import apps_dict, get_patterns\n\napps_dict['tmdb'] = {\n 'app_name': 'tmdb',\n 'filename': 'views'\n}\n\nurlpatterns = [\n\n]\n\nurlpatterns += get_patterns(apps_dict=apps_dict)\n"
},
{
"alpha_fraction": 0.6737967729568481,
"alphanum_fraction": 0.6737967729568481,
"avg_line_length": 16,
"blob_id": "b1d5cc8d81c3789ad495efe84f0281b6213da1ee",
"content_id": "dbdf6a3e895d9ae16abd1a34b1b9b8d5f1e807bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 11,
"path": "/main/apps.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nConfiguration file for the 'main' application\n\"\"\"\nfrom django.apps import AppConfig\n\n\nclass MainConfig(AppConfig):\n \"\"\"\n Application specific settings\n \"\"\"\n name = 'main'\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 28.18181800842285,
"blob_id": "8ce054ad82ca8ae8c7491316fad017c2c7441890",
"content_id": "b40e5a3e833aba18f49ea4521feb9ae592f7fac7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 33,
"path": "/cms/templatetags/cms.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "from django import template\nfrom django.urls import reverse_lazy\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef create_title(context, format_string):\n model = context['model']\n return '{} {}'.format(model.__name__.title(), format_string)\n\n\[email protected]_tag(takes_context=True)\ndef create_url(context, format_string):\n model = context['model']\n app = model._meta.app_label\n model_name = model.__name__.lower()\n return reverse_lazy('{}-{}-{}'.format(app, model_name, format_string))\n\n\[email protected]_tag\ndef create_object_url(object, format_string):\n app = object._meta.app_label\n model_name = object.__class__.__name__.lower()\n url_link = reverse_lazy(\n '{}-{}-{}'.format(app, model_name, format_string),\n kwargs={'pk': object.pk})\n return url_link\n\n\[email protected]_tag(takes_context=True)\ndef get_model_name(context):\n model = context['model']\n return model.__name__.lower()\n"
},
{
"alpha_fraction": 0.6895368695259094,
"alphanum_fraction": 0.6895368695259094,
"avg_line_length": 21.423076629638672,
"blob_id": "35596069e4fe4c9f9480508ec9cfef76976ee148",
"content_id": "db88c59baf0502dd8ecda59394e350e715a66115",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 583,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 26,
"path": "/users/signals.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nEvent handling for 'users' application\n\"\"\"\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\n\n\nfrom .models import Profile, CustomUser\n\n\n@receiver(post_save, sender=CustomUser)\ndef create_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create a profile when a user is created\n \"\"\"\n if created:\n profile = Profile(user=instance)\n profile.save()\n\n\n@receiver(post_save, sender=CustomUser)\ndef save_profile(sender, instance, **kwargs):\n \"\"\"\n Saves a profile when it is updated\n \"\"\"\n instance.profile.save()\n"
},
{
"alpha_fraction": 0.6505494713783264,
"alphanum_fraction": 0.6505494713783264,
"avg_line_length": 31.5,
"blob_id": "b385912a8268ac0ef110e00d44993eb7e6fbc92d",
"content_id": "c5729993212d9452f0e7dbe11afb1645c6b52688",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1820,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 56,
"path": "/cms/views.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "from django.views.generic import TemplateView\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom .mixins import SuccessUrl\nfrom .forms import DynamicForm\n\n\nclass CmsView(TemplateView):\n template_name = \"cms/index.html\"\n\n\nclass CmsListView(ListView):\n template_name_suffix = '/list'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['model'] = self.model\n context['name'] = '{} List'.format(self.model.__name__)\n return context\n\n\nclass CmsDetailView(DetailView):\n template_name_suffix = '/detail'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['model'] = self.model\n context['list'] = '{} '.format(self.model.__name__.capitalize())\n context['name'] = '{} Detail'.format(self.get_object())\n return context\n\n\nclass CmsCreateView(DynamicForm, SuccessUrl, CreateView):\n template_name_suffix = '/form'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['model'] = self.model\n context['list'] = '{} '.format(self.model.__name__.capitalize())\n context['name'] = '{} Create'.format(self.model.__name__)\n return context\n\n\nclass CmsUpdateView(DynamicForm, SuccessUrl, UpdateView):\n template_name_suffix = '/form'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['model'] = self.model\n context['list'] = '{} '.format(self.model.__name__.capitalize())\n context['name'] = '{} Update'.format(self.get_object())\n return context\n\n\nclass CmsDeleteView(SuccessUrl, DeleteView):\n template_name_suffix = '/delete'\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7647058963775635,
"avg_line_length": 25.272727966308594,
"blob_id": "36ef647860889a1c5e39ee2fc9fe53fb444ea2ab",
"content_id": "5d129f061201b985148dff17e7d13f18a69f1486",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 578,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/users/admin.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nModel registration for 'users' application within Django admin site\n\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom .forms import UserRegistrationForm, UserUpdateForm\nfrom .models import CustomUser, Profile\n\n\nclass CustomUserAdmin(UserAdmin):\n \"\"\"\n Administrative helpers for CustomUser model within Django admin site\n \"\"\"\n add_form = UserRegistrationForm\n form = UserUpdateForm\n model = CustomUser\n list_display = ['username', ]\n\n\nadmin.site.register(CustomUser, CustomUserAdmin)\nadmin.site.register(Profile)\n"
},
{
"alpha_fraction": 0.593794047832489,
"alphanum_fraction": 0.593794047832489,
"avg_line_length": 18.69444465637207,
"blob_id": "6fad7dafaf31fdb213b3989284348f833053d463",
"content_id": "b5780da2ac3d333735850d4b0d6072ac868f9cc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 36,
"path": "/tmdb/forms.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCustom Forms for 'tmdb' application\n\"\"\"\nfrom django import forms\nfrom cms.forms import BootstrapHelperForm\n\nfrom .models import Row, MediaItem\n\n\nclass RowForm(BootstrapHelperForm, forms.ModelForm):\n \"\"\"\n Form definition for Row\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta definition for RowForm\n \"\"\"\n\n model = Row\n fields = ('category', 'is_protected',)\n\n\nclass MediaItemForm(BootstrapHelperForm, forms.ModelForm):\n \"\"\"\n Form definition for MediaItem\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta definition for MediaItem\n \"\"\"\n\n model = MediaItem\n fields = ('media_id', 'name', 'release_date',\n 'description', 'popularity')\n"
},
{
"alpha_fraction": 0.6491803526878357,
"alphanum_fraction": 0.6491803526878357,
"avg_line_length": 24.41666603088379,
"blob_id": "f024615f2a8e4e3e3bfc09a5649518e5adeffd03",
"content_id": "d8211b6890a2fce2c1b874c4654566be7133a1f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 610,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 24,
"path": "/tmdb/tests/test_models.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest cases for tmdb models\n\"\"\"\nfrom mixer.backend.django import mixer\nimport pytest\n\nfrom ..models import Row\n\npytestmark = pytest.mark.django_db\n\n\ndef test_tmdb_row_str(db):\n \"\"\"\n Verify that the __str__ method on row returns the expected value\n \"\"\"\n row = mixer.blend(Row, name='Trending Movies')\n assert str(row) == f\"{row.user.username}: Trending Movies (id={row.id})\"\n\n# def test_rows_row_get_absolute_url(db):\n# \"\"\"\n# Test method to return full path to row update page\n# \"\"\"\n# row = mixer.blend(Row)\n# assert row.get_absolute_url() == f\"/user/rows/update/{row.pk}\"\n"
},
{
"alpha_fraction": 0.5916030406951904,
"alphanum_fraction": 0.6030534505844116,
"avg_line_length": 22.288888931274414,
"blob_id": "624b647c63e4475db0845ef4c24ef9c6661bf5de",
"content_id": "8d790180557366323d3aaa4b111806bb3b7667ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1048,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 45,
"path": "/users/models.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUser Models\n\"\"\"\nfrom django.db import models\nfrom django.contrib.auth import settings\nfrom django.contrib.auth.models import AbstractUser\n\nfrom PIL import Image\n\n\nclass CustomUser(AbstractUser):\n \"\"\"\n Custom model for users: extends Django default User\n \"\"\"\n pass\n\n def __str__(self):\n return self.username\n\n\nclass Profile(models.Model):\n \"\"\"\n Custom model for user profile\n \"\"\"\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n image = models.ImageField(default='default.jpg',\n upload_to='images/profile')\n\n def __str__(self):\n return f\"{self.user.username}'s Profile\"\n\n def save(self, *args, **kwargs):\n \"\"\"\n Resize profile image on save\n \"\"\"\n\n img = Image.open(self.image.path)\n\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(self.image.path)\n\n super(Profile, self).save(*args, **kwargs)\n"
},
{
"alpha_fraction": 0.5584025979042053,
"alphanum_fraction": 0.5637181401252747,
"avg_line_length": 36.625640869140625,
"blob_id": "5432ae6a46ade7386cf9fa31d52db135d495a492",
"content_id": "ce4edfdb4d5251a7f10dfff7a980e38bc5dd28ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7337,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 195,
"path": "/tmdb/session.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "######################################################################################################################\n# Author:\n# Date: 06-02-19\n# This file is a test file that I will use to run the python methods from the command line to test functionality\n#\n#\n# CW - need to move this code\n# maybe to the row model in main/models.py?\n#\n######################################################################################################################\n\"\"\"\n Current row layout:\n Trending Movie\n Popular Movie\n Now Playing\n Trending TV Show\n Popular TV Show\n TV Airing Today\n\"\"\"\n#from .tmdb import search_movie, get_popular_movies, get_popular_shows, get_trending_movies, get_trending_shows, get_now_playing_movies, get_airing_shows\nfrom tmdb import api_wrapper\nfrom tmdb.base_objects import TAMARow, TAMAObject\n\n\nclass Session():\n \"\"\"Session facsimile for testing\"\"\" \n\n def __init__(self):\n self.cached_movies = None\n # Set up default rows\n # TODO: Idea - default rows as separate dicts from other rows. May stop confusion\n self.rows = {'trending': self.create_trending_movie_row(), 'popular': self.create_popular_movie_row(),\n 'now_playing': self.create_now_playing_row(), 'trending_tv': self.create_trending_tv_row(),\n 'popular_tv': self.create_popular_tv_row(), 'airing_tv': self.create_airing_tv_row()}\n self.num_default_rows = len(self.rows)\n self.result_length = 200 # 10 pages of results. Can specify number of pages elsewhere\n self.num_search_pages = 10\n\n def add_row(self, name, subtitle, contents):\n row = TAMARow(name, subtitle)\n if type(contents) is dict:\n self.populate_row(contents['results'], row)\n elif type(contents) is list:\n self.populate_row(contents, row)\n else:\n return None\n row_num = len(self.rows) - self.num_default_rows\n row.index = row_num\n # May need a better naming system than index after defaults\n # Can't think of one though\n self.rows[row_num] = row\n # used to keep track of what number we are on\n return row_num\n\n def delete_row(self, name):\n if self.rows[name].deletable:\n del self.rows[name]\n\n def search_entries(self, query, media_type):\n \"\"\"\n :param query: string to search for\n :param media_type: TV (0), MOVIE(1), ALL(2)\n :return: new row wwith title Search:<Query> and contents of search\n \"\"\"\n name = 'Search: %s' % query\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n results = api_wrapper.search(media_type, query, page)\n page += 1\n # Iterate through results. If the result is of a bad type, remove it\n for item in results['result']:\n # ToDo: Compare this value toa list, so new bad types can be easily added - If necessary\n if not item['media_type'] == 'person':\n good_results.append(item)\n # Cap new results at max length. 
Can be changed in init\n if len(good_results) == self.result_length:\n break\n new_row = self.add_row(name, 'Search Results', good_results)\n\n return new_row\n\n def get_movie_from_selection(self, row, index):\n movie = self.rows[row].contents[index]\n return movie\n\n def create_popular_movie_row(self):\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n good_results.extend(api_wrapper.get_popular_movies(page)['result'])\n page += 1\n\n pop_row = TAMARow('Popular', 'Most popular movies of all time')\n # Popular movie row is always in this location so hardcode it\n pop_row.index = 1\n\n self.populate_row(good_results, pop_row)\n\n return pop_row\n\n def create_popular_tv_row(self):\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n good_results.extend(api_wrapper.get_popular_shows(page)['result'])\n page += 1\n\n pop_row = TAMARow('Popular TV', 'Most popular TV Shows of all time')\n pop_row.index = 4\n self.populate_row(good_results, pop_row)\n\n return pop_row\n\n def create_trending_movie_row(self):\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n good_results.extend(api_wrapper.get_trending_movies(page)['result'])\n page += 1\n trend_row = TAMARow('Trending', 'Currently Trending Movies')\n # Trending movies are always Row 0, so hardcode it\n trend_row.index = 0\n # This row cannot be deleted (can be chagneed just checking the thing)\n trend_row.deletable = False\n self.populate_row(good_results, trend_row)\n\n return trend_row\n\n def create_trending_tv_row(self):\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n good_results.extend(api_wrapper.get_trending_shows(page)['result'])\n page += 1\n trend_row = TAMARow('Trending TV', 'Currently Trending TV')\n trend_row.index = 3\n self.populate_row(good_results, trend_row)\n\n return trend_row\n\n def create_now_playing_row(self):\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n good_results.extend(api_wrapper.get_now_playing_movies(page)['result'])\n page += 1\n np_row = TAMARow('Now Playing')\n np_row.index = 2\n self.populate_row(good_results, np_row)\n\n return np_row\n\n def create_airing_tv_row(self):\n good_results = []\n page = 1\n while len(good_results) < self.result_length:\n good_results.extend(api_wrapper.get_airing_shows(page)['result'])\n page += 1\n np_row = TAMARow('Airing Today')\n np_row.index = 5\n self.populate_row(good_results, np_row)\n\n return np_row\n\n # Utility methods for interacting with rows\n @staticmethod\n def parse_json(item):\n \"\"\" Method to parse JSON from a Results style request (search, popularity, trending)\n Do Not Use with any other query types. \"\"\"\n tamaObj = None\n try:\n title = item['title']\n tamaObj = TAMAObject(item['id'], title, item['release_date'], item['genre_ids'],\n item['overview'], item['popularity'])\n except KeyError:\n # Sneaky (and very bad form but I am tired) way to get JSON String to identify itself\n # -- TV Shows don't have title key\n tamaObj = TAMAObject(item['id'], item['name'], item['first_air_date'], item['genre_ids'],\n item['overview'], item['popularity'])\n return tamaObj\n\n def populate_row(self, items, row):\n for i, item in enumerate(items):\n temp = self.parse_json(item)\n if temp:\n row.contents.append(temp)\n if i > 99:\n break\n\n\n# if __name__ == \"__main__\":\n# session = Session()\n# session.test()\n#print('Test complete')\n"
},
{
"alpha_fraction": 0.5689045786857605,
"alphanum_fraction": 0.5883392095565796,
"avg_line_length": 23.65217399597168,
"blob_id": "61c5c9c89c914aefd1d1cda2231d8539ba309c5a",
"content_id": "cbd8eef68c0d9e46f772973465208a7fdbe40046",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 566,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 23,
"path": "/main/templates/main/detail.html",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "{% extends \"layout.html\" %}\n\n{% block title %}Detail{% endblock title %}\n\n{% block content %}\n{% if media_type == 'movie' %}\n<h1><strong>{{ media.title }}</strong></h1>\n{% else %}\n<h1><strong>{{ media.name }}</strong></h1>\n{% endif %}\n<img class=\"img-fluid\" src=\"http://image.tmdb.org/t/p/w500{{ media.poster_path }}\">\n\n<h3><strong>Release Date</strong></h3>\n{% if media_type == 'movie' %}\n<p>{{ media.release_date }}</p>\n{% else %}\n<p>{{ media.first_air_date }}</p>\n{% endif %}\n<h3><strong>Overview</strong></h3>\n<p>{{ media.overview }}</p>\n\n\n{% endblock content %}"
},
{
"alpha_fraction": 0.5024875402450562,
"alphanum_fraction": 0.5870646834373474,
"avg_line_length": 21.33333396911621,
"blob_id": "eda4265ee5006047cb411e1c788109c72ee5e99e",
"content_id": "9f15ef968056bb29f5eb201885f113d2fce04486",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 402,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 18,
"path": "/tmdb/migrations/0007_mediaitem_poster_path.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-13 13:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tmdb', '0006_auto_20190713_1037'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mediaitem',\n name='poster_path',\n field=models.CharField(default='', max_length=500),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6526104211807251,
"alphanum_fraction": 0.6526104211807251,
"avg_line_length": 22.714284896850586,
"blob_id": "1e6bb6640efdb4177cd283b2c3ce48353529b9cb",
"content_id": "4fa194cf256d743ffad64be1f75b8eba40e0c70b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 21,
"path": "/main/urls.py",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "\"\"\"\nApplication specific URL handler\n\"\"\"\nfrom django.urls import path\n\nfrom cms.patterns import apps_dict, get_patterns\nfrom . import views\n\napps_dict['main'] = {\n 'app_name': 'main',\n 'filename': 'views'\n}\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='contact'),\n path('<str:media_type>/<int:media_id>/', views.detail, name='detail'),\n]\n\nurlpatterns += get_patterns(apps_dict=apps_dict)\n"
},
{
"alpha_fraction": 0.7338709831237793,
"alphanum_fraction": 0.7822580933570862,
"avg_line_length": 24,
"blob_id": "3f307cc63e10934a95ea6fbdb484b72ed2ac39c6",
"content_id": "5c6952c814fe59f241c4c040f17d7cb1de141f89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 5,
"path": "/.env.example",
"repo_name": "work-ed/PSU_Capstone",
"src_encoding": "UTF-8",
"text": "PYTHONWARNINGS='once'\nSECRET_KEY='<customkey>'\nDEBUG=True\nALLOWED_HOSTS=localhost, 127.0.0.1\nTHEMOVIEDBAPI_KEY='<customkey>'"
}
] | 37 |
frozen616/Autonomous_Vehicle_Design_Class
|
https://github.com/frozen616/Autonomous_Vehicle_Design_Class
|
36758301d80f4fcb01092417f73af1bedf7b2dc6
|
84b1a87496f5e1fb21c93d6cc1737fe45304a6ed
|
427e2b245eda016d1e80201365795f15f71ec630
|
refs/heads/master
| 2022-12-17T20:05:46.115386 | 2020-09-20T01:57:45 | 2020-09-20T01:57:45 | 296,955,133 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6295503377914429,
"alphanum_fraction": 0.6295503377914429,
"avg_line_length": 22.350000381469727,
"blob_id": "872ad95a8f8925c23abaf6409ee1de5bb50d814e",
"content_id": "cb753195edc2da8519089a45b8f63cb22abb6f5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 20,
"path": "/final/balancing_driving_bot/helper.cpp",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#include \"helper.h\"\n\n#include <Arduino.h> // Serial\n\nvoid printOffsetValues(const SensorData* offset)\n{\n Serial.print(\"offset values, xA: \");\n Serial.print(offset->x);\n Serial.print(\", yA: \");\n Serial.print(offset->y);\n Serial.print(\", zA: \");\n Serial.println(offset->z);\n\n Serial.print(\"offset values, xG: \");\n Serial.print(offset->xG);\n Serial.print(\", yG: \");\n Serial.print(offset->yG);\n Serial.print(\", zG: \");\n Serial.println(offset->zG);\n}\n"
},
{
"alpha_fraction": 0.5667372941970825,
"alphanum_fraction": 0.6097620725631714,
"avg_line_length": 22.41984748840332,
"blob_id": "0259eabd6fcacf2ff5b3c1a68591571add260f54",
"content_id": "8be2964d2870dcb1eb9c0dd788425678cbcf835d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 12272,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 524,
"path": "/HW3/p1/mympu6050.cpp",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#include <Arduino.h> // Serial\n#include <Wire.h>\n#include \"mympu6050.h\"\n#include <stdio.h>\n\n#define DEBUG\n\n\n\n\n#define SERIAL_DEBUG\n\n\n\n\nMyMPU6050::MyMPU6050(uint8_t i2cAddr)\n{\n devAddr = i2cAddr;\n}\n\n\nuint8_t MyMPU6050::getDeviceAddress()const\n{\n return devAddr;\n}\n\n\nuint8_t MyMPU6050::getConfigState(void)const\n{\n return readByte((uint8_t)MPU6050_RA_CONFIG); \n}\n\n\nuint8_t MyMPU6050::getAccelConfigState(void)const\n{\n return readByte((uint8_t)MPU6050_RA_ACCEL_CONFIG); \n}\n\n\nuint8_t MyMPU6050::getGyroConfigState(void)const\n{\n return readByte((uint8_t)MPU6050_RA_GYRO_CONFIG); \n}\n\n\nuint8_t MyMPU6050::getPowerManagement1State(void)const\n{\n return readByte(MPU6050_RA_PWR_MGMT_1);\n}\n\n/*\n * Wakes from sleep mode\n */\nvoid MyMPU6050::initialize(void)\n{\n setSleepEnabled(false); // wake up device\n}\n\n/*\n * Sets DEVICE_RESET bit to 1\n * All internal registers are reset to their default\n * values.\n * \n * DEVICE_RESET bit automatically clears to 0 once \n * the reset is complete.\n */\nvoid MyMPU6050::reset(void)\n{\n writeByte(MPU6050_RA_PWR_MGMT_1, 0x80);\n}\n\n/* Sets clock source\n * Parameter source\n * 0 Internal 8MHz oscillator\n * 1 PLL with X axis gyroscope reference\n * 2 PLL with Y axis gyroscope reference\n * 3 PLL with Z axis gyroscope reference\n * 4 PLL with external 32.768 kHz reference\n * 5 PLL with external 19.2 MHz reference\n * 6 reserved\n * 7 stops the clock and keeps the timing generator \n * in reset\n */\nvoid MyMPU6050::setClockSource(uint8_t source)\n{\n // clock select is bits 2:0\n writeBits(MPU6050_RA_PWR_MGMT_1, 2, 3, source);\n \n #ifdef SERIAL_DEBUG\n uint8_t b;\n b = readByte(MPU6050_RA_PWR_MGMT_1);\n\n Serial.println(F(\"\\nfunction: setClockSource\"));\n \n if ( (b & 0x07) == source)\n {\n Serial.println(F(\"success updating source\"));\n }\n else\n {\n Serial.print(F(\"error setting clock source, expected: \"));\n Serial.print(source);\n Serial.print(F(\", regVal: \"));\n Serial.println(b,HEX);\n }\n\n #endif\n}\n\n\n/* Sets accelerometer full scale range\n * \n * Parameter scale\n * 0 +- 2g\n * 1 +- 4g\n * 2 +- 8g\n * 3 +- 16g\n * \n */\nvoid MyMPU6050::setFullScaleAccelRange(uint8_t scale)\n{\n // full scale is bits 4:3\n writeBits(MPU6050_RA_ACCEL_CONFIG, 4, 2, scale);\n \n #ifdef SERIAL_DEBUG\n uint8_t b;\n b = readByte(MPU6050_RA_ACCEL_CONFIG);\n\n // full scale is bits 4:3\n uint8_t mask = 0x18; // b0001 1000\n \n Serial.println(F(\"\\nfunction:setFullScaleAccelRange\"));\n \n if( (b & mask)>>3 == scale)\n {\n Serial.print(F(\"Success setting accel scale to \"));\n Serial.println(scale, HEX); \n }\n else\n {\n Serial.print(F(\"error setting accel scale, expected: \"));\n Serial.print(scale);\n Serial.print(F(\", regVal: \"));\n Serial.println(b,HEX);\n }\n #endif \n\n}\n\n\n/* Sets gyroscope full scale range\n * \n * Parameter scale\n * 0 +- 250 deg/sec\n * 1 +- 500 deg/sec\n * 2 +- 1000 deg/sec\n * 3 +- 2000 deg/sec\n * \n */\nvoid MyMPU6050::setFullScaleGyroRange(uint8_t scale)\n{\n // full scale is bits 4:3\n writeBits(MPU6050_RA_GYRO_CONFIG, 4, 2, scale);\n \n #ifdef SERIAL_DEBUG\n uint8_t b;\n b = readByte(MPU6050_RA_GYRO_CONFIG);\n\n // full scale is bits 4:3\n uint8_t mask = 0x18; // b0001 1000\n \n Serial.println(F(\"\\nfunction setFullScaleGyroRange\"));\n \n if( (b & mask)>>3 == scale)\n {\n Serial.print(F(\"Success setting gyro scale to \"));\n Serial.println(scale, HEX); \n }\n else\n {\n Serial.print(F(\"error setting gyro scale, expected: \"));\n Serial.print(scale);\n 
Serial.print(F(\", regVal: \"));\n Serial.println(b,HEX);\n }\n #endif \n\n}\n\n\n/* Parameter\n * enabled - false wakes from sleep mode\n * - true puts device into sleep mode\n * \n * Register: Power Management 1\n * Bit 6: sleep bit \n * 1 - device is in low power sleep mode\n * 0 - device is not in sleep mode\n */\nvoid MyMPU6050::setSleepEnabled(bool enabled)\n{\n // sleep bit is 6\n writeBits(MPU6050_RA_PWR_MGMT_1, 6, 1, (uint8_t)enabled);\n\n #ifdef SERIAL_DEBUG\n uint8_t b;\n b = readByte(MPU6050_RA_PWR_MGMT_1);\n\n // sleep bit is 6\n uint8_t mask = 0x40; // b0100 0000\n\n //Serial.println(F(\"\\nfunction: setSleepEnabled\"));\n \n if( (b & mask)>>6 == enabled)\n {\n //Serial.print(F(\"Success setting sleep to \"));\n //Serial.println(enabled, HEX); \n }\n else\n {\n Serial.print(F(\"error setting sleep bit, expected: \"));\n Serial.print(enabled);\n Serial.print(F(\", regVal: \"));\n Serial.println(b,HEX);\n }\n #endif \n}\n\n\n/* WHO_AM_I register stores the upper 6 bits of MPU-60X0's\n * 7-bit I2C address. The least significant bit is determined\n * by the vaue of the AD0 pin, which is not reflected in this\n * register.\n * \n * Returns address stored in WHO_AM_I register, 0x68\n * bits 0 and 7 are hard coded to 0 \n */\nuint8_t MyMPU6050::testConnection(void)\n{\n return readByte((uint8_t)MPU6050_RA_WHO_AM_I);\n}\n\n\n/* Reads accel, temperature and gyro measurement registers.\n * \n * Parameter\n * output: sd \n * If all measurement bytes are read, the sd is populated with\n * measurement data.\n * If not all bytes are read, then no data is stored in sd.\n * \n * Returns number of bytes read\n */\nuint8_t MyMPU6050::readAllData(struct SensorData* sd)\n{\n uint8_t buf[MPU6050_ALL_MEASUREMENT_BYTES];\n uint8_t bytesRead;\n bytesRead = readBytes(MPU6050_RA_ACCEL_XOUT_H, buf, (uint8_t)MPU6050_ALL_MEASUREMENT_BYTES);\n if(bytesRead == (uint8_t)MPU6050_ALL_MEASUREMENT_BYTES)\n {\n // first byte read from high order register, second byte from low\n // form 16 bit value from 8 bit values\n sd->accelX = (((int16_t)buf[0]) << 8) | buf[1];\n sd->accelY = (((int16_t)buf[2]) << 8) | buf[3];\n sd->accelZ = (((int16_t)buf[4]) << 8) | buf[5];\n sd->temperature = (((int16_t)buf[6]) << 8) | buf[7];\n sd->gyroX = (((int16_t)buf[8]) << 8) | buf[9];\n sd->gyroY = (((int16_t)buf[10]) << 8) | buf[11];\n sd->gyroZ = (((int16_t)buf[12]) << 8) | buf[13];\n }\n\n return bytesRead;\n}\n\n\n/* Reads one byte from parameter-specified register address\n * \n * regAddr - register address to read \n *\n * Returns byte read\n */\nuint8_t MyMPU6050::readByte(uint8_t regAddr)const\n{\n uint8_t data;\n Wire.beginTransmission(devAddr);\n Wire.write(regAddr);\n Wire.endTransmission(false); // send TX buffer, send restart to keep connection alive\n Wire.requestFrom(devAddr, (uint8_t)1); // request 1 byte, default true sends stop message after request,\n // releasing i2c bus\n data = Wire.read();\n return data;\n}\n\n\n/* Reads bytes from register and stores them in buf.\n * \n * Parameters\n * regAddr - register address to read\n * buf - storage for bytes read\n * count - number of bytes to read\n * \n * Returns number of bytes read\n */\nuint8_t MyMPU6050::readBytes(uint8_t regAddr, uint8_t *buf, uint8_t count)\n{\n uint8_t i = 0;\n \n Wire.beginTransmission(devAddr);\n Wire.write(regAddr);\n Wire.endTransmission(false); // send TX buffer, send restart to keep connection alive\n Wire.requestFrom(devAddr, count);\n \n while(Wire.available() && i < count)\n {\n buf[i] = Wire.read();\n ++i;\n }\n \n return 
i;\n}\n\n/* Write multiple bits to an 8-bit device register\n*\n* regAddr - register address\n* leftBit - leftmost bit to write (7-0)\n* numBits - number of bits to write\n* data - right-aligned value to write\n*/\nvoid MyMPU6050::writeBits(uint8_t regAddr, uint8_t leftBit, uint8_t numBits, uint8_t data)\n{\n // \n // 76543210 bit positions\n // xx arguments: leftBit = 4, length = 2\n // 00000110 bit mask \n\n uint8_t regVal, mask;\n\n // get current register values\n regVal = readByte(regAddr);\n \n \n // shifts 1 to bit position length value\n // subtracting 1 ensures all bits to the right are 1's\n // and all other bits are 0's \n // Example: length = 4\n // 1 << numBits produces 0001 0000 \n // (1 << numBits) - 1 produces 0000 1111 \n mask = (1 << numBits) - 1;\n \n // shift the 1's to the left and zero fill\n // Example: leftBit is 5\n // 5 - 4 + 1 = 2, 0000 1111 << 2 becomes 0011 1100\n // Now have a bit mask of length 4 with leftmost bit position 5 \n mask = mask << (leftBit - numBits + 1);\n\n // shift data to correct position\n // example: data is 3, 0000 0011\n // shifting 5-4+1 = 2 to the left produces 0000 1100\n data = data << (leftBit - numBits + 1);\n\n // zero bits of interest in existing register value \n regVal &= ~(mask); \n // update register bits with data value \n regVal |= data;\n\n writeByte(regAddr, regVal);\n}\n\n\n\n/*\n * Writes one byte to specified register address.\n * \n * regAddr - register address\n * data - byte to write\n */\nvoid MyMPU6050::writeByte(uint8_t regAddr, uint8_t data)\n{\n Wire.beginTransmission(devAddr); \n Wire.write(regAddr); \n Wire.write(data);\n Wire.endTransmission(true); // transmits bytes that were queued by write\n // sends stop message after transmission, releases i2c bus\n}\n\n\n\n\n/*\n * finds the mean value for each sensor data output. 
\n * \n * \n */\nvoid MyMPU6050::mean_calc(struct SensorData* sd, struct mean_values* mv, struct offset_values* ov){\n \n int i = 0;\n long buff_ax=0, buff_ay=0, buff_az=0, buff_gx=0, buff_gy=0, buff_gz=0;\n\n\n\n\n while (i<(buffersize+101)){\n \n\n if((i>100) && (i<=(buffersize+100))){\n\n \n buff_ax = buff_ax + (sd->accelX) + (ov->ax_offset);\n buff_ay = buff_ay + (sd->accelY) + (ov->ay_offset);\n buff_az = buff_az + (sd->accelZ) + (ov->az_offset);\n buff_gx = buff_gx + (sd->gyroX) + (ov->gx_offset);\n buff_gy = buff_gy + (sd->gyroY) + (ov->gy_offset);\n buff_gz = buff_gz + (sd->gyroZ) + (ov->gz_offset);\n \n \n }\n\n //int mean_ax, mean_ay, mean_az, mean_gx, mean_gy, mean_gz\n if(i==(buffersize+100)){\n mv->mean_ax = (buff_ax/buffersize);\n mv->mean_ay = (buff_ay/buffersize);\n mv->mean_az = buff_az/buffersize;\n mv->mean_gx = buff_gx/buffersize;\n mv->mean_gy = buff_gy/buffersize;\n mv->mean_gz = buff_gz/buffersize;\n\n \n\n \n }\n i++;\n delay(2); //needed to avoid repeated values\n }\n \n}\n\n\nvoid MyMPU6050::calibration(struct SensorData* sd, struct offset_values* ov, struct mean_values* mv){\n ov->ax_offset = -(mv->mean_ax)/8;\n ov->ay_offset = -(mv->mean_ay)/8;\n ov->az_offset = (16384 -(mv->mean_az))/8;\n\n ov->gx_offset = -(mv->mean_gx)/4;\n ov->gy_offset = -(mv->mean_gy)/4;\n ov->gz_offset = -(mv->mean_gz)/4;\n\n\n\n int loop_iteration = 0;\n while(1){\n \n int ready=0;\n mean_calc(sd, mv, ov);\n\n if (abs(mv->mean_ax)<=acel_deadzone) { ready++;}\n else ov->ax_offset = ov->ax_offset - mv->mean_ax/acel_deadzone; //ax offset = ax offset - \n\n \n if (abs(mv->mean_ay)<=acel_deadzone){ ready++;}\n else ov->ay_offset = ov->ay_offset - mv->mean_ay/acel_deadzone;\n \n \n if (abs(16384-mv->mean_az)<=acel_deadzone) {ready++;}\n else ov->az_offset = ov->az_offset + (16384-mv->mean_az)/acel_deadzone;\n\n\n if (abs(mv->mean_gx)<=giro_deadzone) { ready++;}\n else ov->gx_offset = ov->gx_offset - mv->mean_gx/(giro_deadzone+1);\n\n\n if (abs(mv->mean_gy)<=giro_deadzone){ ready++;}\n else ov->gy_offset = ov->gy_offset - mv->mean_gy/(giro_deadzone+1);\n\n\n if (abs(mv->mean_gz)<=giro_deadzone){ ready++;}\n else ov->gz_offset = ov->gz_offset - mv->mean_gz/(giro_deadzone+1);\n\n #ifdef DEBUG \n\n\n Serial.print(mv->mean_ax);\n Serial.print(\":\");\n Serial.print(mv->mean_ay);\n Serial.print(\":\");\n Serial.print(mv->mean_az);\n Serial.print(\":\");\n Serial.print(mv->mean_gx);\n Serial.print(\":\");\n Serial.print(mv->mean_gy);\n Serial.print(\":\");\n Serial.print(mv->mean_gz);\n Serial.print(\":\");\n\n\n Serial.print(ov->ax_offset);\n Serial.print(\":\");\n Serial.print(ov->ay_offset);\n Serial.print(\":\");\n Serial.print(ov->az_offset);\n Serial.print(\":\");\n Serial.print(ov->gx_offset);\n Serial.print(\":\");\n Serial.print(ov->gy_offset);\n Serial.print(\":\");\n Serial.print(ov->gz_offset);\n Serial.print(\":\");\n\n loop_iteration+=1 ;\n \n Serial.println(loop_iteration);\n \n\n #endif \n \n\n \n if (ready==6) \n {\n break;\n }\n \n }\n\n}\n"
},
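The writeBits() comments in the entry above derive the field mask as ((1 << numBits) - 1) << (leftBit - numBits + 1). A minimal Python sketch of that read-modify-write arithmetic so the mask math can be checked without hardware; illustrative only, and write_bits is a hypothetical stand-in for the C++ method:

def write_bits(reg_val, left_bit, num_bits, data):
    """Update num_bits of reg_val, ending at bit position left_bit, with data."""
    mask = (1 << num_bits) - 1          # e.g. num_bits=2 -> 0b11
    shift = left_bit - num_bits + 1     # e.g. left_bit=4 -> shift=3
    mask <<= shift                      # 0b11 << 3 -> 0b0001_1000 (bits 4:3)
    reg_val &= ~mask & 0xFF             # clear the target field
    reg_val |= (data << shift) & mask   # write the new value into the field
    return reg_val

if __name__ == '__main__':
    # Setting full-scale bits 4:3 of a config byte to scale=2 (0b10)
    print(bin(write_bits(0b0000_0000, 4, 2, 2)))  # -> 0b10000
    print(bin(write_bits(0b1111_1111, 4, 2, 0)))  # field cleared, other bits kept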
{
"alpha_fraction": 0.6982248425483704,
"alphanum_fraction": 0.7218934893608093,
"avg_line_length": 14.363636016845703,
"blob_id": "19f53347a234bcdbe78cf16bf93171ce972433a1",
"content_id": "c40e7df30a68942794deb8561c53f97537476d7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 11,
"path": "/final/balancing_driving_bot_2/helper.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef HELPER_H_MOTOR_CONTROL\n#define HELPER_H_MOTOR_CONTROL\n\n#include \"mpu6050.h\" // SensorData\n\n\nvoid printOffsetValues(const SensorData* offset);\n\n\n\n#endif\n"
},
{
"alpha_fraction": 0.7164179086685181,
"alphanum_fraction": 0.7164179086685181,
"avg_line_length": 7.375,
"blob_id": "ce2e99a309862a00f324095fb68e5a1bfeef3f56",
"content_id": "62b0db607e844d9488fbea14b8757061344a5f1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 8,
"path": "/final/balancing_driving_bot/voltage.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef VOLTAGE_H\n#define VOLTAGE_H\n\n\n\nvoid voltageInit();\n\n#endif\n"
},
{
"alpha_fraction": 0.587691068649292,
"alphanum_fraction": 0.6359614133834839,
"avg_line_length": 29.9375,
"blob_id": "356a35ac3b1ce453da0071a43f96288182359a50",
"content_id": "75e33e0aa79fbc3f9fc16f9efc92bc70fd335869",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2486,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 80,
"path": "/HW4/Python/HW_4_part_2.py",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "from vpython import *\nimport numpy as np\nfrom time import *\nimport math\nimport serial\n\ndef serialConnect(portName, baudRate):\n try: \n ser = serial.Serial(portName, baudRate)\n print(\"opened port \" + ser.name + '\\n')\n # give Arduino time to reset\n sleep(2)\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n return ser \n except serial.SerialException:\n raise IOError(\"problem connecting to \" + portName)\n\nscene.rang=5\ntoRad=2*np.pi/360\ntoDeg=1/toRad\nscene.forward-vector(-1,-1,-1)\n\nscene.width = 900\nscene.height = 600\n\nif __name__ == '__main__':\n\n portName = \"/dev/ttyUSB0\"\n ser = serialConnect(portName,115200)\n sleep(2)\n\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n\n frame=box(length =4, width = 5, height=5, color=color.purple,opacity=.3)\n axel=cylinder(axis=vector(0,0,1),pos=vector(0,-2.6,-3),length =6,radius=.3,color=color.yellow,opacity=.3)\n wheel1=cylinder(axis=vector(0,0,1),pos=vector(0,-2.6,2.5),length = 1,radius=1, color=color.blue, opacity=.3)\n wheel2=cylinder(axis=vector(0,0,1),pos=vector(0,-2.6,-3.5),length=1, radius=1, color=color.blue, opacity=.3)\n Xarrow=arrow(axis=vector(1,0,0),length=16,shaftwidth=.1,color=color.red)\n Yarrow=arrow(axis=vector(0,1,0),length=16,shaftwidth=.1,color=color.green)\n Zarrow=arrow(axis=vector(0,0,1),length=16,shaftwidth=.1,color=color.blue)\n\n leftArrow=arrow(axis=vector(1,0,0),length=16,shaftwidth=.1,color=color.red)\n topArrow=arrow(axis=vector(0,1,0),length=16,shaftwidth=.1,color=color.green)\n frontArrow=arrow(axis=vector(0,0,1),length=16,shaftwidth=.1,color=color.blue)\n\n robotObj = compound([frame,axel,wheel1,wheel2])\n j=0\n\n while True:\n\n while (ser.inWaiting() == 0):\n pass\n\n arduinoString = ser.readline().decode(\"utf-8\")\n dataArray = arduinoString.split(',')\n\n roll=float(dataArray[6])*toRad*-1\n pitch=float(dataArray[5])*toRad*-1\n yaw=float(dataArray[4])*toRad*-1\n\n \n\n rate(50)\n k=vector(cos(yaw)*cos(pitch), sin(pitch), sin(yaw)*cos(pitch))\n y=vector(0,1,0)\n s=cross(k,y)\n v=cross(s,k)\n vrot=v*cos(roll)+cross(k,v)*sin(roll)\n\n frontArrow.axis=s\n leftArrow.axis=k\n topArrow.axis=-vrot\n leftArrow.length=16\n frontArrow.length=16\n topArrow.length=-16\n\n robotObj.axis=k\n robotObj.up=vrot\n\n\n\n \n\n\n\n"
},
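HW_4_part_2.py orients the VPython model by building a heading vector k from yaw and pitch, deriving a side vector with cross products, and rolling the up vector about k. A minimal NumPy sketch of that construction; illustrative only, and orientation_vectors is a hypothetical name:

import numpy as np

def orientation_vectors(yaw, pitch, roll):
    """Return the forward vector k and the rolled up vector, as in the sketch above."""
    k = np.array([np.cos(yaw) * np.cos(pitch),      # forward/heading axis
                  np.sin(pitch),
                  np.sin(yaw) * np.cos(pitch)])
    y = np.array([0.0, 1.0, 0.0])                   # world up
    s = np.cross(k, y)                              # side axis
    v = np.cross(s, k)                              # up axis before roll
    v_rot = v * np.cos(roll) + np.cross(k, v) * np.sin(roll)  # roll about k
    return k, v_rot

if __name__ == '__main__':
    k, up = orientation_vectors(yaw=0.0, pitch=0.0, roll=0.0)
    print(k, up)   # level pose: forward along x, up along y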
{
"alpha_fraction": 0.783687949180603,
"alphanum_fraction": 0.783687949180603,
"avg_line_length": 20.69230842590332,
"blob_id": "a4c54d8c1f49cc5ae8979a386754c60f197095f0",
"content_id": "c35bbe55a8da03f503c2e81206ff0023db833ea2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 13,
"path": "/final/balancing_driving_bot/PID.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef PID_H_MOTOR_CONTROL\n#define PID_H_MOTOR_CONTROL\n\nfloat PID_balance(float desiredAngle, float presentAngle);\n\nfloat PID_left_motor(float left_desired_speed, float left_measured_speed);\n\nfloat PID_right_motor(float right_desired_speed, float right_measured_speed);\n\n\n\n\n#endif\n"
},
{
"alpha_fraction": 0.7182662487030029,
"alphanum_fraction": 0.7368420958518982,
"avg_line_length": 22.846153259277344,
"blob_id": "bcbd5191d2d6d38a5128dc92e3a1d510b9b334ab",
"content_id": "bfc973a46df8ce9256faeacff10503919814b56c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 13,
"path": "/HW4/c/helper.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef HELPER_H_LESSON9\r\n#define HELPER_H_LESSON9\r\n\r\n#include \"mpu6050.h\" // SensorData\r\n\r\n\r\nvoid printOffsetValues(const SensorData* offset);\r\nvoid printSensorData(const SensorData* data);\r\nvoid printAngleData(const AngleData* data, int decimalPlaces);\r\nvoid printAngles(double roll, double pitch);\r\n\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.5453894138336182,
"alphanum_fraction": 0.5758281350135803,
"avg_line_length": 26.51231575012207,
"blob_id": "74c1574fdd069194319e5cfbc4b8b94158e97f69",
"content_id": "ee71c86245febf79f005499d8d77c6c855307edd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5594,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 203,
"path": "/HW4/c/HW_4.ino",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "/* \n * Homework 4\n * Programmer: Tyler Angus\n * \n * Tilt sensing of roll, pitch, yaw angle from gryo data and pitch and roll from the accelerometer\n * \n * \n * Setup\n * MPU-6050 - default configuration \n * i2c clock frequency 400000\n * Serial baud rate 115200\n * \n * \n * \n */\n#include <Arduino.h>\n#include <Wire.h>\n#include \"mpu6050.h\"\n#include \"helper.h\"\n\n\n//#define DEBUG // turn on for all debug info\n//#define PRINT_ALL_GYRO // prints all gyro related info\n//#define PRINT_POSITION // prints gyro position estimate, summed over time \n#define PLOT_POSITION // turn on for serial plotter\n#define PLOT_ROLL // turn on for serial plotter\n#define PLOT_PITCH\n\n#define SAMPLE_INTERVAL_MS 30 // milliseconds\n\n\n// Initialize Serial, I2C, and mpu6050\nvoid setup(void)\n{\n Wire.begin();\n Wire.setClock(400000L);\n Serial.begin(115200);\n delay(1000);\n setupMPU6050();\n \n #ifdef DEBUG\n Serial.println(F(\"setup complete\"));\n #endif\n}\n\n\n\nint main(void)\n{\n SensorData offset, gyroRate, accel;\n AngleData gyroPosition = {0.0, 0.0, 0.0};\n AngleData deltaPosition; // change in position\n unsigned long startTime, elapsedTimems;\n\n int sampleCount = 0;\n\n // xyz specifies rotation order\n double roll, pitch; // roll rotation about x, pitch rotation about y\n double rollF = 0.0, pitchF = 0.0; // filtered roll and pitch from accel\n double gyro_xf = 0.0, gyro_yf = 0.0, gyro_zf = 0.0; //filtered gyro x y and z \n \n init(); // Arduino function to initialize timers, pwm, other hardware\n setup();\n \n #ifdef DEBUG\n Serial.println(F(\"ready to calibrate\"));\n #endif\n\n calibrateAccelerometer(&offset, 10);\n calibrateGyro(&offset, 10);\n\n #ifdef DEBUG\n Serial.println(F(\"calibration complete\"));\n printOffsetValues(&offset);\n delay(1000);\n #endif\n \n startTime = millis();\n while(1)\n {\n if( (elapsedTimems = (millis() - startTime)) >= SAMPLE_INTERVAL_MS)\n {\n readAccelerometer(&accel);\n readGyro(&gyroRate);\n startTime = millis(); \n ++sampleCount;\n\n #ifdef DEBUG\n Serial.println(\"before offset\");\n printSensorData(&gyroRate);\n #endif\n \n gyroRate.x += offset.x; // apply offset\n gyroRate.y += offset.y;\n gyroRate.z += offset.z;\n\n accel.x += offset.x; // apply offset\n accel.y += offset.y;\n accel.z += offset.z;\n\n // Freescale, equations 25 and 26, Rotation order Rxyz\n // roll may vary from -pi to +pi, use atan2\n pitch = atan2(accel.y, accel.z) * 180.0/PI;\n // pitch is restricted to -pi/2 to +pi/2, use atan\n roll = atan(-accel.x / sqrt(pow(accel.y,2) + pow(accel.z,2))) * 180.0/PI;\n \n #ifdef DEBUG\n Serial.println(\"Rotation xyz\");\n printAngles(roll, pitch);\n Serial.println(\"\");\n #endif\n \n #ifdef DEBUG\n Serial.println(\"after offset\");\n printSensorData(&gyroRate);\n #endif\n\n // apply low pass filter\n rollF = 0.94 * rollF + 0.06 * roll;\n pitchF = 0.94 * pitchF + 0.06 * pitch;\n \n #ifdef PLOT_ROLL\n \n //Serial.print(roll);\n //Serial.print(\",\");\n Serial.print(rollF);\n #endif\n\n #ifdef PLOT_PITCH\n \n //Serial.print(pitch);\n Serial.print(\",\");\n Serial.print(pitchF);\n #endif\n\n #ifdef PRINT_ALL_GYRO\n Serial.println(\"gyro rate, raw data\");\n printSensorData(&gyroRate);\n #endif\n\n // scale the data to deg/s and calculate change in position\n deltaPosition.x = (double)gyroRate.x / 131.0 * elapsedTimems / 1000.0;\n deltaPosition.y = (double)gyroRate.y / 131.0 * elapsedTimems / 1000.0;\n deltaPosition.z = (double)gyroRate.z / 131.0 * elapsedTimems / 1000.0;\n\n #ifdef PRINT_ALL_GYRO\n Serial.println(\"change in 
position, deg\");\n printAngleData(&deltaPosition, 4);\n #endif\n\n gyroPosition.x += deltaPosition.x;\n gyroPosition.y += deltaPosition.y;\n gyroPosition.z += deltaPosition.z;\n\n\n #ifdef PRINT_ALL_GYRO\n Serial.println(\"gyro position, deg\");\n printAngleData(&gyroPosition,4);\n #endif\n \n #ifdef PRINT_POSITION\n if( sampleCount % 100 == 0 )\n {\n Serial.println(\"gyro position, deg\");\n printAngleData(&gyroPosition,4);\n sampleCount = 0;\n }\n #endif\n //θ[n] = α * (θ[n-1] + θdotgyro[n] * δt )\n\n//The general form of a high-pass filter equation is y[i] = α y[i-1] + α (x[i] - x[i-1])\n//where y is the filtered output and x[i] - x[i-1] is the change in input.\n//The integration θdotgyro[n] * δt is the gyro displacement, the equivalent to the x[i] - x[i-1] term. \n//rollF = 0.94 * rollF + 0.06 * roll;\n\n gyro_xf = (0.94*gyro_xf) + (0.94*(deltaPosition.x));\n gyro_yf = (0.94*gyro_yf) + (0.94*(deltaPosition.y));\n gyro_zf = (0.94*gyro_zf) + (0.94*(deltaPosition.z));\n \n #ifdef PLOT_POSITION\n \n Serial.print(\",\");\n Serial.print(gyroPosition.x);\n Serial.print(\",\");\n Serial.print(gyroPosition.y);\n Serial.print(\",\");\n Serial.print(gyroPosition.z);\n Serial.print(\",\");\n //add up the high pass and low pass filtered data for pitch and roll \n //creates the complementary filter\n Serial.print(gyro_xf + pitchF);\n Serial.print(\",\");\n Serial.println(gyro_yf + rollF);\n #endif\n\n\n }\n }\n \n \n return 0;\n \n}\n"
},
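HW_4.ino describes the complementary filter as a low-passed accelerometer angle (rollF = 0.94*rollF + 0.06*roll) summed with a high-passed gyro integral of the form y[i] = a*(y[i-1] + dx). A minimal sketch of one filter update; illustrative only, the 0.94 weight is the one used above, and the sample inputs are made up:

ALPHA = 0.94  # same weight the sketch above uses

def complementary_step(accel_angle_f, gyro_f, accel_angle, gyro_rate_dps, dt):
    """One update; returns (new accel LP state, new gyro HP state, fused angle)."""
    accel_angle_f = ALPHA * accel_angle_f + (1.0 - ALPHA) * accel_angle  # low-pass
    delta = gyro_rate_dps * dt                 # gyro displacement over dt, degrees
    gyro_f = ALPHA * (gyro_f + delta)          # high-pass: alpha*(y[i-1] + dx)
    return accel_angle_f, gyro_f, accel_angle_f + gyro_f

if __name__ == '__main__':
    lp, hp = 0.0, 0.0
    # hold the board at 10 degrees while the gyro reads a small constant rate
    for _ in range(100):
        lp, hp, fused = complementary_step(lp, hp, accel_angle=10.0,
                                           gyro_rate_dps=0.5, dt=0.03)
    print(round(fused, 2))  # fused estimate settles near the 10-degree input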
{
"alpha_fraction": 0.5608429908752441,
"alphanum_fraction": 0.5933766961097717,
"avg_line_length": 28.504297256469727,
"blob_id": "fa57e9c4ae588eb1e4bf3c0865531b9b6be986a1",
"content_id": "b886109393a00a6f4e439d50ec55fbaef0a995d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10306,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 349,
"path": "/final/balancing_driving_bot_2/balancing_driving_bot_2.ino",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "/*\n * Program: Balancing the robot kit \n * Programer: Tyler Angus \n * \n * Purpose: The purpose of this program is to balance the robot kit by\n * using a PID control system. The program takes the raw data from the \n * mpu 6050 filters it and then uses a PID to create a balanced robot. \n * \n * \n * \n * \n */\n\n\n#include <Arduino.h>\n#include <Wire.h>\n#include \"mpu6050.h\"\n#include \"helper.h\"\n#include \"pins.h\"\n#include \"motor.h\"\n#include \"PID.h\"\n#include \"voltage.h\"\n#include <util/atomic.h>\n\n//#define DEBUG \n#define DEBUG_BALANCE \n//#define Calibrate\n#define NoCalibrate\n#define SAMPLE_INTERVAL_MS 5 // 5 miliseconds 200hz\n//#define PLOT\n//#define PLOT_PID_MOTOR\n//#define DEBUG_OUTPUT\n\n// Global Constants\nconst unsigned long ENCODER_SAMPLE_INTERVAL = 100UL; // units, milliseconds\nfloat speed_calc = 1000.000/float(ENCODER_SAMPLE_INTERVAL);\n\n// Global Variables\nvolatile unsigned long leftEncoderCount = 0;\nvolatile unsigned long rightEncoderCount = 0;\n\n\n/* Interrupt Service Routine\n * updates left encoder count\n */\nvoid leftEncoderISR(void)\n{\n leftEncoderCount++;\n}\n\nISR(PCINT2_vect)\n{\n rightEncoderCount++;\n}\n\n// Initialize Serial, I2C, and mpu6050\nvoid setup(void)\n{\n //set up interupt for left encoder\n pinMode(LEFT_ENCODER_A_PIN, INPUT);\n attachInterrupt(digitalPinToInterrupt(LEFT_ENCODER_A_PIN), leftEncoderISR, CHANGE);\n\n \n Wire.begin();\n Wire.setClock(400000L);\n Serial.begin(115200);\n setupMPU6050();\n voltageInit();\n // setup for a pin change interupt \n cli();//turn off interupts \n PCICR |= 0b00000100; //enables port D pin change interrupts \n PCMSK2 |= 0b00010000; //PCINT20\n sei();//enable interupts \n delay(4000);\n \n #ifdef DEBUG\n Serial.println(F(\"setup complete\"));\n #endif\n}\n\n\n\nint main(void)\n{\n \n SensorData offset, gyroRate, accel;\n AngleData gyroPosition = {0.0, 0.0, 0.0};\n unsigned long startTime, startTimeEncoder, dTEncoder, elapsedTime;\n AngleData deltaPosition; // change in position\n int count = 0;\n unsigned long encoder_count_left[32] = {0};\n unsigned long encoder_count_right[32] = {0};\n unsigned long elapsed_time[32] = {0};\n float battery_voltage[32] = {0};\n int increment_count[32] = {0};\n\n float left_PID_out = 0.0;\n float right_PID_out = 0.0;\n float left_desired_speed = 127.0;\n float right_desired_speed = 127.0;\n float left_measured_speed = 0.0;\n float right_measured_speed = 0.0;\n\n float mean_speed = 0.0;\n float desired_speed = 0.0;\n float PID_speed_output;\n\n\n\n \n\n\n float presentAngle;\n float desiredAngle = 0.0;//85.45;\n float motorOutput = 0.0;\n\n \n\n\n // xyz specifies rotation order\n double roll, pitch; // roll rotation about x, pitch rotation about y\n double rollF = 0.0, pitchF = 0.0; // filtered roll and pitch from accel\n double gyro_xf = 0.0, gyro_yf = 0.0, gyro_zf = 0.0; //filtered gyro x y and z \n \n init(); // Arduino function to initialize timers, pwm, other hardware\n initMotors();\n setup();\n \n #ifdef DEBUG\n Serial.println(F(\"ready to calibrate\"));\n #endif\n\n #ifdef Calibrate\n calibrateAccelerometer(&offset, 1000);\n calibrateGyro(&offset, 1000);\n #endif\n\n #ifdef NoCalibrate\n offset.xG = 206;\n offset.yG = -185;\n offset.zG = 113;\n offset.x = -416;\n offset.y = 737;\n offset.z = 2235; \n #endif\n\n #ifdef DEBUG\n Serial.println(F(\"calibration complete\"));\n printOffsetValues(&offset);\n delay(1000);\n #endif\n \n startTime = millis();\n startTimeEncoder = millis();\n\n\n \n \n while(1)\n {\n\n\n \n if( (elapsedTime = 
(millis() - startTime)) >= SAMPLE_INTERVAL_MS)\n {\n readAccelerometer(&accel);\n readGyro(&gyroRate);\n startTime = millis(); \n\n\n\n \n gyroRate.x += offset.xG; // apply offset\n gyroRate.y += offset.yG;\n gyroRate.z += offset.zG;\n\n accel.x += offset.x; // apply offset\n accel.y += offset.y;\n accel.z += offset.z;\n\n // Freescale, equations 25 and 26, Rotation order Rxyz\n // roll may vary from -pi to +pi, use atan2\n pitch = atan2(accel.y, accel.z) * 180.0/PI;\n // pitch is restricted to -pi/2 to +pi/2, use atan\n roll = atan(-accel.x / sqrt(pow(accel.y,2) + pow(accel.z,2))) * 180.0/PI;\n \n \n // apply low pass filter\n rollF = 0.94 * rollF + 0.06 * roll;\n pitchF = 0.94 * pitchF + 0.06 * pitch;\n \n\n // scale the data to deg/s and calculate change in position\n deltaPosition.x = (double)gyroRate.x / 131.0 * elapsedTime / 1000.0;\n deltaPosition.y = (double)gyroRate.y / 131.0 * elapsedTime / 1000.0;\n deltaPosition.z = (double)gyroRate.z / 131.0 * elapsedTime / 1000.0;\n\n gyroPosition.x += deltaPosition.x;\n gyroPosition.y += deltaPosition.y;\n gyroPosition.z += deltaPosition.z;\n\n //θ[n] = α * (θ[n-1] + θdotgyro[n] * δt )\n\n //The general form of a high-pass filter equation is y[i] = α y[i-1] + α (x[i] - x[i-1])\n //where y is the filtered output and x[i] - x[i-1] is the change in input.\n //The integration θdotgyro[n] * δt is the gyro displacement, the equivalent to the x[i] - x[i-1] term. \n //rollF = 0.94 * rollF + 0.06 * roll;\n\n gyro_xf = (0.94*gyro_xf) + (0.94*(deltaPosition.x));\n gyro_yf = (0.94*gyro_yf) + (0.94*(deltaPosition.y));\n gyro_zf = (0.94*gyro_zf) + (0.94*(deltaPosition.z));\n \n presentAngle = (gyro_xf + pitchF);//using the angle given by the complementary filter and adding 90 degrees.\n\n #ifdef DEBUG_BALANCE\n Serial.print(\"presentAngle=\");\n Serial.print(presentAngle);\n Serial.print(\"\\t\");\n #endif\n \n PID_speed_output = PID_speed(desired_speed, mean_speed);\n\n motorOutput = PID_balance(desiredAngle, presentAngle, PID_speed_output); //calls the PID function and sets the output to motorOutput. 
\n\n left_desired_speed = motorOutput;\n right_desired_speed = motorOutput;\n \n left_PID_out = PID_left_motor(left_desired_speed, left_measured_speed);\n right_PID_out = PID_right_motor(right_desired_speed, right_measured_speed);\n \n balanceBot(left_PID_out, right_PID_out); //balance bot uses motorOutput to know when to drive forward or backword\n\n \n ///The below if statements take care of the speed measurement if the wheels are going backwards.\n if(left_PID_out<0) {\n left_measured_speed = left_measured_speed * -1;\n }\n\n if(right_PID_out>0) {\n \n }\n \n if(right_PID_out<0) {\n right_measured_speed = right_measured_speed * -1;\n }\n\n #ifdef DEBUG_BALANCE\n Serial.print(\"motorOutput=\");\n Serial.println(motorOutput);\n #endif\n\n #ifdef PLOT\n Serial.print(motorOutput);\n Serial.println(\"\");\n #endif \n \n \n \n \n\n\n }\n\n if( (millis() - startTimeEncoder) >= ENCODER_SAMPLE_INTERVAL)\n {\n //keeps the time and stores it in the array\n dTEncoder = millis() - startTimeEncoder;\n elapsed_time[count] = {dTEncoder}; \n startTimeEncoder = millis();\n \n ATOMIC_BLOCK(ATOMIC_RESTORESTATE) {\n // code with interrupts blocked (consecutive atomic operations will not get interrupted)\n\n //set the counts and voltage into the arrays created \n increment_count[count] = count; \n encoder_count_left[count] = leftEncoderCount;\n encoder_count_right[count] = rightEncoderCount;\n battery_voltage[count] = (analogRead(VOL_MEASURE_PIN) * 1.1 / 1024) * ((10 + 1.5) / 1.5);\n left_measured_speed = ((float(encoder_count_left[count])/782.000)*speed_calc*0.210);//this converts the count into velocity the .21 is circumference of the wheel\n right_measured_speed = ((float(encoder_count_right[count])/782.000)*speed_calc*0.210);//right motor velocity conversion\n \n\n }\n\n \n\n #ifdef DEBUG\n Serial.print(\"increment_count=\");\n Serial.print(increment_count[count]); \n Serial.print(\" elapsed time[ms]=\"); \n Serial.println(elapsed_time[count]); \n Serial.print(\" encoder count left=\");\n Serial.print(encoder_count_left[count]); \n Serial.print(\" encoder count right=\");\n Serial.print(encoder_count_right[count]); \n Serial.print(\" Voltage=\");\n Serial.println(battery_voltage[count]);\n Serial.print(\"velocity of left motor = \");\n Serial.print(left_measured_speed, 4); \n Serial.print(\"\\tvelocity of right motor = \");\n Serial.println(right_measured_speed);\n Serial.print(\"distance traveled left motor = \");\n Serial.print((encoder_count_left[count]/782.0)*0.21); \n Serial.print(\"\\tdistance traveled right motor = \");\n Serial.println((encoder_count_right[count]/782.0)*0.21);\n \n #endif \n \n \n left_measured_speed = (left_measured_speed / 0.70) * (255.0); //.7m/s is the max velocity of the left motor and 255 is the max char that can be sent to the motor. 
\n right_measured_speed = (right_measured_speed / 0.71) * (255.0);//.71m/s is the max velocity of the right motor during testing\n\n \n\n #ifdef DEBUG\n Serial.print(\"left_measured_speed = \");\n Serial.print(left_measured_speed, 6); //prints the number with 6 decimal points\n Serial.print(\" right_measured_speed = \");\n Serial.println(right_measured_speed, 6);\n #endif\n \n mean_speed = ((left_measured_speed + right_measured_speed)/ 2);\n\n \n #ifdef DEBUG_OUTPUT\n Serial.print(left_PID_out);\n Serial.println(right_PID_out);\n #endif\n \n \n balanceBot(left_PID_out, right_PID_out); \n \n #ifdef DEBUG\n Serial.println(\"=============================================================\");\n #endif\n\n count++;\n leftEncoderCount = 0UL; // 0 is type int, 0UL is type unsigned long\n rightEncoderCount = 0UL;\n\n }\n \n \n }\n return 0;\n \n\n \n}\n"
},
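balancing_driving_bot_2.ino feeds the fused angle into PID_balance() and per-motor PID routines. A minimal sketch of the discrete PID step those routines implement: proportional on error, integral of error*dt, derivative taken on the measurement, output clamped to the +/-255 PWM range. Illustrative only; the gains match the PID_balance constants in the PID.cpp entry further down, and the sample call values are made up:

class PID:
    def __init__(self, kp, ki, kd, out_limit=255.0):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.out_limit = out_limit
        self.iterm = 0.0          # accumulated error
        self.prev_measured = 0.0  # for the derivative-on-measurement term

    def step(self, setpoint, measured, dt):
        error = setpoint - measured
        self.iterm += error * dt                       # cumulative error
        dterm = (measured - self.prev_measured) / dt   # derivative on measurement
        self.prev_measured = measured
        out = self.kp * error + self.ki * self.iterm + self.kd * dterm
        return max(-self.out_limit, min(self.out_limit, out))  # clamp to +/-255

if __name__ == '__main__':
    pid = PID(kp=60.0, ki=140.0, kd=0.1)  # PID_balance gains from PID.cpp below
    print(pid.step(setpoint=0.0, measured=2.5, dt=0.005))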
{
"alpha_fraction": 0.513095498085022,
"alphanum_fraction": 0.5427923202514648,
"avg_line_length": 22.425121307373047,
"blob_id": "5067f76967e5d478d0167f81f966d635e05b0fb4",
"content_id": "7c931c7c296cb68b213359c66d5805ef76d8dff2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4849,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 207,
"path": "/HW5/velocity_test/Velocity_test.ino",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "/*\n * \n * Program: testing robot encoder counts\n * Program purpose: utilizing interupts count the encoder iteration and organize that data then transmit it serially \n * Test the encoder count at different speeds and transmit the voltage of the battery. \n * \n * \n */\n\n\n\n\n\n\n\n\n#include <Arduino.h>\n#include \"pins.h\"\n#include \"motor.h\"\n#include <util/atomic.h>\n\n\n// Global Constants\nconst unsigned long ENCODER_SAMPLE_INTERVAL = 500UL; // units, milliseconds\n\n// Global Variables\nvolatile unsigned long leftEncoderCount = 0;\nvolatile unsigned long rightEncoderCount = 0;\n\n\n\n/* Interrupt Service Routine\n * updates left encoder count\n */\nvoid leftEncoderISR(void)\n{\n leftEncoderCount++;\n}\n/*\n * initializes the voltage to be taken \n */\nvoid voltageInit()\n{\n analogReference(INTERNAL);\n}\n\n/* Interrupt Service Routine\n * updates left encoder count\n */\n\nISR(PCINT2_vect)\n{\n rightEncoderCount++;\n}\n\nISR(PCINT0_vect)\n {\n rightEncoderCount++;\n }\n\n \nvoid mysetup(void)\n{\n pinMode(LEFT_ENCODER_A_PIN, INPUT);\n\n attachInterrupt(digitalPinToInterrupt(LEFT_ENCODER_A_PIN), leftEncoderISR, CHANGE);\n voltageInit();\n // setup for a pin change interupt \n cli();//turn off interupts \n PCICR |= 0b00000100; //enables port D pin change interrupts \n PCMSK2 |= 0b00010000; //PCINT20\n sei();//enable interupts \n \n}\n\nint main(void){\n\n unsigned char motorSpeed = 64; // This is 25% duty cycle or 25% of 255\n\n unsigned long startTime, dTEncoder;\n int count = 0;\n int i = 0;\n unsigned long encoder_count_left[32] = {0};\n unsigned long encoder_count_right[32] = {0};\n unsigned long elapsed_time[32] = {0};\n float battery_voltage[32] = {0};\n int increment_count[32] = {0};\n \n \n init();\n mysetup(); \n \n Serial.begin(115200);\n initMotors();\n\n\n \n while(1)\n {\n if(count == 0){\n driveForward(motorSpeed); \n delay(2000); //ensure that motor gets up to speed \n startTime = millis();\n }\n\n \n \n\n while(count < 32){\n if( (millis() - startTime) >= ENCODER_SAMPLE_INTERVAL)\n {\n //keeps the time and stores it in the array\n dTEncoder = millis() - startTime;\n elapsed_time[count] = {dTEncoder}; \n startTime = millis();\n \n ATOMIC_BLOCK(ATOMIC_RESTORESTATE) {\n // code with interrupts blocked (consecutive atomic operations will not get interrupted)\n\n //set the counts and voltage into the arrays created \n increment_count[count] = count; \n encoder_count_left[count] = leftEncoderCount;\n encoder_count_right[count] = rightEncoderCount;\n battery_voltage[count] = (analogRead(VOL_MEASURE_PIN) * 1.1 / 1024) * ((10 + 1.5) / 1.5);\n \n \n }\n if(count == 30){\n for(int i = 0; i < 31; i++){\n Serial.print(increment_count[i]);\n Serial.print(\",\");\n Serial.print(elapsed_time[i]);\n Serial.print(\",\");\n Serial.print(encoder_count_left[i]);\n Serial.print(\",\");\n Serial.print(encoder_count_right[i]);\n Serial.print(\",\");\n Serial.print(battery_voltage[i]);\n Serial.print(\",\");\n Serial.println(\" \");\n } \n }\n \n\n if(count == 10){\n motorSpeed = 128;//This is 50% duty cycle\n if(i==0){\n driveForward(motorSpeed); \n delay(2000);//to ensure motor gets up to speed \n startTime = millis(); \n }\n\n if(i==1){\n driveBackward(motorSpeed);\n delay(2000);//to ensure motor gets up to speed \n startTime = millis();\n }\n }\n \n if(count == 20){\n motorSpeed = 191;//This is 75% duty cycle\n if(i==0){\n driveForward(motorSpeed); \n delay(2000);//to ensure motor gets up to speed \n startTime = millis();\n }\n\n if(i==1){\n driveBackward(motorSpeed);\n 
delay(2000);//to ensure motor gets up to speed \n startTime = millis();\n }\n }\n\n if(count == 30 & i == 0){\n stopMotors();\n delay(1000);//keeps the motor from switching directions to fast \n motorSpeed = 64;//this is 50% duty cycle \n driveBackward(motorSpeed);\n delay(2000);//to ensure motor gets up to speed \n startTime = millis();\n count = 0;\n i = 1;\n }\n \n if(count == 30 & i == 1){\n stopMotors();\n }\n count++;\n leftEncoderCount = 0UL; // 0 is type int, 0UL is type unsigned long\n rightEncoderCount = 0UL;\n\n \n }\n \n }\n\n\n \n\n \n \n\n }\n \n return 0;\n}\n"
},
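The encoder experiments above convert raw counts to wheel speed; balancing_driving_bot_2.ino quotes 782 counts per revolution and a 0.21 m wheel circumference. A minimal worked example of that conversion; illustrative only:

COUNTS_PER_REV = 782.0          # encoder counts per wheel revolution (from the sketch)
WHEEL_CIRCUMFERENCE_M = 0.21    # wheel circumference in meters (from the sketch)

def wheel_speed_m_per_s(encoder_counts, sample_interval_ms):
    """Velocity from counts accumulated over one sample window."""
    revolutions = encoder_counts / COUNTS_PER_REV
    windows_per_second = 1000.0 / sample_interval_ms
    return revolutions * windows_per_second * WHEEL_CIRCUMFERENCE_M

if __name__ == '__main__':
    # e.g. 260 counts in a 100 ms window is about 0.70 m/s, roughly the
    # maximum the author measured for the left motor
    print(round(wheel_speed_m_per_s(260, 100.0), 3))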
{
"alpha_fraction": 0.780701756477356,
"alphanum_fraction": 0.780701756477356,
"avg_line_length": 15.214285850524902,
"blob_id": "ba1e3cf029d1db75312006eb17ab49d054dc17ba",
"content_id": "d55962b091fafd6c711348095282bbade600ac95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 14,
"path": "/midterm/balancing_bot/motor.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef MOTOR_H_INCLUDED\n#define MOTOR_H_INCLUDED\n\n\nvoid initMotors( void);\nvoid stopMotors(void);\n\nvoid driveForward(unsigned char speed);\nvoid driveBackward(unsigned char speed);\nvoid balanceBot(float motorOutput);\n\n\n\n#endif \n"
},
{
"alpha_fraction": 0.5926904678344727,
"alphanum_fraction": 0.6094231605529785,
"avg_line_length": 25.395349502563477,
"blob_id": "f7392b9689dee9b945ec14aa2d37f13d158d6bfd",
"content_id": "3173cd9af425d8549a1143afccf096a31b09aab8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4542,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 172,
"path": "/midterm/python/Midterm.py",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "# Plot 4 signals in real time. \n# programmer Tyler Angus \n# used to analyze data coming from the self balancing robot kit\n\nimport serial\nimport numpy as np\nfrom time import sleep \nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n\ndef serialConnect(portName, baudRate):\n try: \n ser = serial.Serial(portName, baudRate)\n print(\"opened port \" + ser.name + '\\n')\n # give Arduino time to reset\n sleep(2)\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n return ser \n except serial.SerialException:\n raise IOError(\"problem connecting to \" + portName)\n\n\ndef init():\n #initialize the graph data sets \n graph_presAngle.set_data([], [])\n graph_error.set_data([], [])\n graph_pid.set_data([], [])\n graph_motorControl.set_data([], [])\n \n return graph_presAngle, graph_error, graph_pid, graph_motorControl\n\ndef animate(i):\n global t, presAngle, error, pid, motorControl\n \n\n while (ser.inWaiting() == 0):\n pass\n\n arduinoString = ser.readline().decode(\"utf-8\") #decodes the information sent through serial\n dataArray = arduinoString.split(',') #seperates the info into an array and uses the ',' to know where to split the data \n\n #This is where we take the data from the data array and append it to some globals\n presAngle.append(float(dataArray[0])) \n error.append(float(dataArray[1])) \n pid.append(float(dataArray[2]))\n motorControl.append(float(dataArray[3]))\n\n\n presAngle.pop(0)\n error.pop(0)\n pid.pop(0)\n motorControl.pop(0)\n\n\n\n graph_presAngle.set_data(t, presAngle)\n graph_error.set_data(t, error)\n graph_pid.set_data(t, pid)\n graph_motorControl.set_data(t, motorControl)\n\n\n\n return graph_presAngle, graph_error, graph_pid, graph_motorControl\n\n \n\nif __name__ == '__main__':\n\n portName = \"/dev/ttyUSB2\"\n ser = serialConnect(portName,115200)\n sleep(2) # give Arduino time to reset\n\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n\n numPoints = 201 # number of data points\n\n \n fig = plt.figure(figsize=(7, 4)) # create figure window\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(-45, 45)) # specify axis limits\n\n plt.title('Real-time data')\n plt.xlabel('Data points')\n plt.ylabel('Error')\n ax.grid(True)\n\n \n graph_error, = ax.plot([], [], 'b', label = 'Error from Desired Angle') #graph the error make a blue line and label it Error from desired Angle\n \n\n ax.legend(loc='upper right')\n\n\n\n #creating the figure for the plot of PID \n fig1 = plt.figure(figsize=(7,4))\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(-1000, 1000)) # specify axis limits\n\n #label the plot with info \n plt.title('Real-time data')\n plt.xlabel('Data points')\n plt.ylabel('PID')\n ax.grid(True)\n\n graph_pid, = ax.plot([], [], 'b', label = 'PID Output')\n \n #this says where to put the legend \n ax.legend(loc='upper right')\n\n\n #creating a third figure to plot the motor control \n fig2 = plt.figure(figsize=(7,4))\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(-500, 500))\n\n plt.title('Real-time data')\n plt.xlabel('Data points')\n plt.ylabel('Motor Speed')\n ax.grid(True)\n\n graph_motorControl, = ax.plot([], 'b', label = 'Motor Control')\n\n ax.legend(loc='upper right')\n\n\n\n fig3 = plt.figure(figsize=(7,4))\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(60, 120))\n\n plt.title('Real-time data')\n plt.xlabel('Data points')\n plt.ylabel('Present Angle')\n ax.grid(True)\n\n graph_presAngle, = ax.plot([], [], 'b', label = 'Present Angle of Bot')\n\n\n ax.legend(loc='upper right')\n\n\n\n\n t = 
list(range(0, numPoints))\n presAngle = []\n error = []\n pid = []\n motorControl = []\n\n\n\n for i in range(0, numPoints):\n presAngle.append(0)\n error.append(0)\n pid.append(0)\n motorControl.append(0)\n\n\n\n delay = 20\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n interval=delay, blit=True)\n\n anim = animation.FuncAnimation(fig1, animate, init_func=init,\n interval=delay, blit=True)\n\n anim = animation.FuncAnimation(fig2, animate, init_func=init,\n interval=delay, blit=True)\n\n anim = animation.FuncAnimation(fig3, animate, init_func=init,\n interval=delay, blit=True)\n plt.show()\n\n\n"
},
{
"alpha_fraction": 0.4802507758140564,
"alphanum_fraction": 0.4921630024909973,
"avg_line_length": 32.547367095947266,
"blob_id": "459984f9664ca648f5a83d4e2e5bf15ed7823339",
"content_id": "c579e31c9197c1dd455baa098a74ebe87ff5be8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3190,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 95,
"path": "/HW3/HW3_P1.py",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "'''\nLesson 5 Description: Extracts x, y values from serial data read\n\nUses CTRL+C signal handler to terminate while loop execution\n\n'''\nimport serial\nfrom signal import signal, SIGINT\nfrom time import sleep\n\nkeepRunning = True \n\ndef handler(signal, frame):\n global keepRunning \n print('SIGINT or CTRL+C detected, setting keepRunning to False')\n keepRunning = False\n\n\ndef serialConnect(portName):\n try: \n ser = serial.Serial(portName, 38400)\n print(\"opened port \" + ser.name + '\\n')\n # give Arduino time to reset\n sleep(2)\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n return ser \n except serial.SerialException:\n raise IOError(\"problem connecting to \" + portName)\n\n\n\n\nif __name__ == '__main__':\n\n #register the signal handler\n signal(SIGINT, handler)\n\n portName = \"/dev/ttyUSB3\"\n ser = serialConnect(portName)\n \n file = open(\"HW3_#1.txt\", 'w')\n while keepRunning == True:\n\n if ser.in_waiting > 0:\n bytesRead = ser.read_until()\n read_list = [int(v) for v in bytesRead.decode().split(':')]\n if len(read_list) == 13: \n \n ax_m = read_list[0]\n ay_m = read_list[1]\n az_m = read_list[2]\n gx_m = read_list[3]\n gy_m = read_list[4]\n gz_m = read_list[5]\n\n ax_o = read_list[6]\n ay_o = read_list[7]\n az_o = read_list[8]\n gx_o = read_list[9]\n gy_o = read_list[10]\n gz_o = read_list[11]\n\n loop_it = read_list[12]\n\n print(\"ax_mean: \\t\" + str(ax_m) + \", \\tay_mean: \\t\" + str(ay_m) + \", \\taz_mean: \\t\" + str(az_m))\n print(\"gx_mean: \\t\" + str(gx_m) + \", \\tgy_mean: \\t\" + str(gy_m) + \", \\tgz_mean: \\t\" + str(az_m))\n print(\"ax_offset: \\t\" + str(ax_o) + \", \\tay_offset: \\t\" + str(ay_o) + \", \\taz_offset: \\t\" + str(az_o))\n print(\"gx_offset: \\t\" + str(gx_o) + \", \\tgy_offset: \\t\" + str(gy_o) + \", \\tgz_offset: \\t\" + str(gz_o))\n print(\"loop iteration #\" + str(loop_it))\n print(\" \")\n\n file.write(\"ax_mean: \\t\" + str(ax_m) + \", \\tay_mean: \" + str(ay_m) + \", \\t\\taz_mean: \" + str(az_m)+ '\\n')\n file.write(\"gx_mean: \\t\" + str(gx_m) + \", \\tgy_mean: \" + str(gy_m) + \", \\t\\tgz_mean: \" + str(az_m)+ '\\n')\n file.write(\"ax_offset: \\t\" + str(ax_o) + \", \\tay_offset: \" + str(ay_o) + \", \\taz_offset: \" + str(az_o)+ '\\n')\n file.write(\"gx_offset: \\t\" + str(gx_o) + \", \\tgy_offset: \" + str(gy_o) + \", \\tgz_offset: \" + str(gz_o)+ '\\n')\n file.write(\"loop iteration #\" + str(loop_it)+ '\\n'+ '\\n')\n file.write(\" \")\n\n\n else:\n print(\"warning read_list length is \" + len(read_list))\n \n \n \n \n\n # kill a bit of time before running through loop again\n # Arduino program transmitting every 300 ms\n sleep(30/1000)\n\n print('while loop terminated')\n file.close() \n ser.close()\n print(\"closed port\")\n\n\n\n"
},
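HW3_P1.py parses each serial line with [int(v) for v in bytesRead.decode().split(':')] and expects 13 fields. A minimal defensive variant that returns None on malformed lines instead of raising; illustrative only, and parse_calibration_line is a hypothetical name:

def parse_calibration_line(raw_bytes, expected_fields=13):
    """Decode b'1:2:...:13\\n' into a list of ints, or None if malformed."""
    try:
        fields = [int(v) for v in raw_bytes.decode().strip().split(':')]
    except (UnicodeDecodeError, ValueError):
        return None                      # garbled byte or non-integer field
    if len(fields) != expected_fields:
        return None                      # truncated or merged line
    return fields

if __name__ == '__main__':
    print(parse_calibration_line(b'1:2:3:4:5:6:7:8:9:10:11:12:13\n'))
    print(parse_calibration_line(b'1:2:oops\n'))  # -> None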
{
"alpha_fraction": 0.7840136289596558,
"alphanum_fraction": 0.7840136289596558,
"avg_line_length": 23.45833396911621,
"blob_id": "24a3cd62b928acc4e4073a004ab1f8d7121d9dc0",
"content_id": "d8bb0c7aca9e72e59aa953d43454e15f10e5c35e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 24,
"path": "/final/balancing_driving_bot_2/motor.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef MOTOR_H_INCLUDED\n#define MOTOR_H_INCLUDED\n\n\nvoid initMotors( void);\nvoid stopMotors(void);\n\nvoid drive_left_forward(unsigned char left_motor_speed);\nvoid drive_right_forward(unsigned char right_motor_speed);\nvoid driveForward(unsigned char speed);\nvoid driveBackward(unsigned char speed);\n\nvoid drive_right_backward(unsigned char right_PID_out);\nvoid drive_left_backward(unsigned char left_PID_out);\nvoid drive_right_forward(unsigned char right_PID_out);\nvoid drive_left_forward(unsigned char left_PID_out);\n\n\nvoid balanceBot(float left_PID_out, float right_PID_out);\n\n\n\n\n#endif \n"
},
{
"alpha_fraction": 0.5836065411567688,
"alphanum_fraction": 0.6196721196174622,
"avg_line_length": 18.0625,
"blob_id": "47d8d961c4e54b2c53e76805f53ade5e504a6ee0",
"content_id": "a1ff3192b3875c180ce87734f2ce02afbc1a0564",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 16,
"path": "/final/balancing_driving_bot_2/pins.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef pins_h\n#define pins_h\n\n#define VOL_MEASURE_PIN A2\n\n// motor control pins\n#define AIN1_PIN 7\n#define PWMA_LEFT_PIN 5\n#define BIN1_PIN 12\n#define PWMB_RIGHT_PIN 6\n#define STBY_PIN 8\n\n#define LEFT_ENCODER_A_PIN 2\n#define RIGHT_ENCODER_A_PIN 4\n\n#endif\n"
},
{
"alpha_fraction": 0.5623509287834167,
"alphanum_fraction": 0.6031229496002197,
"avg_line_length": 24.33516502380371,
"blob_id": "939450963013abbc826729c8aef3b04cb48973b1",
"content_id": "accd61d4ab7ce4acda182d4328b1fd41c00340ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4620,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 182,
"path": "/midterm/balancing_bot/Balancing_Bot.ino",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "/*\n * Program: Balancing the robot kit \n * Programer: Tyler Angus \n * \n * Purpose: The purpose of this program is to balance the robot kit by\n * using a PID control system. The program takes the raw data from the \n * mpu 6050 filters it and then uses a PID to create a balanced robot. \n * \n * \n * \n * \n */\n\n\n#include <Arduino.h>\n#include <Wire.h>\n#include \"mpu6050.h\"\n#include \"helper.h\"\n#include \"pins.h\"\n#include \"motor.h\"\n#include \"PID.h\"\n\n\n//#define DEBUG \n//#define Calibrate\n#define NoCalibrate\n#define SAMPLE_INTERVAL_MS 10 // milliseconds 33.33Hz\n#define PLOT\n\n// Initialize Serial, I2C, and mpu6050\nvoid setup(void)\n{\n Wire.begin();\n Wire.setClock(400000L);\n Serial.begin(115200);\n delay(1000);\n setupMPU6050();\n \n #ifdef DEBUG\n Serial.println(F(\"setup complete\"));\n #endif\n}\n\n\n\nint main(void)\n{\n \n SensorData offset, gyroRate, accel;\n AngleData gyroPosition = {0.0, 0.0, 0.0};\n AngleData deltaPosition; // change in position\n unsigned long startTime, elapsedTime;\n\n\n\n float presentAngle;\n float desiredAngle = 88.2;//85.45;\n float motorOutput = 0.0;\n \n \n \n int sampleCount = 0;\n\n // xyz specifies rotation order\n double roll, pitch; // roll rotation about x, pitch rotation about y\n double rollF = 0.0, pitchF = 0.0; // filtered roll and pitch from accel\n double gyro_xf = 0.0, gyro_yf = 0.0, gyro_zf = 0.0; //filtered gyro x y and z \n \n init(); // Arduino function to initialize timers, pwm, other hardware\n initMotors();\n setup();\n \n #ifdef DEBUG\n Serial.println(F(\"ready to calibrate\"));\n #endif\n\n #ifdef Calibrate\n calibrateAccelerometer(&offset, 1000);\n calibrateGyro(&offset, 1000);\n #endif\n\n #ifdef NoCalibrate\n offset.xG = 206;\n offset.yG = -185;\n offset.zG = 113;\n offset.x = -416;\n offset.y = 737;\n offset.z = 2235; \n #endif\n\n #ifdef DEBUG\n Serial.println(F(\"calibration complete\"));\n printOffsetValues(&offset);\n delay(1000);\n #endif\n \n startTime = millis();\n \n while(1)\n {\n if( (elapsedTime = (millis() - startTime)) >= SAMPLE_INTERVAL_MS)\n {\n readAccelerometer(&accel);\n readGyro(&gyroRate);\n startTime = millis(); \n ++sampleCount;\n\n\n \n gyroRate.x += offset.xG; // apply offset\n gyroRate.y += offset.yG;\n gyroRate.z += offset.zG;\n\n accel.x += offset.x; // apply offset\n accel.y += offset.y;\n accel.z += offset.z;\n\n // Freescale, equations 25 and 26, Rotation order Rxyz\n // roll may vary from -pi to +pi, use atan2\n pitch = atan2(accel.y, accel.z) * 180.0/PI;\n // pitch is restricted to -pi/2 to +pi/2, use atan\n roll = atan(-accel.x / sqrt(pow(accel.y,2) + pow(accel.z,2))) * 180.0/PI;\n \n \n // apply low pass filter\n rollF = 0.94 * rollF + 0.06 * roll;\n pitchF = 0.94 * pitchF + 0.06 * pitch;\n \n\n // scale the data to deg/s and calculate change in position\n deltaPosition.x = (double)gyroRate.x / 131.0 * elapsedTime / 1000.0;\n deltaPosition.y = (double)gyroRate.y / 131.0 * elapsedTime / 1000.0;\n deltaPosition.z = (double)gyroRate.z / 131.0 * elapsedTime / 1000.0;\n\n gyroPosition.x += deltaPosition.x;\n gyroPosition.y += deltaPosition.y;\n gyroPosition.z += deltaPosition.z;\n\n //θ[n] = α * (θ[n-1] + θdotgyro[n] * δt )\n\n //The general form of a high-pass filter equation is y[i] = α y[i-1] + α (x[i] - x[i-1])\n //where y is the filtered output and x[i] - x[i-1] is the change in input.\n //The integration θdotgyro[n] * δt is the gyro displacement, the equivalent to the x[i] - x[i-1] term. 
\n //rollF = 0.94 * rollF + 0.06 * roll;\n\n gyro_xf = (0.94*gyro_xf) + (0.94*(deltaPosition.x));\n gyro_yf = (0.94*gyro_yf) + (0.94*(deltaPosition.y));\n gyro_zf = (0.94*gyro_zf) + (0.94*(deltaPosition.z));\n \n presentAngle = (gyro_xf + pitchF + 90);//using the angle given by the complementary filter and adding 90 degrees.\n\n #ifdef DEBUG\n Serial.print(\"presentAngle=\");\n Serial.print(presentAngle);\n Serial.print(\"\\t\");\n #endif\n\n motorOutput = PID(desiredAngle, presentAngle); //calls the PID function and sets the output to motorOutput. \n\n #ifdef DEBUG\n Serial.print(\"motorOutput=\");\n Serial.println(motorOutput);\n #endif\n\n #ifdef PLOT\n Serial.print(motorOutput);\n Serial.println(\"\");\n #endif \n \n \n balanceBot(motorOutput); //balance bot uses motorOutput to know when to drive forward or backword \n \n\n\n }\n }\n\n \n \n return 0;\n \n}\n"
},
{
"alpha_fraction": 0.5723816156387329,
"alphanum_fraction": 0.5972315073013306,
"avg_line_length": 18.394821166992188,
"blob_id": "bb7297f10bcb68405e96eb83fcf967c2d1bdd995",
"content_id": "422e6033b7cf72ff10d0c264e76b5125ab249f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5996,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 309,
"path": "/final/balancing_driving_bot/PID.cpp",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "\n\n\n#include \"PID.h\"\n\n#include <Arduino.h> \n\n//#define DEBUG_BALANCE\n//#define DEBUG_MOTOR\n//#define PLOT_BALANCE\n//#define LEFT_PLOT_MOTOR\n//#define RIGHT_PLOT_MOTOR\n//#define DEBUG_SPEED\n//#define SPEED_PLOT\n\n\nfloat PID_balance(float desiredAngle, float presentAngle){\n float error = 0; \n const float kp = 60.0;//60\n const float kd = 0.1;//0.1\n const float ki = 140.00;//140\n float currentTime = 0.0;\n float dT = 0.0;\n float pid = 0.0;\n float dterm = 0.0;\n \n\n static float lastTime = 0.0;\n static float iterm = 0;\n\n static float prevAngle = 90;\n\n //read the timer values\n currentTime = (millis()/1000.0); \n dT = currentTime - lastTime;\n lastTime = currentTime;\n \n #ifdef DEBUG_BALANCE\n Serial.print(\"currentTime=\");\n Serial.print(currentTime);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef PLOT_BALANCE\n Serial.print(presentAngle);\n Serial.print(\",\");\n #endif\n\n\n #ifdef DEBUG_BALANCE\n Serial.print(\"dT=\");\n Serial.print(dT);\n Serial.print(\" \\t\");\n #endif\n\n \n\n\n error = desiredAngle - presentAngle;\n\n #ifdef DEBUG BALANCE\n Serial.print(\"error=\");\n Serial.print(error);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef PLOT_BALANCE\n Serial.print(error);\n Serial.print(\",\");\n #endif\n \n //calculate integral term, the cumulative error\n iterm = iterm + error*dT;\n\n //calculate derivative term, the rate error\n dterm = (presentAngle - prevAngle)/dT;\n\n //update previous to current\n prevAngle = presentAngle;\n\n //calculate PID value\n pid = (error*kp) + (iterm*ki) + (dterm*kd);\n\n #ifdef DEBUG_BALANCE\n Serial.print(\"pid=\");\n Serial.print(pid);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef PLOT_BALANCE\n Serial.print(pid);\n Serial.print(\",\");\n #endif\n\n //limit the pid to 255 the max speed \n if ( pid > 255 ){\n pid = 255;\n }\n else if (pid < -255){\n pid = -255;\n }\n\n return pid;\n \n}\n\n\n\n\n\n\n\n\nfloat PID_left_motor(float left_desired_speed, float left_measured_speed){\n float error = 0; \n const float kp = 3.5;//3.5 had the best results while testing.\n const float kd = 0.1;//0.1 had the best results during testing\n const float ki = 0.0;//adding ki made the results bad\n float currentTime = 0.0;\n float dT = 0.0;\n float pid = 0.0;\n float dterm = 0.0;\n \n\n static float lastTime = 0.0;\n static float iterm = 0;\n\n static float prev_speed = 0.0;\n\n //read the timer values\n currentTime = (millis()/1000.0); \n dT = currentTime - lastTime;\n lastTime = currentTime;\n \n #ifdef DEBUG_MOTOR\n Serial.println(\"----------------------------------------------------------------------\");\n Serial.println(\"Left Motor PID numbers\");\n Serial.print(\"currentTime=\");\n Serial.print(currentTime);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef LEFT_PLOT_MOTOR\n Serial.print(left_measured_speed);\n Serial.print(\",\");\n #endif\n\n\n #ifdef DEBUG_MOTOR\n Serial.print(\"dT=\");\n Serial.print(dT);\n Serial.print(\" \\t\");\n #endif\n\n \n\n\n error = left_desired_speed - left_measured_speed;\n\n #ifdef DEBUG_MOTOR\n Serial.print(\"error=\");\n Serial.print(error);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef LEFT_PLOT_MOTOR\n Serial.print(error);\n Serial.print(\",\");\n #endif\n \n //calculate integral term, the cumulative error\n iterm = iterm + error*dT;\n\n //calculate derivative term, the rate error\n dterm = (left_measured_speed - prev_speed)/dT;\n\n //update previous to current\n prev_speed = left_measured_speed;\n\n //calculate PID value\n pid = (error*kp) + (iterm*ki) + (dterm*kd);\n\n #ifdef DEBUG_MOTOR\n 
Serial.print(\"pid=\");\n Serial.print(pid);\n Serial.println(\"\\t\");\n Serial.println(\"----------------------------------------------------------------------\");\n #endif\n\n #ifdef LEFT_PLOT_MOTOR\n Serial.println(pid);\n \n #endif\n\n //limit the pid to 255 the max speed \n if ( pid > 255 ){\n pid = 255;\n }\n else if (0 > pid >= -127){\n pid = 127+pid;\n }\n else if (pid < -127){\n pid = 0; \n }\n\n return pid;\n \n}\n\n\n\n\n\n\n\n\n\nfloat PID_right_motor(float right_desired_speed, float right_measured_speed){\n float error = 0; \n const float kp = 2.5;//2.5 had the best results during testing\n const float kd = 0.1;//0.1 had the best results during testing\n const float ki = 0.0;//adding ki caused the system to be off.\n float currentTime = 0.0;\n float dT = 0.0;\n float pid = 0.0;\n float dterm = 0.0;\n \n\n static float lastTime = 0.0;\n static float iterm = 0;\n\n static float prev_speed = 0.0;\n\n //read the timer values\n currentTime = (millis()/1000.0); \n dT = currentTime - lastTime;\n lastTime = currentTime;\n \n #ifdef DEBUG_MOTOR\n Serial.println(\"----------------------------------------------------------------------\");\n Serial.println(\"Right Motor PID numbers\");\n Serial.print(\"currentTime=\");\n Serial.print(currentTime);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef RIGHT_PLOT_MOTOR\n Serial.print(right_measured_speed);\n Serial.print(\",\");\n #endif\n\n\n #ifdef DEBUG_MOTOR\n Serial.print(\"dT=\");\n Serial.print(dT);\n Serial.print(\" \\t\");\n #endif\n\n \n\n\n error = right_desired_speed - right_measured_speed;\n\n #ifdef DEBUG_MOTOR\n Serial.print(\"error=\");\n Serial.print(error);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef RIGHT_PLOT_MOTOR\n Serial.print(error);\n Serial.print(\",\");\n #endif\n \n //calculate integral term, the cumulative error\n iterm = iterm + error*dT;\n\n //calculate derivative term, the rate error\n dterm = (right_measured_speed - prev_speed)/dT;\n\n //update previous to current\n prev_speed = right_measured_speed;\n\n //calculate PID value\n pid = (error*kp) + (iterm*ki) + (dterm*kd);\n\n #ifdef DEBUG_MOTOR\n Serial.print(\"pid=\");\n Serial.print(pid);\n Serial.println(\"\\t\");\n Serial.println(\"----------------------------------------------------------------------\");\n #endif\n\n #ifdef RIGHT_PLOT_MOTOR\n Serial.println(pid);\n \n #endif\n\n //limit the pid to 255 the max speed \n if ( pid > 255 ){\n pid = 255;\n }\n else if (0 > pid >= -127){\n pid = 127+pid;\n }\n else if (pid < -127){\n pid = 0; \n }\n\n return pid;\n \n}\n"
},
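Two notes on `PID.cpp`. The update is the standard discrete form: the integral term accumulates `error*dT`, the derivative term differentiates the measurement, and the result is clamped to the ±255 PWM range. Also, the motor-limit branches originally read `else if (0 > pid >= -127)`; comparisons do not chain in C++ (that parses as `(0 > pid) >= -127`, which is always true), so the explicit `pid < 0 && pid >= -127` used above is required. A minimal Python sketch of the balance update follows, using the gains from the source (kp = 60, ki = 140, kd = 0.1); the timestamps and angles are hypothetical.

```python
# Sketch of the discrete PID step in PID_balance above; state is threaded
# explicitly instead of C's static locals. Inputs are hypothetical.
def pid_step(setpoint, measured, state, t, kp=60.0, ki=140.0, kd=0.1):
    iterm, prev_measured, last_t = state
    dt = t - last_t
    error = setpoint - measured
    iterm += error * dt                       # cumulative error
    dterm = (measured - prev_measured) / dt   # rate term on the measurement
    out = error * kp + iterm * ki + dterm * kd
    out = max(-255.0, min(255.0, out))        # clamp to the PWM range
    return out, (iterm, measured, t)

state = (0.0, 90.0, 0.0)                      # (iterm, prevAngle, lastTime)
out, state = pid_step(88.2, 91.0, state, t=0.01)
print(out)
```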
{
"alpha_fraction": 0.5868077278137207,
"alphanum_fraction": 0.6185438632965088,
"avg_line_length": 15.53608226776123,
"blob_id": "795fd6256c2d9fed11ba9545b736a8c1186192ed",
"content_id": "ea0cb5ccee99360685d1ee9a184e5125deeeed4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1607,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 97,
"path": "/midterm/balancing_bot/PID.cpp",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "\n\n\n#include \"PID.h\"\n\n#include <Arduino.h> \n\n//#define DEBUG\n#define PLOT\n\n\nfloat PID(float desiredAngle, float presentAngle){\n float error = 0; \n float kp = 60.0;//60\n float kd = 0.1;//0.1\n float ki = 140.00;//140\n float currentTime = 0.0;\n float dT = 0.0;\n float pid = 0.0;\n float dterm = 0.0;\n \n\n static float lastTime = 0.0;\n static float iterm = 0;\n\n static float prevAngle = 90;\n\n //read the timer values\n currentTime = (millis()/1000.0); \n dT = currentTime - lastTime;\n lastTime = currentTime;\n \n #ifdef DEBUG\n Serial.print(\"currentTime=\");\n Serial.print(currentTime);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef PLOT\n Serial.print(presentAngle);\n Serial.print(\",\");\n #endif\n\n\n #ifdef DEBUG\n Serial.print(\"dT=\");\n Serial.print(dT);\n Serial.print(\" \\t\");\n #endif\n\n \n\n\n error = desiredAngle - presentAngle;\n\n #ifdef DEBUG\n Serial.print(\"error=\");\n Serial.print(error);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef PLOT\n Serial.print(error);\n Serial.print(\",\");\n #endif\n \n //calculate integral term, the cumulative error\n iterm = iterm + error*dT;\n\n //calculate derivative term, the rate error\n dterm = (presentAngle - prevAngle)/dT;\n\n //update previous to current\n prevAngle = presentAngle;\n\n //calculate PID value\n pid = (error*kp) + (iterm*ki) + (dterm*kd);\n\n #ifdef DEBUG\n Serial.print(\"pid=\");\n Serial.print(pid);\n Serial.print(\"\\t\");\n #endif\n\n #ifdef PLOT\n Serial.print(pid);\n Serial.print(\",\");\n #endif\n\n //limit the pid to 255 the max speed \n if ( pid > 255 ){\n pid = 255;\n }\n else if (pid < -255){\n pid = -255;\n }\n\n return pid;\n \n}\n"
},
{
"alpha_fraction": 0.7924528121948242,
"alphanum_fraction": 0.7924528121948242,
"avg_line_length": 50,
"blob_id": "b4653606ff24228ab3b58fdc41fb79e063832c34",
"content_id": "59dddd010be1faecc9515308444f57f29466105f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 1,
"path": "/midterm/balancing_bot/PID.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "\n\nfloat PID(float desiredAngle, float presentAngle);\n"
},
{
"alpha_fraction": 0.6702916622161865,
"alphanum_fraction": 0.686109721660614,
"avg_line_length": 19.434343338012695,
"blob_id": "3b41488377113e4d4b4ef641e3d9f0353191a26b",
"content_id": "c0dac80153aaf883e66b7819d6b74e6e3cc893dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2023,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 99,
"path": "/final/balancing_driving_bot_2/motor.cpp",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#include \"pins.h\"\n#include \"motor.h\"\n#include <Arduino.h>\n\n#define DEBUG\n\nvoid initMotors( void)\n{\n pinMode(AIN1_PIN, OUTPUT);\n pinMode(BIN1_PIN, OUTPUT);\n pinMode(PWMA_LEFT_PIN, OUTPUT);\n pinMode(PWMB_RIGHT_PIN, OUTPUT);\n pinMode(STBY_PIN, OUTPUT);\n stopMotors();\n}\n\nvoid stopMotors(void)\n{\n digitalWrite(AIN1_PIN, LOW);\n digitalWrite(BIN1_PIN, LOW);\n digitalWrite(STBY_PIN, HIGH);\n analogWrite(PWMA_LEFT_PIN, 0);\n analogWrite(PWMB_RIGHT_PIN, 0);\n}\n\nvoid stop_right_motor(void)\n{\n digitalWrite(BIN1_PIN, LOW);\n analogWrite(PWMB_RIGHT_PIN, 0);\n}\n\nvoid stop_left_motor(void)\n{\n digitalWrite(AIN1_PIN, LOW);\n analogWrite(PWMA_LEFT_PIN, 0);\n}\n\nvoid drive_left_forward(unsigned char left_PID_out){\n digitalWrite(AIN1_PIN, 0);\n analogWrite(PWMA_LEFT_PIN, left_PID_out); \n}\n\nvoid drive_right_forward(unsigned char right_PID_out){\n digitalWrite(BIN1_PIN, 0);\n analogWrite(PWMB_RIGHT_PIN, right_PID_out);\n}\n\nvoid drive_left_backward(unsigned char left_PID_out){\n digitalWrite(AIN1_PIN, 1);\n analogWrite(PWMA_LEFT_PIN, left_PID_out); \n}\n\nvoid drive_right_backward(unsigned char right_PID_out){\n digitalWrite(BIN1_PIN, 1);\n analogWrite(PWMB_RIGHT_PIN, right_PID_out);\n}\n\nvoid driveForward(unsigned char speed)\n{\n digitalWrite(AIN1_PIN, 0);\n digitalWrite(BIN1_PIN, 0);\n analogWrite(PWMA_LEFT_PIN, speed);\n analogWrite(PWMB_RIGHT_PIN, speed);\n}\n\n\nvoid driveBackward(unsigned char speed)\n{\n digitalWrite(AIN1_PIN, 1);\n digitalWrite(BIN1_PIN, 1);\n analogWrite(PWMA_LEFT_PIN, speed);\n analogWrite(PWMB_RIGHT_PIN, speed);\n}\n\n\nvoid balanceBot(float left_PID_out, float right_PID_out){\n \n if(left_PID_out>0) {\n drive_left_backward(char(abs(left_PID_out)));\n }\n if(left_PID_out==0){\n stop_left_motor();\n }\n if(left_PID_out<0) {\n drive_left_forward(char(abs(left_PID_out)));\n }\n\n if(right_PID_out>0) {\n drive_right_backward(char(abs(right_PID_out)));\n }\n if(right_PID_out==0){\n stop_right_motor();\n }\n if(right_PID_out<0) {\n drive_right_forward(char(abs(right_PID_out)));\n }\n\n \n}\n"
},
{
"alpha_fraction": 0.5892394185066223,
"alphanum_fraction": 0.6052290201187134,
"avg_line_length": 28.653846740722656,
"blob_id": "2a23d59ff8f2a380df05e12f35f84f5c24c86be7",
"content_id": "6e5fe162b6eaf64922d909995e17852aa178f88f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4628,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 156,
"path": "/HW4/Python/HW_4_part_1.py",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "# Plot 3 signals\n\nimport serial\nimport numpy as np\nfrom time import sleep \nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n\ndef serialConnect(portName, baudRate):\n try: \n ser = serial.Serial(portName, baudRate)\n print(\"opened port \" + ser.name + '\\n')\n # give Arduino time to reset\n sleep(2)\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n return ser \n except serial.SerialException:\n raise IOError(\"problem connecting to \" + portName)\n\n\ndef init():\n graph_aroll.set_data([], [])\n graph_groll.set_data([], [])\n graph_froll.set_data([], [])\n graph_apitch.set_data([], [])\n graph_gpitch.set_data([], [])\n graph_fpitch.set_data([], [])\n graph_gyaw.set_data([], []) \n return graph_aroll, graph_groll, graph_froll, graph_apitch, graph_gpitch, graph_fpitch, graph_gyaw\n\ndef animate(i):\n global t, accelRoll, gyroRoll, cfilterRoll, accelPitch, gyroPitch, cfilterPitch, gyroYaw\n \n\n while (ser.inWaiting() == 0):\n pass\n\n arduinoString = ser.readline().decode(\"utf-8\")\n dataArray = arduinoString.split(',')\n\n accelRoll.append(float(dataArray[0])) \n gyroRoll.append(float(dataArray[3])) \n CfilterRoll.append(float(dataArray[6]))\n accelPitch.append(float(dataArray[1]))\n gyroPitch.append(float(dataArray[2]))\n cfilterPitch.append(float(dataArray[5]))\n gyroYaw.append(float(dataArray[4]))\n\n accelRoll.pop(0)\n gyroRoll.pop(0)\n CfilterRoll.pop(0)\n accelPitch.pop(0)\n gyroPitch.pop(0)\n cfilterPitch.pop(0)\n gyroYaw.pop(0)\n\n\n graph_aroll.set_data(t, accelRoll)\n graph_groll.set_data(t, gyroRoll)\n graph_froll.set_data(t, CfilterRoll)\n graph_apitch.set_data(t, accelPitch)\n graph_gpitch.set_data(t, gyroPitch)\n graph_fpitch.set_data(t, cfilterPitch)\n graph_gyaw.set_data(t, gyroYaw)\n\n\n return graph_aroll, graph_groll, graph_froll, graph_apitch, graph_gpitch, graph_fpitch, graph_gyaw\n\n \n\nif __name__ == '__main__':\n\n portName = \"/dev/ttyUSB0\"\n ser = serialConnect(portName,115200)\n sleep(2) # give Arduino time to reset\n\n # flush input buffer, discarding all contents\n ser.reset_input_buffer()\n\n numPoints = 201 # number of data points\n fig = plt.figure(figsize=(7, 4)) # create figure window\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(-180, 180)) # specify axis limits\n\n plt.title('Real-time sensor data')\n plt.xlabel('Data points')\n plt.ylabel('Rotation [Degrees]')\n ax.grid(True)\n\n graph_aroll, = ax.plot([], [], 'b', label = 'Accel Roll')\n graph_groll, = ax.plot([], [], 'r', label = 'Gyro Integration Roll')\n graph_froll, = ax.plot([], [], 'g', label = 'Filtered Roll')\n ax.legend(loc='upper right')\n ax.legend(loc='upper right')\n ax.legend(loc='upper right')\n\n fig1 = plt.figure(figsize=(7,4))\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(-180, 180)) # specify axis limits\n\n plt.title('Real-time sensor data')\n plt.xlabel('Data points')\n plt.ylabel('Rotation [Degrees]')\n ax.grid(True)\n\n graph_apitch, = ax.plot([], [], 'b', label = 'Accel Pitch')\n graph_gpitch, = ax.plot([], [], 'r', label = 'Gyro Integration Pitch')\n graph_fpitch, = ax.plot([], [], 'g', label = 'Filtered Pitch')\n ax.legend(loc='upper right')\n ax.legend(loc='upper right')\n ax.legend(loc='upper right')\n\n fig2 = plt.figure(figsize=(7,4))\n ax = plt.axes(xlim=(0,numPoints-1), ylim=(-180, 180)) # specify axis limits\n\n plt.title('Real-time sensor data')\n plt.xlabel('Data points')\n plt.ylabel('Rotation [Degrees]')\n ax.grid(True)\n\n graph_gyaw, = ax.plot([], [], 'b', label = 'Gyro Yaw')\n\n ax.legend(loc='upper 
right')\n\n\n t = list(range(0, numPoints))\n accelRoll = []\n gyroRoll = []\n CfilterRoll = []\n accelPitch = []\n gyroPitch = []\n cfilterPitch = []\n gyroYaw = []\n\n\n for i in range(0, numPoints):\n accelRoll.append(0)\n gyroRoll.append(0)\n CfilterRoll.append(0)\n accelPitch.append(0)\n gyroPitch.append(0)\n cfilterPitch.append(0)\n gyroYaw.append(0)\n\n\n\n delay = 20\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n interval=delay, blit=True)\n\n anim = animation.FuncAnimation(fig1, animate, init_func=init,\n interval=delay, blit=True)\n\n anim = animation.FuncAnimation(fig2, animate, init_func=init,\n interval=delay, blit=True)\n plt.show() \n\n"
},
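The `animate()` callback above keeps every trace at a fixed length by pairing `append()` with `pop(0)`, which is O(n) per frame on a Python list. A `collections.deque` with `maxlen` gives the same rolling window in O(1); a small sketch of the pattern with hypothetical samples:

```python
from collections import deque

# Pre-filled rolling window, like the zero-initialized lists in the script.
window = deque([0.0] * 5, maxlen=5)
for sample in [1.0, 2.0, 3.0]:
    window.append(sample)        # the oldest value is discarded automatically
print(list(window))              # [0.0, 0.0, 1.0, 2.0, 3.0]
```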
{
"alpha_fraction": 0.7157894968986511,
"alphanum_fraction": 0.7157894968986511,
"avg_line_length": 12.571428298950195,
"blob_id": "e6b2152b39bf95d5e4c93b20341a47077c841e48",
"content_id": "cf0e51ea10b72851bc962bb80dea802602a21378",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 7,
"path": "/final/balancing_driving_bot/voltage.cpp",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#include \"voltage.h\"\n#include <Arduino.h>\n\nvoid voltageInit()\n{\n analogReference(INTERNAL);\n}\n"
},
{
"alpha_fraction": 0.5847009420394897,
"alphanum_fraction": 0.6169808506965637,
"avg_line_length": 32.504547119140625,
"blob_id": "39a203df2f920732933b22f15d6491cd25472c1d",
"content_id": "dc1ae0fd843e54bafd4467d80a7240bcfa05c14f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7373,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 220,
"path": "/HW5/Velocity.py",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "# Plot 3 signals\n\nimport serial\nimport numpy as np\nfrom time import sleep \nimport csv\nimport matplotlib\nimport matplotlib.pyplot as plt \n\n\n\nser = serial.Serial('/dev/ttyUSB9', 115200)\nser.flushInput()\nfile = open(\"HW_5_data.txt\", 'w')\ncount = 0\ni = 0\nl_f_twenty_five = 0.0 # left motor foward at 25% duty cycle average variable\nl_f_fifty = 0.0 \nl_f_seventy_five = 0.0\nl_b_twenty_five = 0.0\nl_b_fifty = 0.0\nl_b_seventy_five = 0.0\nr_f_twenty_five = 0.0 #right motor foward at 25% duty cycle average variable\nr_f_fifty = 0.0\nr_f_seventy_five = 0.0\nr_b_twenty_five = 0.0\nr_b_fifty = 0.0\nr_b_seventy_five = 0.0 #right motor backword at 75% dudty cycle average variable\nbat_volt = 0.0\n\nwhile count < 31: \n \n if ser.in_waiting > 0:\n ser_bytes = ser.readline().decode(\"utf-8\")\n decoded_bytes = (ser_bytes.split(','))\n decoded_bytes[5]='0'\n\n #disregard the first line of data because it is not accurate time\n if(count >= 1): \n print(decoded_bytes)\n #write to the file the data collected through serial\n file.write('\\n')\n file.write(\"count = \")\n file.write(str(decoded_bytes[0]))\n file.write(\"\\tDt [ms] = \")\n file.write(str(decoded_bytes[1]))\n file.write(\"\\tEncoder left = \")\n file.write(str(decoded_bytes[2]))\n file.write(\"\\tEncoder right = \")\n file.write(str(decoded_bytes[3]))\n file.write(\"\\tBattery Voltage = \")\n file.write(str(decoded_bytes[4]))\n bat_volt += float(decoded_bytes[4]) # the battery voltage doesn't change significantly with speed so I will use the same average for every test.\n #use if statements to add up the data as it comes in. \n if count<=10 and i==0: #take the first ten tests and add them up \n l_f_twenty_five += float(decoded_bytes[2]) \n if 10<count<=20 and i==0:\n l_f_fifty += float(decoded_bytes[2])\n if 20<count<=30 and i==0:\n l_f_seventy_five += float(decoded_bytes[2])\n if count<=10 and i==1:\n l_b_twenty_five += float(decoded_bytes[2])\n if 10<count<=20 and i==1:\n l_b_fifty += float(decoded_bytes[2])\n if 20<count<=30 and i==1:\n l_b_seventy_five += float(decoded_bytes[2])\n\n\n if count<=10 and i==0:\n r_f_twenty_five += float(decoded_bytes[3])\n if 10<count<=20 and i==0:\n r_f_fifty += float(decoded_bytes[3])\n if 20<count<=30 and i==0:\n r_f_seventy_five += float(decoded_bytes[3])\n if count<=10 and i==1:\n r_b_twenty_five += float(decoded_bytes[3])\n if 10<count<=20 and i==1:\n r_b_fifty += float(decoded_bytes[3])\n if 20<count<=30 and i==1:\n r_b_seventy_five += float(decoded_bytes[3])\n #This starts the loop over to take in the backward data i is used to seperate backward and forward data collection\n count = count + 1\n if (count == 31) and (i == 0):\n count = 0\n i = 1\n print(\"Backward Test: \")\n file.write('\\n')\n file.write('Backword Data: ')\n file.write(\"\\n\")\n#write to the file all of the sums of data\nfile.write('\\n')\nfile.write('25% Left Motor Forward Total = ')\nfile.write(str(l_f_twenty_five))\nfile.write('\\t 50% Left Motor Forward Total = ')\nfile.write(str(l_f_fifty))\nfile.write(' \\t 75% Left Motor Forward Total = ')\nfile.write(str(l_f_seventy_five))\nfile.write('\\n')\n\nfile.write('25% Left Motor Backward Total = ')\nfile.write(str(l_b_twenty_five))\nfile.write('\\t 50% Left Motor Backward Total = ')\nfile.write(str(l_b_fifty))\nfile.write('\\t 75% Left Motor Backward Total = ')\nfile.write(str(l_b_seventy_five))\nfile.write('\\n')\n\nfile.write('25% Right Motor Forward Total = ')\nfile.write(str(r_f_twenty_five))\nfile.write('\\t 50% Right Motor Forward Total = 
')\nfile.write(str(r_f_fifty))\nfile.write('\\t 75% Right Motor Forward Total = ')\nfile.write(str(r_f_seventy_five))\nfile.write('\\n')\n\nfile.write('25% Right Motor Backward Total = ')\nfile.write(str(r_b_twenty_five))\nfile.write('\\t 50% Right Motor Backward Total = ')\nfile.write(str(r_b_fifty))\nfile.write('\\t 75% Right Motor Backward Total = ')\nfile.write(str(r_b_seventy_five))\nfile.write('\\n')\n\n#Average all of the data and store it \nl_f_twenty_five = l_f_twenty_five/10.0\nl_f_fifty = l_f_fifty/10.0\nl_f_seventy_five = l_f_seventy_five/10.0\nl_b_twenty_five = l_b_twenty_five/10.0\nl_b_fifty = l_b_fifty/10.0\nl_b_seventy_five = l_b_seventy_five/10.0\n\nr_f_twenty_five = r_f_twenty_five/10.0\nr_f_fifty = r_f_fifty/10.0\nr_f_seventy_five = r_f_seventy_five/10.0\nr_b_twenty_five = r_b_twenty_five/10.0\nr_b_fifty = r_b_fifty/10.0\nr_b_seventy_five = r_b_seventy_five/10.0\n#average the voltage \nbat_volt = bat_volt / 60.0\n\n#write all of the averages into the file\nfile.write('\\n')\nfile.write('Batter Voltage Average for tests = ')\nfile.write(str(bat_volt))\nfile.write('\\n')\n\nfile.write('\\n')\nfile.write('25% Left Motor Forward Average = ')\nfile.write(str(l_f_twenty_five))\nfile.write('\\t 50% Left Motor Forward Average = ')\nfile.write(str(l_f_fifty))\nfile.write('\\t 75% Left Motor Forward Average = ')\nfile.write(str(l_f_seventy_five))\nfile.write('\\n')\n\nfile.write('25% Left Motor Backward Average = ')\nfile.write(str(l_b_twenty_five))\nfile.write('\\t 50% Left Motor Backward Average = ')\nfile.write(str(l_b_fifty))\nfile.write('\\t 75% Left Motor Backward Average = ')\nfile.write(str(l_b_seventy_five))\nfile.write('\\n')\n\nfile.write('25% Right Motor Forward Average = ')\nfile.write(str(r_f_twenty_five))\nfile.write('\\t 50% Right Motor Forward Average = ')\nfile.write(str(r_f_fifty))\nfile.write('\\t 75% Right Motor Forward Average = ')\nfile.write(str(r_f_seventy_five))\nfile.write('\\n')\n\nfile.write('25% Right Motor Backward Average = ')\nfile.write(str(r_b_twenty_five))\nfile.write('\\t 50% Right Motor Backward Average = ')\nfile.write(str(r_b_fifty))\nfile.write('\\t 75% Right Motor Backward Average = ')\nfile.write(str(r_b_seventy_five))\nfile.write('\\n')\n\n\nfile.close()\n\n#create a figure using bar graphs to represent the data\nprint(\"closed file\")\nser.close()\nprint('closed Port')\n \nlabels = ['F 25%', 'F 50%', 'F 75%', 'B 25%', 'B 50%' ,'B 75%']\nleft_means = [l_f_twenty_five,l_f_fifty,l_f_seventy_five,l_b_twenty_five,l_b_fifty,l_b_seventy_five]\nright_means= [r_f_twenty_five,r_f_fifty,r_f_seventy_five,r_b_twenty_five,r_b_fifty,r_b_seventy_five]\n\nx = np.arange(len(labels)) #label locations\nwidth = 0.45 # the width of the bars\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(x - width/2, left_means, width, label = 'left motor')\nrects2 = ax.bar(x + width/2, right_means, width, label = 'right motor') \n\nax.set_ylabel('Encoder Rotations')\nax.set_title('Encoder data')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\ndef autolabel(rects):\n #put labels above each bar\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x()+ rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\n\nfig.tight_layout()\n\nplt.show()\n\n\n"
},
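The chains of range checks in `Velocity.py` bucket each encoder reading by test number: counts 1-10 belong to the 25% duty-cycle run, 11-20 to 50%, and 21-30 to 75%. The same bucketing can be written with integer division; a small illustrative sketch, not a drop-in replacement for the script's accumulators:

```python
# Map a test count (1..30) to its duty-cycle bucket with integer division.
DUTIES = ("25%", "50%", "75%")

def bucket(count):
    return DUTIES[(count - 1) // 10]

assert bucket(1) == "25%" and bucket(10) == "25%"
assert bucket(11) == "50%" and bucket(30) == "75%"
print(bucket(17))  # 50%
```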
{
"alpha_fraction": 0.7884057760238647,
"alphanum_fraction": 0.7884057760238647,
"avg_line_length": 19.235294342041016,
"blob_id": "4d6dcd60e5c02dfe27299dceb5ff72043f94aed9",
"content_id": "cd3cae8b7d91d32505fa4c6af5db29f3f13bae24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 345,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 17,
"path": "/final/balancing_driving_bot/motor.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef MOTOR_H_INCLUDED\n#define MOTOR_H_INCLUDED\n\n\nvoid initMotors( void);\nvoid stopMotors(void);\n\nvoid drive_left_forward(unsigned char left_motor_speed);\nvoid drive_right_forward(unsigned char right_motor_speed);\nvoid driveForward(unsigned char speed);\nvoid driveBackward(unsigned char speed);\nvoid balanceBot(float motorOutput);\n\n\n\n\n#endif \n"
},
{
"alpha_fraction": 0.7867035865783691,
"alphanum_fraction": 0.7867035865783691,
"avg_line_length": 26.769229888916016,
"blob_id": "8b3cb09512e1fdf6cc837c902d57fd6b7d539030",
"content_id": "320f25c0258ac387459b6fe49912297770bff9a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 13,
"path": "/final/balancing_driving_bot_2/PID.h",
"repo_name": "frozen616/Autonomous_Vehicle_Design_Class",
"src_encoding": "UTF-8",
"text": "#ifndef PID_H_MOTOR_CONTROL\n#define PID_H_MOTOR_CONTROL\n\nfloat PID_balance(float desiredAngle, float presentAngle, float PID_speed_output);\n\nfloat PID_left_motor(float left_desired_speed, float left_measured_speed);\n\nfloat PID_right_motor(float right_desired_speed, float right_measured_speed);\n\nfloat PID_speed(float desired_speed, float mean_speed);\n\n\n#endif\n"
}
] | 25 |
jamesm2w/CardEngine
|
https://github.com/jamesm2w/CardEngine
|
b5669cf807415d4ca3d7712c2958ce13b283d96d
|
5ab982aaeeec5cab380842ad738d1912a8ef6b51
|
7afad57b5568edb4efdda4733f60dd27363a75cd
|
refs/heads/master
| 2022-11-27T13:16:37.723620 | 2020-07-25T13:40:51 | 2020-07-25T13:40:51 | 282,451,216 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.40305009484291077,
"alphanum_fraction": 0.4313725531101227,
"avg_line_length": 19.863636016845703,
"blob_id": "3bda6ce4318728920869ca35ddaa7bcdc1079145",
"content_id": "beb9e2b3b05c03fd60793137b3fb2aecd7ff7bcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 22,
"path": "/KBIO.py",
"repo_name": "jamesm2w/CardEngine",
"src_encoding": "UTF-8",
"text": "from msvcrt import getch, kbhit\n\nnumberKeys = [\n b\"0\", b\"1\", b\"2\", b\"3\", b\"4\", b\"5\", b\"6\", b\"7\", b\"8\", b\"9\"\n ]\n\ndef numberInput(mi=0, ma=9, brk=b\"\\r\"):\n while True:\n key = getch()\n \n if key == brk:\n break\n \n try:\n index = numberKeys.index(key)\n if index >= mi and index <= ma:\n return index\n \n except ValueError:\n continue\n\n return -1\n"
},
{
"alpha_fraction": 0.48633691668510437,
"alphanum_fraction": 0.5005188584327698,
"avg_line_length": 26.342857360839844,
"blob_id": "f77f445bd794f16b589124a651762038a7b91495",
"content_id": "feae81124868086e34fb9c399a38100b4813524c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2891,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 105,
"path": "/holdem.py",
"repo_name": "jamesm2w/CardEngine",
"src_encoding": "UTF-8",
"text": "from colorama import Fore, Back, init, Style\nimport os\n\nfrom CardEngine import Suit, Rank, Card, Hand, Deck\nfrom msvcrt import kbhit, getch\ninit()\n\ndef showHand(hand):\n print(hand.toString() + (\" \" * (hand.handSize - len(hand.hand))))\n\ndef showBlankedHand(hand):\n print((Back.WHITE + \" \" + Style.RESET_ALL + \" \") * hand.handSize)\n\ndef showCommunity(com):\n print(\"Community\\t\", end=\"\")\n for card in com:\n if card == None:\n print(Back.WHITE + \" \" + Style.RESET_ALL + \" \", end=\"\")\n else:\n print(card.toString() + \" \", end=\"\")\n\n print(end=\"\\r\")\n\ndef waitForEnter():\n while True:\n if kbhit():\n if getch() == b\"\\r\":\n return\n\ndef main():\n the_deck = Deck()\n\n player1 = Hand(handSize = 2)\n player2 = Hand(handSize = 2)\n community = [None, None, None, None, None]\n burn = []\n\n games = 0\n\n while True:\n try:\n games += 1\n\n for i in range(2):\n player1.addCard(the_deck.randomCard())\n player2.addCard(the_deck.randomCard())\n \n print(\"Game\", games)\n community = [None, None, None, None, None]\n print(\"You (P1)\\t\", end=\"\")\n showHand(player1)\n print()\n print(\"Player 2\\t\", end=\"\")\n showBlankedHand(player2)\n print()\n\n for i in range(3):\n community[i] = the_deck.randomCard()\n\n showCommunity(community)\n \n for i in range(2):\n waitForEnter()\n burn.append(the_deck.randomCard())\n community[3+i] = the_deck.randomCard()\n showCommunity(community)\n\n waitForEnter()\n \n print()\n player1Score = Hand.score7(Hand(*(player1.hand + community), handSize=7))\n player2Score = Hand.score7(Hand(*(player2.hand + community), handSize=7))\n print(player1Score)\n print(\"You (P1) Score:\", Hand.scoreString(player1Score))\n print(\"Player 2 Score:\", Hand.scoreString(player2Score))\n\n if player1Score > player2Score:\n print(\"Player 1 wins\")\n elif player2Score > player1Score:\n print(\"Player 2 wins\")\n else:\n print(\"Draw\")\n\n waitForEnter()\n\n for card in community:\n the_deck.addCard(card)\n\n for card in burn:\n the_deck.addCard(card)\n\n while not player1.isEmpty():\n the_deck.addCard(player1.discard(0))\n while not player2.isEmpty():\n the_deck.addCard(player2.discard(0))\n\n #os.system(\"cls\")\n the_deck.shuffle()\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n main()\n a = input()\n\n \n\n \n"
},
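`holdem.py` scores each player's two hole cards together with the five community cards via `Hand.score7`, which (see `CardEngine.py` further down) evaluates every 5-card subset of the 7 cards and keeps the best score. A quick check of how many subsets that is:

```python
# score7 enumerates all 5-card subsets of a 7-card hand: C(7, 5) = 21.
from itertools import combinations

print(sum(1 for _ in combinations(range(7), 5)))  # 21
```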
{
"alpha_fraction": 0.47830474376678467,
"alphanum_fraction": 0.5045408606529236,
"avg_line_length": 18.41176414489746,
"blob_id": "e652dc077ce5f288886f6f155765798e624b4acd",
"content_id": "382ea5d5b39d4a6a85de600c0187d28d990c6d47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 51,
"path": "/hand generators.py",
"repo_name": "jamesm2w/CardEngine",
"src_encoding": "UTF-8",
"text": "from CardEngine import Rank, Suit, Hand, Deck\nfrom msvcrt import getch, kbhit\n\ngames = 0\n\nscores = {\n \"0\": 0, \"1\": 0, \"2\": 0, \"3\": 0, \"4\": 0, \"5\": 0,\n \"6\": 0, \"7\": 0, \"8\": 0\n}\n\nnames = [\n \"High Card\", \"Pair\", \"Two Pair\", \"Trips\", \"Straight\", \"Flush\", \"Full House\", \"Quad\", \"Straight Flush\"\n ]\n\ndef main():\n global games\n global scores\n \n deck = Deck()\n player = Hand()\n\n while True:\n\n for i in range(5):\n player.addCard(deck.randomCard())\n\n print(player.handDescription())\n \n scores[str(player.handScore[0])] += 1\n\n if player.handScore[0] >= 8:\n return\n\n while not player.isEmpty():\n deck.addCard(player.discard(0))\n\n games += 1\n waiting = kbhit()\n if waiting:\n key = getch()\n if key == b\"\\r\":\n return\n\n deck.shuffle()\n\nmain()\nprint(games, \"hands\")\nfor i in names:\n print(i, scores[str(names.index(i))])\n\na = input()\n\n"
},
{
"alpha_fraction": 0.4684109687805176,
"alphanum_fraction": 0.47415444254875183,
"avg_line_length": 25.116666793823242,
"blob_id": "9c690e0d3552a0ee0e56f8a639ff78f6fc4ad24d",
"content_id": "f4aa24375a37cf46ba5d453846f40909f3240ed2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1567,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 60,
"path": "/Game.py",
"repo_name": "jamesm2w/CardEngine",
"src_encoding": "UTF-8",
"text": "from colorama import Fore, Back, Style, init\nimport os\n\nfrom CardEngine import Rank, Suit, Card, Hand, Deck\nfrom KBIO import numberInput\n\ninit()\n\ndef showHand(hand):\n print(hand.toString() + (\" \" * (hand.handSize - len(hand.hand))), end=\"\\r\")\n\ndef main():\n the_deck = Deck()\n player = Hand()\n games = 0\n while True:\n try:\n games += 1\n\n for i in range(5):\n player.addCard(the_deck.randomCard())\n\n print(\"Game #\", games)\n print(\"\".join(\" \" + str(i + 1) + \" \" for i in range(player.handSize)))\n showHand(player)\n \n discard = []\n while True:\n\n index = numberInput(mi = 1, ma = len(player.hand))\n \n if index >= 1 and index <= player.handSize:\n discard.append(player.discard(index - 1))\n showHand(player)\n elif index == -1:\n break\n else:\n continue\n\n for i in range(len(discard)):\n player.addCard(the_deck.randomCard())\n the_deck.addCard(discard[i])\n\n showHand(player)\n\n print(\"\\n\" + player.handDescription())\n\n a = input(\"Press enter to continue\")\n\n while not player.isEmpty():\n the_deck.addCard(player.discard(0))\n\n os.system(\"cls\")\n the_deck.shuffle()\n except Exception as e:\n print(e)\n \nif __name__ == \"__main__\":\n main()\n a = input()\n"
},
{
"alpha_fraction": 0.49178820848464966,
"alphanum_fraction": 0.5083431601524353,
"avg_line_length": 28.5,
"blob_id": "dc99052d31d4a28429f4df12951c125081ef9540",
"content_id": "373f7e74abdb6c6c6a771a8ec9b5a0f68e42c0a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7611,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 258,
"path": "/CardEngine.py",
"repo_name": "jamesm2w/CardEngine",
"src_encoding": "UTF-8",
"text": "try:\n from colorama import Fore, Back, Style\n COLOR = True\nexcept ImportError:\n COLOR = False\n\nimport random\nimport itertools\n\nclass HandFullError(Exception):\n pass\n\nclass Rank():\n\n ranks = [\n \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"T\", \"J\", \"Q\", \"K\", \"A\"\n ]\n\n def __init__(self, value):\n try:\n self.value = int(value)\n except:\n value = value.upper()\n self.value = Rank.ranks.index(value) + 2\n\n def toString(self):\n return Rank.ranks[self.value - 2]\n\n def toValue(self):\n return self.value\n\nclass Suit():\n\n suits = [\n \"h\", \"s\", \"d\", \"c\"\n ]\n\n def __init__(self, value):\n try:\n self.value = int(value)\n except:\n value = value.lower()\n self.value = Suit.suits.index(value) + 1\n finally:\n if self.value == 1 or self.value == 3:\n self.color = \"red\"\n else:\n self.color = \"black\"\n\n def toString(self):\n return Suit.suits[self.value - 1]\n \n def toValue(self):\n return self.value\n\nclass Card():\n\n def __init__(self, rank=Rank(\"A\"), suit=Suit(\"h\")):\n if type(rank) is str or type(rank) is int:\n self.rank = Rank(rank)\n else:\n self.rank = rank\n\n if type(suit) is str:\n self.suit = Suit(suit)\n else:\n self.suit = suit\n\n def toString(self, color=True):\n global COLOR\n returnStr = self.rank.toString() + self.suit.toString()\n if COLOR and color:\n returnStr = Back.WHITE + returnStr + Style.RESET_ALL\n if self.suit.color == \"red\":\n return Fore.RED + Style.BRIGHT + returnStr\n elif self.suit.color == \"black\":\n return Fore.BLACK + returnStr\n else: \n return returnStr\n\n def compare(self, card):\n \"True if self larger than card passed\"\n return self.rank.toValue() > card.rank.toValue()\n\n def parseStr(string):\n newCard = Card()\n if string[0] in Rank.ranks:\n newCard.rank = Rank(string[0])\n\nclass Hand():\n\n def __init__(self, *args, handSize=5):\n self.handSize = handSize\n self.hand = []\n for arg in args:\n if type(arg) is list:\n if len(arg) <= 2:\n self.addCard(Card(arg[0], arg[1]))\n elif type(arg) is tuple and len(arg) == self.handSize:\n self.hand = list(arg)\n elif type(arg) is Card:\n self.addCard(arg)\n \n def isEmpty(self):\n return len(self.hand) == 0\n\n def isFull(self):\n return len(self.hand) == self.handSize\n\n def toString(self, color=True):\n if not self.isEmpty():\n handString = [card.toString(color=color) for card in self.hand]\n return \" \".join(handString)\n else:\n return \"\"\n\n def addCard(self, card):\n if not self.isFull():\n self.hand.append(card)\n\n def discard(self, index):\n if not self.isEmpty():\n return self.hand.pop(index)\n else:\n return None\n\n def handDescription(self):\n handScore = Hand.scoreHand(self)\n self.handScore = handScore\n return Hand.scoreString(self.handScore)\n\n def scoreString(handScore):\n if handScore[0] == 8:\n if handScore[1] == 13:\n return \"Royal Flush\"\n else:\n if handScore[2][1] == 5 and handScore[2][0] == 14:\n value = 1\n else:\n value = 0\n return \"Straight Flush, \" + Rank(handScore[2][value]).toString() + \" high\"\n \n elif handScore[0] == 7:\n return \"Quad \" + Rank(handScore[1]).toString() + \"s\"\n elif handScore[0] == 6:\n return \"Full House, \" + Rank(handScore[1][0]).toString() + \"s full of \" + Rank(handScore[2][0]).toString() + \"s\"\n elif handScore[0] == 5:\n return \"Flush, \" + \" \".join( [Rank(i).toString() for i in handScore[1] ] )\n elif handScore[0] == 4:\n if handScore[2][1] == 5 and handScore[2][0] == 14:\n value = 1\n else:\n value = 0\n return \"Straight, \" + 
Rank(handScore[2][value]).toString() + \" high\"\n elif handScore[0] == 3:\n return \"Three of a Kind \" + Rank(handScore[1][0]).toString() + \"s\"\n elif handScore[0] == 2:\n return \"Two Pair \" + Rank(handScore[1][0]).toString() + \"s and \" + Rank(handScore[1][1]).toString() + \"s\"\n elif handScore[0] == 1:\n return \"Pair of \" + Rank(handScore[1][0]).toString() + \"s\"\n elif handScore[0] == 0:\n return \"High Card \" + Rank(max(handScore[2])).toString()\n else:\n return \"Error in calculation\"\n\n def scoreHand(hand): # Returns (Score, Kicker, Hand Values)\n values = sorted([card.rank.value for card in hand.hand], reverse=True)\n suits = [card.suit.value for card in hand.hand]\n\n straight = (values == list(range(values[0], values[0] -5, -1))\n or values == [14, 5, 4, 3, 2])\n \n flush = all(suit == suits[0] for suit in suits)\n \n if straight and flush:\n return 8, values[1], values # Straight / Royal Flush\n if flush:\n return 5, values # Flush\n if straight:\n return 4, values[1], values # Straight\n\n trips = []\n pairs = []\n for v, group in itertools.groupby(values):\n count = sum(1 for i in group)\n if count == 4:\n return 7, v, values # Quads\n elif count == 3:\n trips.append(v)\n elif count == 2:\n pairs.append(v)\n\n if trips:\n return (6 if pairs else 3), trips, pairs, values # Full House / Trips\n\n return len(pairs), pairs, values # Two Pair / One Pair / High Card\n\n def compareHands(hand1, hand2):\n\n hand1Score = Hand.scoreHand(hand1)\n hand2Score = Hand.scoreHand(hand2)\n if hand1Score > hand2Score:\n return hand1\n elif hand2Score > hand1Score:\n return hand2\n else:\n return -1\n\n def score7(hand): # Score a hand of seven cards (e.g. holdem)\n if len(hand.hand) != 7:\n raise ValueError\n else:\n combinations = itertools.combinations(hand.hand, 5)\n maxScore = ()\n \n for combination in combinations:\n comboHand = Hand(combination)\n score = Hand.scoreHand(comboHand)\n if score >= maxScore:\n maxScore = score\n\n return maxScore\n\n\nclass Deck():\n\n standardDeck = [\n Card(rank=Rank(i), suit=Suit(j)) for i in range(2, 15) for j in range(4)\n ]\n\n def __init__(self):\n self.deck = Deck.standardDeck.copy()\n self.deckSize = len(self.deck)\n random.shuffle(self.deck)\n\n def randomCard(self):\n if not self.isEmpty():\n card = self.deck[0]\n del self.deck[0]\n self.deckSize -= 1\n return card\n\n def peek(self):\n return self.deck[0]\n\n def addCard(self, card):\n if not self.isFull():\n self.deck.append(card)\n self.deckSize += 1\n\n def isFull(self):\n return self.deckSize == len(Deck.standardDeck)\n\n def isEmpty(self):\n return self.deckSize == 0\n\n def shuffle(self):\n random.shuffle(self.deck)\n"
},
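`Hand.scoreHand` returns tuples arranged so that Python's lexicographic tuple comparison ranks hands directly: the category code comes first, followed by the tie-breaking ranks, which is why `compareHands` and `score7` can use plain `>` and `>=`. A short illustration with hypothetical score tuples in that shape:

```python
# Hypothetical score tuples in the (category, kickers, values) shape
# produced by Hand.scoreHand; category 1 = pair, category 5 = flush.
pair_of_kings = (1, [13], [13, 13, 9, 7, 2])
pair_of_aces = (1, [14], [14, 14, 5, 4, 3])
flush = (5, [12, 10, 8, 6, 4])

assert pair_of_aces > pair_of_kings  # same category: kickers break the tie
assert flush > pair_of_aces          # higher category wins outright
print(max(pair_of_kings, pair_of_aces, flush))
```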
{
"alpha_fraction": 0.8545454740524292,
"alphanum_fraction": 0.8545454740524292,
"avg_line_length": 26.5,
"blob_id": "efc669a32e2228c50cdf0d8406bbfcff23e6c5a3",
"content_id": "2529bbdee83512ecb1f94a44e11310424bc7175a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jamesm2w/CardEngine",
"src_encoding": "UTF-8",
"text": "# CardEngine\nPython based playing card representations\n"
}
] | 6 |
yiwanggecaonima/helloworld
|
https://github.com/yiwanggecaonima/helloworld
|
e80afa6a5a0527139a01693f9a8b177db47be134
|
726bd71094926fe6975cbc66f08a342fa093716a
|
ebf9c17fcfb0db94c8215ba197512a077f4df459
|
refs/heads/master
| 2018-11-05T02:29:28.165365 | 2018-08-27T01:55:05 | 2018-08-27T01:55:05 | 146,232,735 | 0 | 0 | null | 2018-08-27T01:42:01 | 2018-08-27T01:42:04 | 2018-08-27T01:51:26 | null |
[
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.761904776096344,
"avg_line_length": 20,
"blob_id": "4c3e086ff56e5856d9a79c107e1b85a53e4923c3",
"content_id": "fb8b92487360dafb16dcfea9d1688bc5979a352c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/caonima.py",
"repo_name": "yiwanggecaonima/helloworld",
"src_encoding": "UTF-8",
"text": "print('这是一句caonima')\n"
},
{
"alpha_fraction": 0.8703703880310059,
"alphanum_fraction": 0.8703703880310059,
"avg_line_length": 17,
"blob_id": "bf20852e2955bfd23f1379dab3b62d9533008b8b",
"content_id": "c7fb7c9b7a8323643533162e2f4db91571052cf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/README.md",
"repo_name": "yiwanggecaonima/helloworld",
"src_encoding": "UTF-8",
"text": "# helloworld\n这是一个测试你好世界!\n这是一个测试文件,第一次使用的github上上特别的紧张\n"
}
] | 2 |
LeoXiao001/job_todo_list
|
https://github.com/LeoXiao001/job_todo_list
|
74736177b023102f32b3c9bc33eb06a25d83a676
|
116273ed38bcab737adae866e1173c9eaab305c6
|
33e21aef68d971f08e85b1ae4b6a8be2a068bfc4
|
refs/heads/master
| 2022-12-07T13:03:53.346715 | 2020-04-23T04:58:33 | 2020-04-23T04:58:33 | 247,007,180 | 0 | 0 |
MIT
| 2020-03-13T06:56:52 | 2020-04-23T04:58:47 | 2022-11-22T05:52:04 |
CSS
|
[
{
"alpha_fraction": 0.6108779311180115,
"alphanum_fraction": 0.6112136840820312,
"avg_line_length": 31.551912307739258,
"blob_id": "0044eaf8e63a305d4447d4c16ed9e6f4eecf9fb7",
"content_id": "1c797747e51bd9ab9a1bf72220e29d407ccee452",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5957,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 183,
"path": "/todo_list/views.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate, login, get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.models import User\n\nfrom .forms import UserRegistrationForm, ListCreateForm, ItemCreateForm, UserEditForm, ProfileEditForm\nfrom .models import ToDoList, ToDoItem, Profile\n\n# def user_login(request):\n# if request.method == 'POST':\n# form = LoginForm(request.POST)\n# if form.is_valid():\n# cd = form.cleaned_data\n# user = authenticate(request,\n# username=cd['username'],\n# password=cd['password'])\n# if user is not None:\n# if user.is_active:\n# login(request, user)\n# return HttpResponse('Authenticated successfully')\n# else:\n# return HttpResponse('Disabled account')\n# else:\n# return HttpResponse('Invalid login')\n# else:\n# form = LoginForm()\n#\n# return render(request, 'todo_list/login.html', {'form': form})\n\n@login_required\ndef dashboard(request):\n return render(request,\n 'todo_list/dashboard.html',\n {'section': 'dashboard'})\n\n\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n new_user = user_form.save(commit=False)\n # user set_password to handle encryption for safety reason\n new_user.set_password(user_form.cleaned_data['password'])\n new_user.save()\n Profile.objects.create(user=new_user)\n return render(request,\n 'registration/register_done.html',\n {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n\n return render(request, 'registration/register.html', {'form': user_form})\n\n\n@login_required\ndef todolist_create(request):\n if request.method == 'POST':\n form = ListCreateForm(data=request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n new_list = form.save(commit=False)\n new_list.user = request.user\n new_list.save()\n return redirect('dashboard')\n else:\n form = ListCreateForm()\n\n return render(request, 'todo_list/create_list.html', {'form': form})\n\n\n@login_required\ndef todoitem_create(request, list_id):\n list = ToDoList.objects.get(id=list_id)\n if list in request.user.lists.all():\n if request.method == 'POST':\n form = ItemCreateForm(data=request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n new_item = form.save(commit=False)\n new_item.list = list\n new_item.save()\n\n # return redirect('dashboard')\n return redirect(list)\n else:\n form = ItemCreateForm()\n\n return render(request, 'todo_list/create_item.html', {'form': form, 'list': list})\n\n else:\n return redirect('dashboard')\n\n\nclass TodoitemUpdateView(UpdateView):\n model = ToDoItem\n template_name = 'todo_list/item_update.html'\n fields = ['item_name', 'due_date', 'priority', 'description', 'notification', 'complete']\n\n\nclass TodoitemDeleteView(DeleteView):\n model = ToDoItem\n template_name = 'todo_list/item_confirm_delete.html'\n\n def get_success_url(self):\n list_id = ToDoItem.objects.get(pk=self.kwargs['pk']).list.id\n return reverse_lazy('list_detail', args=[str(list_id)])\n\n\nclass TodoitemDetailView(DetailView):\n model = ToDoItem\n template_name = 'todo_list/item_detail.html'\n\n\nclass TodolistDetailView(DetailView):\n model = ToDoList\n template_name = 'todo_list/list_detail.html'\n\n\nclass 
TodolistDeleteView(DeleteView):\n model = ToDoList\n template_name = 'todo_list/list_confirm_delete.html'\n success_url = reverse_lazy('dashboard')\n\n\n# class UserDetailView(DetailView):\n# model = User\n# template_name = 'todo_list/user_detail.html'\n#\n# def get_object(self):\n# return self.request.user\n\n@login_required\ndef user_detail(request):\n user = request.user\n return render(request, 'todo_list/user_detail.html', {'user': user})\n\n\n# class UserUpdateView(UpdateView):\n# model = User\n# template_name = 'todo_list/user_update.html'\n# fields = ['username', 'email']\n#\n# def get_object(self):\n# return self.request.user\n#\n# def get_success_url(self):\n# return reverse_lazy('user_detail')\n\n@login_required\ndef user_edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user,\n data=request.POST)\n profile_form = ProfileEditForm(instance=request.user.profile,\n data=request.POST,\n files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return redirect('user_detail')\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(instance=request.user.profile)\n\n return render(request,\n 'todo_list/user_update.html',\n {'user_form': user_form,\n 'profile_form': profile_form})\n\n\nclass ItemsListView(ListView):\n model = ToDoItem\n context_object_name = 'items'\n template_name = 'todo_list/item_list.html'\n paginate_by = 30\n\n def get_queryset(self):\n return ToDoItem.objects.filter(list__user=self.request.user)\n"
},
{
"alpha_fraction": 0.6156174540519714,
"alphanum_fraction": 0.6180387139320374,
"avg_line_length": 26.53333282470703,
"blob_id": "73c1838b316f5f6bd205ee68ccbde1679b5d2a54",
"content_id": "a256cf17f2d04042258ca94bc147dd46af423e0e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1652,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 60,
"path": "/todo_list/forms.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n\nimport datetime\n\nfrom .models import ToDoList, ToDoItem, Profile\n\n\n# class LoginForm(forms.Form):\n# username = forms.CharField()\n# password = forms.CharField(widget=forms.PasswordInput)\n\n\nclass UserRegistrationForm(forms.ModelForm):\n password = forms.CharField(label='Passowrd',\n widget=forms.PasswordInput)\n password2 = forms.CharField(label='Repeat password',\n widget=forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ('username', 'email')\n\n def clean_password2(self):\n cd = self.cleaned_data\n if cd['password'] != cd['password2']:\n raise forms.ValidationError('Passwords don\\'t match.')\n return cd['password2']\n\n\nclass UserEditForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('username', 'email')\n\n\nclass ProfileEditForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ('avatar',)\n\n\nclass ListCreateForm(forms.ModelForm):\n class Meta:\n model = ToDoList\n fields = {'list_name'}\n\n\nclass ItemCreateForm(forms.ModelForm):\n field_order = ['item_name', 'description', 'priority', 'due_date', 'notification']\n class Meta:\n model = ToDoItem\n fields = {'item_name', 'description', 'priority', 'due_date', 'notification'}\n widgets = {\n 'description': forms.Textarea,\n 'due_date': forms.DateInput,\n 'priority': forms.Select,\n 'notification': forms.CheckboxInput,\n }\n"
},
{
"alpha_fraction": 0.508849561214447,
"alphanum_fraction": 0.6991150379180908,
"avg_line_length": 16.842105865478516,
"blob_id": "be6b5eedb4008a7ccb5e641e7ad1a906792987f4",
"content_id": "6e0f92c8e9d720e60c8959dcfa397995e77d4ea0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 678,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 38,
"path": "/requirements.txt",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "amqp==2.5.2\nanyjson==0.3.3\nAPScheduler==3.6.3\nasgiref==3.2.5\nbilliard==3.6.3.0\nboto3==1.12.41\nbotocore==1.15.41\ncelery==4.4.2\ndj-database-url==0.5.0\nDjango==3.0.4\ndjango-appconf==1.0.3\ndjango-imagekit==4.0.2\ndjango-storages==1.9.1\ndjango-tempus-dominus==5.1.2.13\ndjango-widget-tweaks==1.4.8\ndocutils==0.15.2\ngunicorn==20.0.4\nimportlib-metadata==1.5.2\njmespath==0.9.5\nkombu==4.6.8\npilkit==2.0\nPillow==7.1.1\npsycopg2-binary==2.7.7\npython-dateutil==2.8.1\npython-decouple==3.3\npython-http-client==3.2.7\npytz==2019.3\nredis==3.4.1\ns3transfer==0.3.3\nsendgrid==6.2.2\nsix==1.14.0\nsqlparse==0.3.1\nsupervisor==4.1.0\ntzlocal==2.0.0\nurllib3==1.25.9\nvine==1.3.0\nwhitenoise==5.0.1\nzipp==3.1.0\n"
},
{
"alpha_fraction": 0.570493757724762,
"alphanum_fraction": 0.5841760635375977,
"avg_line_length": 41.025001525878906,
"blob_id": "9cd9fec5ed3d2e05abdeee06733652f4047ef670",
"content_id": "b337f90b176a1685d28ae6f364beb41c93a3dd3a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1681,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 40,
"path": "/todo_list/migrations/0001_initial.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.4 on 2020-03-19 04:36\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ToDoList',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('list_name', models.CharField(max_length=50)),\n ('create_date', models.DateField(auto_now_add=True)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lists', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='ToDoItem',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('item_name', models.CharField(max_length=50)),\n ('create_date', models.DateField(auto_now_add=True)),\n ('due_date', models.DateField()),\n ('priority', models.IntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3, 'High')], default=1)),\n ('description', models.TextField()),\n ('notification', models.BooleanField(default=False)),\n ('complete', models.BooleanField(default=False)),\n ('list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='todo_list.ToDoList')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.5569767355918884,
"alphanum_fraction": 0.559883713722229,
"avg_line_length": 30.290908813476562,
"blob_id": "52d7c4931361cf8ab7d691efc0bac4812f8c7324",
"content_id": "19e15a536f99f9443ec7f263e7da4d5bb7fe2460",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1720,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 55,
"path": "/todo_list/emails.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom django.conf import settings\n\nimport datetime\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nfrom .models import ToDoItem\n\n\nsched = BlockingScheduler()\n\[email protected]_job('interval', minutes=3)\ndef send_email_reminder():\n today = datetime.date.today()\n users = User.objects.all()\n # from_email = settings.EMAIL_HOST_USER\n from_email = settings.FROM_EMAIL\n for user in users:\n query = ToDoItem.objects. \\\n filter(list__user=user). \\\n filter(complete=False). \\\n filter(notification=True). \\\n filter(due_date__lte=(today+datetime.timedelta(days=20))). \\\n order_by('due_date', '-priority')\n\n if not query:\n continue\n\n to_email = [user.email]\n if len(query) > 1:\n subject = '{} job todo items are due soon'.format(len(query))\n else:\n subject = '1 job todo item is due soon'\n\n msg_html = render_to_string('todo_list/email.html',\n {\n 'username': user.username,\n 'query': query,\n 'SITE_URL': settings.SITE_URL,\n })\n email = EmailMessage(\n subject=subject,\n body=msg_html,\n from_email=from_email,\n to=to_email,\n )\n email.content_subtype = 'html'\n email.send(fail_silently=False)\n\n\nsched.start()"
},
{
"alpha_fraction": 0.41258278489112854,
"alphanum_fraction": 0.41920530796051025,
"avg_line_length": 34.9523811340332,
"blob_id": "7022661a35d1a620cb4bfff4b66001a55e9a4f30",
"content_id": "cc3c31b50c667ad4d310416338dcafbba8d8188f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1510,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 42,
"path": "/todo_list/templates/todo_list/create_item.html",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n{% load widget_tweaks %}\n\n{% block title %}Create Job Todo Item{% endblock title %}\n\n{% block content %}\n<!-- <h1>{{ list.list_name }}</h1>\n<h2>Create a job todo item</h2>\n<form action=\"?\" method=\"post\">\n {{ form.as_p }}\n {% csrf_token %}\n <input type=\"submit\" value=\"save item\">\n</form> -->\n\n<div class=\"content-wrapper\">\n <div class=\"row\">\n <div class=\"col-lg-12 grid-margin stretch-card\">\n <div class=\"card\">\n <div class=\"card-body\">\n <h4 class=\"card-title\">{{ list.list_name }}</h4>\n <p>Create a job todo item</p>\n <form class=\"forms-sample\" action=\"?\" method=\"post\">\n {% csrf_token %}\n {% include 'bs4_form.html' with form=form %}\n <script>\n $(function () {\n $(\"#id_due_date\").datetimepicker({\n format: 'Y-m-d',\n timepicker: false,\n });\n });\n </script>\n <button type=\"submit\" value=\"save item\" class=\"btn btn-primary mr-2\">Submit</button>\n <a class=\"btn btn-secondary btn-fw\" href=\"{{ request.META.HTTP_REFERER }}\">Cancel</a>\n </form>\n </div>\n </div>\n </div>\n </div>\n</div>\n\n{% endblock content %}\n"
},
{
"alpha_fraction": 0.6691762804985046,
"alphanum_fraction": 0.6700620055198669,
"avg_line_length": 46.04166793823242,
"blob_id": "3cc6bff1dd04150a9e2d58989e99301a2a2a52aa",
"content_id": "e1462676707f74121320092ad155caf0cb61d220",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2258,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 48,
"path": "/todo_list/urls.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom .views import TodoitemDetailView, TodolistDetailView, TodoitemUpdateView, TodoitemDeleteView, TodolistDeleteView, ItemsListView\nfrom . import views\n\n\nurlpatterns = [\n path('', views.dashboard, name='dashboard'),\n # list related\n path('create_list/', views.todolist_create, name='list_create'),\n path('list_detail/<int:pk>', TodolistDetailView.as_view(), name='list_detail'),\n path('list_delete/<int:pk>', TodolistDeleteView.as_view(), name='list_delete'),\n # item related\n path('create_item/<int:list_id>', views.todoitem_create, name='item_create'),\n path('item_detail/<int:pk>', TodoitemDetailView.as_view(), name='item_detail'),\n path('item_update/<int:pk>', TodoitemUpdateView.as_view(), name='item_update'),\n path('item_delete/<int:pk>', TodoitemDeleteView.as_view(), name='item_delete'),\n path('item_list/', ItemsListView.as_view(), name='item_list'),\n # user profile\n # path('user_profile/', UserDetailView.as_view(), name='user_detail'),\n # path('user_update/', UserUpdateView.as_view(), name='user_update'),\n path('user_profile/', views.user_detail, name='user_detail'),\n path('user_update/', views.user_edit, name='user_update'),\n # user login/out\n path('login/', auth_views.LoginView.as_view(), name='login'),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n # chagne password urls\n path('password_change/',\n auth_views.PasswordChangeView.as_view(),\n name='password_change'),\n path('password_change/done/',\n auth_views.PasswordChangeDoneView.as_view(),\n name='password_change_done'),\n # reset password urls\n path('password_reset/',\n auth_views.PasswordResetView.as_view(),\n name='password_reset'),\n path('password_reset/done/',\n auth_views.PasswordResetDoneView.as_view(),\n name='password_reset_done'),\n path('reset/<uidb64>/<token>/',\n auth_views.PasswordResetConfirmView.as_view(),\n name='password_reset_confirm'),\n path('reset/don/',\n auth_views.PasswordResetCompleteView.as_view(),\n name='password_reset_complete'),\n path('register/', views.register, name='register'),\n]\n"
},
{
"alpha_fraction": 0.6355019211769104,
"alphanum_fraction": 0.643257200717926,
"avg_line_length": 33.132354736328125,
"blob_id": "ed44b8eaf2b92add3218f383cdba44307b9a6f44",
"content_id": "cca6295119292b47e1127367d7890211b21da461",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2321,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 68,
"path": "/todo_list/models.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\n\nimport datetime\nfrom imagekit.models import ProcessedImageField, ImageSpecField\nfrom imagekit.processors import ResizeToFit\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n avatar = ProcessedImageField(upload_to='images',\n format='JPEG',\n processors=[ResizeToFit(100, 100)],\n options={'quality': 80})\n\n @property\n def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n\n\nclass ToDoList(models.Model):\n list_name = models.CharField(max_length=50)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='lists')\n create_date = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.list_name\n\n def get_absolute_url(self):\n return reverse('list_detail', args=[str(self.id)])\n\n\nclass ToDoItem(models.Model):\n PRIORITY_CHOICES = [\n (1, 'Low'),\n (2, 'Medium'),\n (3, 'High'),\n ]\n item_name = models.CharField(max_length=50)\n create_date = models.DateField(auto_now_add=True)\n due_date = models.DateField()\n priority = models.IntegerField(choices=PRIORITY_CHOICES, default=1)\n description = models.TextField()\n list = models.ForeignKey(ToDoList, on_delete=models.CASCADE, related_name='items')\n # notification; if True, send email to notify user\n notification = models.BooleanField(default=False)\n # complete; if True, turn off notification\n complete = models.BooleanField(default=False)\n\n def __str__(self):\n return self.item_name\n\n def get_absolute_url(self):\n return reverse('item_detail', args=[str(self.id)])\n\n def close_to_due_date(self):\n today = datetime.date.today()\n item_query = ToDoItem.objects. \\\n filter(list__user=self.list.user). \\\n filter(complete=False). \\\n filter(notification=True). \\\n filter(due_date__lte=(today+datetime.timedelta(days=20))). \\\n order_by('due_date', '-priority')\n\n return item_query\n"
},
{
"alpha_fraction": 0.7080152630805969,
"alphanum_fraction": 0.7080152630805969,
"avg_line_length": 26.63157844543457,
"blob_id": "ab58deb5a26289d8cc17a6d7fa7a09eb30a45014",
"content_id": "a3f398d38f0e6a732db90af2e76bed81a6009f87",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 524,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 19,
"path": "/todo_list/admin.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import ToDoList, ToDoItem, Profile\n\n\[email protected](ToDoList)\nclass ToDoListAdmin(admin.ModelAdmin):\n list_display = ['list_name', 'create_date']\n list_filter = ['create_date']\n\n\[email protected](ToDoItem)\nclass ToDoItem(admin.ModelAdmin):\n list_display = ['item_name', 'priority', 'due_date', 'create_date']\n list_filter = ['priority', 'due_date', 'create_date']\n\n\[email protected](Profile)\nclass ProfileAdmin(admin.ModelAdmin):\n list_display = ['user', 'avatar']"
},
{
"alpha_fraction": 0.639144241809845,
"alphanum_fraction": 0.6496037840843201,
"avg_line_length": 27.94495391845703,
"blob_id": "077eb5af2d787114862e1df4c0ec15c67df12a2c",
"content_id": "f6aa8d6588e167a20fde0a19bdc705dc1099ea9c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6310,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 218,
"path": "/job_todo_list/settings.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDjango settings for job_todo_list project.\n\nGenerated by 'django-admin startproject' using Django 3.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom decouple import config\nfrom celery.schedules import crontab\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\n# SECRET_KEY = '95&*rymw8t&&!z6!0%%fh$+cabaw^*6@fzp!thm37e)1#v!-*g'\nSECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '95&*rymw8t&&!z6!0%%fh$+cabaw^*6@fzp!thm37e)1#v!-*g')\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n# DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'\n\nALLOWED_HOSTS = ['job-todo-list.herokuapp.com', '127.0.0.1']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'todo_list.apps.TodoListConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'widget_tweaks',\n 'imagekit',\n 'storages',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'job_todo_list.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'job_todo_list.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'America/Los_Angeles'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n# MEDIA_ROOT = os.path.join(BASE_DIR, 
'media/')\n\nAWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = config('AWS_STORAGE_BUCKET_NAME')\nAWS_S3_REGION_NAME = config('AWS_S3_REGION_NAME')\nDEFAULT_FILE_STORAGE = config('DEFAULT_FILE_STORAGE')\nAWS_DEFAULT_ACL = None\n\nLOGIN_REDIRECT_URL = 'dashboard'\nLOGIN_URL = 'login'\nLOGOUT_RUL = 'logout'\n\n# JobTodoList email\nEMAIL_HOST = config('EMAIL_HOST')\nEMAIL_HOST_USER = config('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')\nEMAIL_PORT = config('EMAIL_PORT', cast=int)\nEMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)\nFROM_EMAIL = config('FROM_EMAIL')\n# sendgrid eamil settings\nSENDGRID_USERNAME = config('SENDGRID_USERNAME')\nSENDGRID_PASSWORD = config('SENDGRID_PASSWORD')\nSENDGRID_API_KEY = config('SENDGRID_API_KEY')\n\n# Celery settings\n# CELERY_BROKER_URL = 'redis://localhost:6379'\n# CELERY_RESULT_BACKEND = 'redis://localhost:6379'\n# CELERY_BROKER_URL = config('CELERY_BROKER_URL')\n# CELERY_RESULT_BACKEND = config('CELERY_RESULT_BACKEND')\n# CELERY_ACCEPT_CONTENT = ['application/json']\n# CELERY_TASK_SERIALIZER = 'json'\n# CELERY_RESULT_SERIALIZER = 'json'\n# CELERY_TIMEZONE = TIME_ZONE\n# CELERY_BEAT_SCHEDULE = {\n# 'task_send_email_reminder': {\n# 'task': 'todo_list.tasks.task_send_email_reminder',\n# 'schedule': crontab(hour=1, minute=0),\n# # 'schedule': crontab(minute='*/5'),\n# }\n# }\n\nSITE_URL = config('SITE_URL')\n\n# Heroku: Update database configuration from $DATABASE_URL.\nimport dj_database_url\ndb_from_env = dj_database_url.config(conn_max_age=500)\nDATABASES['default'].update(db_from_env)\n\n# Simplified static file serving.\n# https://warehouse.python.org/project/whitenoise/\n# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# LOGGING = {\n# 'version': 1,\n# 'disable_existing_loggers': False,\n# 'formatters': {\n# 'verbose': {\n# 'format' : \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n# 'datefmt' : \"%d/%b/%Y %H:%M:%S\"\n# },\n# 'simple': {\n# 'format': '%(levelname)s %(message)s'\n# },\n# },\n# 'handlers': {\n# 'file': {\n# 'level': 'DEBUG',\n# 'class': 'logging.FileHandler',\n# 'filename': 'mysite.log',\n# 'formatter': 'verbose'\n# },\n# },\n# 'loggers': {\n# 'django': {\n# 'handlers':['file'],\n# 'propagate': True,\n# 'level':'DEBUG',\n# },\n# 'MYAPP': {\n# 'handlers': ['file'],\n# 'level': 'DEBUG',\n# },\n# }\n# }\n"
},
{
"alpha_fraction": 0.7340824007987976,
"alphanum_fraction": 0.7340824007987976,
"avg_line_length": 23.272727966308594,
"blob_id": "dc5a5cf30f6600595582f39530eeda3badf878b2",
"content_id": "00a73a8bc5f8da52ea88ab7246f4c61f06d8ba8f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 267,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 11,
"path": "/todo_list/tasks.py",
"repo_name": "LeoXiao001/job_todo_list",
"src_encoding": "UTF-8",
"text": "from celery import task\nfrom .emails import send_email_reminder\n\n\"\"\"\nsupervisor will shut down if the website has no traffic,\nso the scheduler doesn't work.\nChange to use Heroku APScheduler.\n\"\"\"\n# @task()\n# def task_send_email_reminder():\n# send_email_reminder()\n"
}
] | 11 |
jtyers/assume-role
|
https://github.com/jtyers/assume-role
|
cb4661a5c73e550317cd5c68bce1e3822272ae92
|
08ac28e672ce3c6615f0bc6a25108f180a07c135
|
a0aaea5823b09a155fd3c5e3d84c12e0cb5b13ce
|
refs/heads/main
| 2023-07-08T06:43:25.874415 | 2021-08-12T12:51:19 | 2021-08-12T12:51:19 | 395,317,467 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6145099997520447,
"alphanum_fraction": 0.6193827986717224,
"avg_line_length": 26.16176414489746,
"blob_id": "2ec5f0f7ccf7e7ae3400242367452277fb7345b3",
"content_id": "5e2e7fb57937596960b2da6221120bb47648b1f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1847,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 68,
"path": "/assume-role.py",
"repo_name": "jtyers/assume-role",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport configparser\nimport colorama\nimport json\nimport os\n#import nuclear\nimport subprocess\nimport sys\n\ndef msg(*a):\n print(colorama.Fore.GREEN, *a, colorama.Style.RESET_ALL, file=sys.stderr)\n\ndef warn(*a):\n print(colorama.Fore.RED, *a, colorama.Style.RESET_ALL, file=sys.stderr)\n\ndef main(profile, cmd=os.environ['SHELL']):\n config = configparser.RawConfigParser()\n config.read(os.path.expanduser('~/.aws/config'))\n\n try:\n role_arn = config.get('profile '+profile, 'role_arn')\n\n except configparser.NoSectionError:\n warn('profile', profile, 'does not exist')\n sys.exit(1)\n\n except configparser.NoOptionError:\n warn('profile', profile, 'has no role_arn defined')\n sys.exit(1)\n\n p = subprocess.run([\n 'aws', 'sts', 'assume-role',\n '--role-arn', role_arn,\n '--role-session-name', 'assume-role-session',\n ], capture_output=True)\n \n try:\n p.check_returncode()\n\n except subprocess.CalledProcessError as e:\n warn('assume-role for profile', profile, 'failed')\n warn(e.stderr.decode('utf-8'))\n sys.exit(1)\n\n response = json.loads(p.stdout)\n msg(profile, ': running', cmd)\n\n p = subprocess.run(cmd, env={\n **os.environ,\n 'AWS_ACCESS_KEY_ID': response['Credentials']['AccessKeyId'],\n 'AWS_SECRET_ACCESS_KEY': response['Credentials']['SecretAccessKey'],\n 'AWS_SESSION_TOKEN': response['Credentials']['SessionToken'],\n 'AWS_SESSION_EXPIRATION': response['Credentials']['Expiration'],\n 'AWS_PROFILE': profile,\n 'AWS_ASSUMED_ROLE_ARN': response['AssumedRoleUser']['Arn'],\n })\n\n sys.exit(p.returncode)\n\nif __name__ == '__main__':\n colorama.init()\n\n if len(sys.argv) > 2:\n main(sys.argv[1], sys.argv[2:])\n\n else:\n main(sys.argv[1])\n"
},
{
"alpha_fraction": 0.5961538553237915,
"alphanum_fraction": 0.5961538553237915,
"avg_line_length": 12,
"blob_id": "08e115c1bf27b83ada18354e741ab2cf5c2d6ae9",
"content_id": "f2137bda1d2c7186ab54be7df781e1d91d785435",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 4,
"path": "/assume-role",
"repo_name": "jtyers/assume-role",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nset -eu\npipenv run ./assume-role.py \"$@\"\n"
},
{
"alpha_fraction": 0.7570093274116516,
"alphanum_fraction": 0.7570093274116516,
"avg_line_length": 34.66666793823242,
"blob_id": "55b30e95033d54a311415614c863cb3e7132ef7f",
"content_id": "44dcbd3e51e4c5103c0da4e1dc75c1b0ceb350bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 3,
"path": "/README.md",
"repo_name": "jtyers/assume-role",
"src_encoding": "UTF-8",
"text": "# assume-role\n\nSimple python script which allows you to assume an AWS role configured via `~/.aws/config`.\n"
}
] | 3 |
MarcoGlez/Mision-03
|
https://github.com/MarcoGlez/Mision-03
|
5189a1544605c07b74d27ab8984a7a208d979f36
|
8a20f7856e0fc8776634fc28e16a8ce784e8df65
|
42dd3e1c43c175c60d75b96a7380111722a8529e
|
refs/heads/master
| 2020-03-27T17:00:52.238035 | 2018-08-31T02:55:42 | 2018-08-31T02:55:42 | 146,823,237 | 0 | 0 | null | 2018-08-31T00:59:55 | 2018-08-29T13:48:07 | 2018-08-30T20:15:24 | null |
[
{
"alpha_fraction": 0.7117552161216736,
"alphanum_fraction": 0.7206119298934937,
"avg_line_length": 37.8125,
"blob_id": "256de6fc945d42475e30c19a9406494093aab952",
"content_id": "dfbe6d21afb826e3507b97301a76e1a4982a2a68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 32,
"path": "/datosDeUnTrapecio.py",
"repo_name": "MarcoGlez/Mision-03",
"src_encoding": "UTF-8",
"text": "#Autor: Marco González Elizalde\n'''Proposito: Calcular e imprimir el area y perimetro de un trapecio isósceles\npreguntando al usuario los valores de la base mayor, base menor y altura'''\n\n#Calcula el area del trapecio isoceles, regresa valor del area\ndef calcularArea(baseMayor,baseMenor,altura):\n area = (baseMayor + baseMenor) /2 *altura\n return area\n\n#Calcula el valor de un lado del trapecio y del perimetro, regresa valor del perímetro\ndef calcularPerimetro(baseMayor,baseMenor,altura):\n # Calcula el lado del trapecio utilizando el teorema de pitagoras\n lado = ((altura **2) + (((baseMayor - baseMenor)/2) **2)) **0.5\n \n perimetro = baseMayor + baseMenor + (lado *2)\n return perimetro\n\n'''Pregunta al usuario los valores de las bases y altura del trapecio,\ncalcula y despues imprime el valor del área y del perímetro'''\ndef main():\n baseMayor = float(input(\"Escribe la longitud de la base mayor: \"))\n baseMenor = float(input(\"Escribe la longitud de la base mmenor: \"))\n altura = float(input(\"Escribe la altura: \"))\n\n area = calcularArea(baseMayor,baseMenor,altura)\n perimetro = calcularPerimetro(baseMayor,baseMenor,altura)\n\n print(\"\"\"Área: %.02f\nPerímetro: %.02f\"\"\" %(area, perimetro))\n\n#Corre el programa\nmain()\n"
},
{
"alpha_fraction": 0.7334536910057068,
"alphanum_fraction": 0.7454873919487,
"avg_line_length": 31.58823585510254,
"blob_id": "274b2bfc4853ae5f2da2e829c8819c6a96951ec6",
"content_id": "05af2540adc28fb6f81716daae9762e308be26e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1667,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 51,
"path": "/rendimientoDeUnAuto.py",
"repo_name": "MarcoGlez/Mision-03",
"src_encoding": "UTF-8",
"text": "# Autor: Marco González Elizalde\n'''Proposito: Calcular el rendimiento de un auto en Km/L y en Mi/Gal\ny dar la cantidad de gasolina necesaria para un viaje de ciertos Km'''\n\n\n# Calcula el rendimiento del coche en Km/L\ndef calcularRendimientoKm(kmRecorridos, litrosUsados):\n rendimientoEnKm = kmRecorridos / litrosUsados\n return rendimientoEnKm\n\n\n# Convierte el valor del rendimiento en Km/L a Mi/Gal\ndef convertirAMillasPorGalon(rendimientoEnKm):\n rendimientoEnMi = rendimientoEnKm / 1.6093 / 0.264\n return rendimientoEnMi\n\n\n# Calcula la cantidad de gasolina necesaria para realizar un viaje de ciertos Km\ndef calcularGasNecesaria(kmDeViaje, rendimientoEnKm):\n gasNecesaria = kmDeViaje / rendimientoEnKm\n return gasNecesaria\n\n\n'''Pregunta por los km recorridos por el usuario, la gasolina utilizada\ny la extension del viaje, regresa el rendimiento en Km/L y Mi/Gal, y\nla gasolina necesaria para el viaje'''\n\n\ndef main():\n kmRecorridos = float(input(\"Teclea el número de km recorridos: \"))\n litrosUsados = float(input(\"Teclea el número de litros de gasolina usados: \"))\n print(\"\")\n\n rendimientoEnKm = calcularRendimientoKm(kmRecorridos, litrosUsados)\n rendimientoEnMi = convertirAMillasPorGalon(rendimientoEnKm)\n\n print(\"\"\"Si recorres 475 kms con 17 litros de gasolina, el rendimiento es:\n%.02f km/l\n%.02f mi/gal\n\"\"\" % (rendimientoEnKm, rendimientoEnMi))\n\n kmDeViaje = int(input(\"¿Cuántos kilómetros vas a recorrer? \"))\n print(\"\")\n gasRequerida = calcularGasNecesaria(kmDeViaje, rendimientoEnKm)\n\n print(\"Para recorrer %d km. necesitas %.02f litros de gasolina\"\n % (kmDeViaje, gasRequerida))\n\n\n# Ejecuta el programa\nmain()\n"
},
{
"alpha_fraction": 0.6997411847114563,
"alphanum_fraction": 0.7109577059745789,
"avg_line_length": 34.121212005615234,
"blob_id": "baa77bc0c555fa9a1d75fc8307624ca4492787e2",
"content_id": "d42d5b3c73a38b32738eea5cf80ea8ef2d6bccbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1159,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 33,
"path": "/pagoDeUnTrabajador.py",
"repo_name": "MarcoGlez/Mision-03",
"src_encoding": "UTF-8",
"text": "#Autor: Marco González Elizalde\n'''Proposito: Calcula el pago de un trabajador basado en las horas\nnormales y extras trabajadas, considerando la cantidad que se le paga por hora'''\n\n#Calcula el pago de las horas normales trabajadas\ndef pagoNormal(horasNormales, pagoXHora):\n pagoNormal = horasNormales * pagoXHora\n return pagoNormal\n\n#Calcula el pago de las horas extras trabajadas, 1 hora extra = 1.85 hora normal\ndef pagoExtra(horasExtra, pagoXHora):\n pagoExtra = horasExtra * pagoXHora * 1.85\n return pagoExtra\n\n'''Pregunta por el pago por hora, horas normales y extra trabajadas\ne imprime el pago normal extra y total'''\ndef main():\n horasNormales = float(input(\"Teclea las horas normales trabajadas: \"))\n horasExtra = float(input(\"Teclea las horas extras trabajadas: \"))\n pagoXHora = float(input(\"Teclea el pago por hora: \"))\n print(\"\")\n\n pago_normal = pagoNormal(horasNormales, pagoXHora)\n pago_extra = pagoExtra(horasExtra, pagoXHora)\n pagoTotal = pago_normal + pago_extra\n\n print(\"\"\"Pago normal: $%.02f\nPago extra: $%.02f\n-----------------------\nPago total: $%.02f\"\"\" %(pago_normal,pago_extra,pagoTotal))\n\n#Corre el programa\nmain()\n"
},
{
"alpha_fraction": 0.6953316926956177,
"alphanum_fraction": 0.7174447178840637,
"avg_line_length": 37.761905670166016,
"blob_id": "d0f176a7b8cef996e4f5307a5ea796dc246c156d",
"content_id": "e5895b1af197c122a0e190802ebab350e5d2d4a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 21,
"path": "/asientosEnUnEstadio.py",
"repo_name": "MarcoGlez/Mision-03",
"src_encoding": "UTF-8",
"text": "#Autor: Marco González Elizalde A0137627\n'''Propósito: Calcular el total a pagar dependiendo del tipo y cantidad de boletos\nque se pidan e imprime el resultado'''\n\n#Calcula el total a pagar dependiento del numero y tipo de boletos\ndef calcularPago(asientosA,asientosB,asientosC):\n costoA = asientosA * 925\n costoB = asientosB * 775\n costoC = asientosC * 360\n totalPago = costoA + costoB + costoC\n return totalPago\n\n#Pide la cantidad de boletos de cada tipo que se va a pedir e imprime el total a pagar\ndef main():\n asientosA = float(input(\"Número de boletos de clase A: \"))\n asientosB = float(input(\"Número de boletos de clase B: \"))\n asientosC = float(input(\"Número de boletos de clase C: \"))\n print(\"El costo total es: $%.02f\" %(calcularPago(asientosA,asientosB,asientosC)))\n\n#Corre el programa\nmain()\n"
}
] | 4 |
aashifb/python
|
https://github.com/aashifb/python
|
06a4363284c150bcd7caf6b5c38b308c0a63859f
|
a8d82bd78473ba03d5434878a4661419be8284cd
|
e2a7d68a669e0d7f2846b08057df22a601c36355
|
refs/heads/master
| 2018-12-08T23:43:37.315004 | 2018-11-28T07:44:52 | 2018-11-28T07:44:52 | 142,745,392 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7179487347602844,
"alphanum_fraction": 0.7179487347602844,
"avg_line_length": 12,
"blob_id": "23cf88ed1673e7af85a8ffb0c2c9b8aaf33b403d",
"content_id": "ce93a57335c778a806831d94e89afd6cefab8c25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 39,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/README.md",
"repo_name": "aashifb/python",
"src_encoding": "UTF-8",
"text": "# python\n\nThese are my work in Python.\n"
},
{
"alpha_fraction": 0.5724802613258362,
"alphanum_fraction": 0.5953687429428101,
"avg_line_length": 49.14093780517578,
"blob_id": "59e7c714ac4eacbb2e771751877292451569763a",
"content_id": "3c44303ef396c2e1e3bf48d9628a0bd20033b1b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7471,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 149,
"path": "/ops_getasanastats",
"repo_name": "aashifb/python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport subprocess\nimport json\nimport re\nimport datetime\nimport argparse\nimport requests\nimport time\nfrom requests.exceptions import ConnectionError\n\n#Global variables in use\nASANA_TOKEN = open('/path/of/.asana_token','r').read().rstrip('\\n')\nASANA_REQUESTS_HEADER = {\"Authorization\":\"Bearer \"+ASANA_TOKEN}\nASANA_REQUESTS_URL_PART1 = None\nREPORT = dict()\nCUSTOM_FIELDS_ALL = ['Unset_Problem_Type']\nCUSTOM_FIELDS_REPORTED = []\nTHIRD_LEVEL_URL = \"https://app.asana.com/0/8551066337564/\"\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Generates stats of issues reported in Asana 3rd level and feeds metrics\")\n group1 = parser.add_argument_group('Feeds metrics')\n group2 = parser.add_argument_group('User command')\n group1.add_argument(\"--metrics\", action=\"store_true\", help=\"Send data to metrics\")\n group2.add_argument(\"-d\",\"--date\", help=\"Format: YYYY-MM-DD\")\n group2.add_argument(\"--ptype\", help=\"Use PTYPE as all OR <Problem Type>\")\n group2.add_argument(\"--categories\", action=\"store_true\", help=\"lists all categories of problem types\")\n args = parser.parse_args()\n\n if args.metrics == True and (args.date != None or args.ptype != None or args.categories != False):\n parser.error('--metrics must be given alone')\n elif args.categories == True and (args.date != None or args.ptype != None or args.metrics != False):\n parser.error('--categories must be given alone')\n elif len([x for x in (args.date,args.ptype) if x is not None]) == 1:\n parser.error('--date and --ptype must be given together')\n\n try:\n CUSTOM_FIELDS_RAW = json.loads(requests.get(\"https://app.asana.com/api/1.0/projects/8551066337564/\",headers=ASANA_REQUESTS_HEADER,timeout=5).text) ['data']['custom_field_settings'][2]['custom_field']['enum_options']\n except (ValueError, ConnectionError) as ERROR:\n print ('Error occurred while retrieving custom fields', ERROR)\n exit (1)\n else:\n for VALUES in CUSTOM_FIELDS_RAW:\n CUSTOM_FIELDS_ALL.append(VALUES.get('name'))\n if args.metrics:\n push_data_into_metrics()\n print ('Metrics updated successfully!!')\n elif args.categories:\n for PROBLEM_TYPE in CUSTOM_FIELDS_ALL:\n print (PROBLEM_TYPE)\n elif not args.date and not args.ptype:\n print ('ops_get_asana_stats --help')\n else:\n try:\n datetime.datetime.strptime(args.date, '%Y-%m-%d')\n except ValueError:\n print ('Invalid date format, should be YYYY-MM-DD')\n exit (1)\n else:\n DATE = args.date\n global ASANA_REQUESTS_URL_PART1\n ASANA_REQUESTS_URL_PART1 = 'https://app.asana.com/api/1.0/workspaces/661439586593/tasks/search?projects.any=8551066337564&created_on='+str(DATE)\n if args.ptype == 'all':\n print ('Gathering stats, please hold on...' '\\n')\n for status in ['true', 'false']:\n create_report_based_on_problem_type(status)\n print (json.dumps(REPORT, indent=4, sort_keys=False))\n elif args.ptype not in CUSTOM_FIELDS_ALL:\n print ('Invalid Problem Type')\n else:\n print ('Gathering stats, please hold on...' 
'\\n')\n for status in ['true', 'false']:\n create_report_based_on_problem_type(status)\n if args.ptype not in REPORT:\n print ('No issues reported for %s' % args.ptype)\n else:\n print (args.ptype)\n print (json.dumps(REPORT[args.ptype], indent=4, sort_keys=False))\n\n\ndef push_data_into_metrics():\n DATE = datetime.date.today() - datetime.timedelta(days=1)\n global ASANA_REQUESTS_URL_PART1\n ASANA_REQUESTS_URL_PART1 = 'https://app.asana.com/api/1.0/workspaces/661439586593/tasks/search?projects.any=8551066337564&created_on='+str(DATE)\n for status in ['true', 'false']:\n create_report_based_on_problem_type(status)\n for KEY, VALUE in REPORT.items():\n ISSUECOUNT = (str(KEY)+str(':')+str(dict(VALUE.items())['count']))\n METRIC = subprocess.Popen((['echo', 'asana.3rdlevel.'+str(ISSUECOUNT)+'|g']), stdout=subprocess.PIPE, shell=False)\n for j in range(5):\n subprocess.check_output(('nc', '-w', '2', '-u', 'metrics.env.tools-cph3.one.com', '8125'), stdin=METRIC.stdout, shell=False)\n for PROBLEM_TYPE in CUSTOM_FIELDS_ALL:\n if PROBLEM_TYPE not in REPORT:\n METRIC = subprocess.Popen((['echo', 'asana.3rdlevel.'+PROBLEM_TYPE+':0|g']), stdout=subprocess.PIPE, shell=False)\n for j in range(5):\n subprocess.check_output(('nc', '-w', '2', '-u', 'metrics.env.tools-cph3.one.com', '8125'), stdin=METRIC.stdout, shell=False)\n\n\ndef create_report_based_on_problem_type(status):\n ASANA_REQUESTS_URL_PART2 = str('&custom_fields.744920550438275.is_set=')+str(status)\n try:\n TASK_LIST_IN_JSON = json.loads(requests.get(ASANA_REQUESTS_URL_PART1+ASANA_REQUESTS_URL_PART2,headers=ASANA_REQUESTS_HEADER,timeout=5).text)\n except (ValueError, ConnectionError) as ERROR:\n print ('Error occurred while retrieving 3rd level task list', ERROR)\n exit (1)\n else:\n if status == 'true':\n for task in TASK_LIST_IN_JSON['data']:\n PROBLEM_TYPE = identify_problem_type(task['id'])\n if PROBLEM_TYPE in REPORT:\n REPORT[PROBLEM_TYPE]['count'] += 1\n REPORT[PROBLEM_TYPE]['id'].append(THIRD_LEVEL_URL+str(task['id']))\n\n else:\n REPORT[PROBLEM_TYPE] = {}\n REPORT[PROBLEM_TYPE]['count'] = 1\n REPORT[PROBLEM_TYPE]['id'] = [THIRD_LEVEL_URL+str(task['id'])]\n\n if status == 'false':\n for task in TASK_LIST_IN_JSON['data']:\n if 'Unset_Problem_Type' in REPORT:\n REPORT['Unset_Problem_Type']['count'] += 1\n REPORT['Unset_Problem_Type']['id'].append(THIRD_LEVEL_URL+str(task['id']))\n else:\n REPORT['Unset_Problem_Type'] = {}\n REPORT['Unset_Problem_Type']['count'] = 1\n REPORT['Unset_Problem_Type']['id'] = [THIRD_LEVEL_URL+str(task['id'])]\n\n\ndef identify_problem_type(ID):\n for CUSTOM_FIELD_POSITION in range(10):\n try:\n CUSTOM_FIELD_ID = json.loads(requests.get(str(\"https://app.asana.com/api/1.0/tasks/\")+str(ID),headers=ASANA_REQUESTS_HEADER,timeout=5).text) ['data']['custom_fields'][CUSTOM_FIELD_POSITION]['id']\n if CUSTOM_FIELD_ID == 744920550438275:\n try:\n PROBLEM_TYPE = json.loads(requests.get(str(\"https://app.asana.com/api/1.0/tasks/\")+str(ID),headers=ASANA_REQUESTS_HEADER,timeout=5).text)['data']['custom_fields'][CUSTOM_FIELD_POSITION]['enum_value']['name']\n return PROBLEM_TYPE\n break\n except (ValueError, ConnectionError) as ERROR:\n print ('Error occurred while retrieving problem type', ERROR)\n exit (1)\n except (ValueError, ConnectionError) as ERROR:\n print ('Error occurred while retrieving custom field id', ERROR)\n exit (1)\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 2 |
RMWinslow/LittleScripts
|
https://github.com/RMWinslow/LittleScripts
|
2a9fe838982d8591de53818369dcf6f09e78cc62
|
7ddaea3a4d5af74cee4fe6aa9d36cd128729393b
|
d5cf4b262ca762ec0a6d01d318a295a1663a996e
|
refs/heads/master
| 2020-05-31T23:42:38.182216 | 2019-10-07T03:47:20 | 2019-10-07T03:47:20 | 190,501,281 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6353503465652466,
"alphanum_fraction": 0.6576433181762695,
"avg_line_length": 30.94915199279785,
"blob_id": "fcfc2f881903a3a3bd96d811ff216f5391974235",
"content_id": "d358f7c700953461146cddd7617ad7c9739514db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1884,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 59,
"path": "/Mandelbrot/Mandelbrot.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 00:24:02 2019\n@author: RobertWinslow\nSimple Mandelbrot visualizer. All the parameters are global variables. Change those then run script. \n\"\"\"\n\nfrom PIL import Image\nfrom colorsys import hsv_to_rgb\nimport numpy as np\n\nmaxIterations = 200\nminX = -2\nmaxX = 1\nminY = -1.5\nmaxY = 1.5\npixelSize = 0.0005\n\nimgWidth = int((maxX-minX)/pixelSize)\nimgHeight = int((maxY-minY)/pixelSize)\n\n'''Create an empty 2d array of the appropriate size, then perform the following.\nFor each pixel, get the cooresponding complex coordinate, c, then iterate using:\n z(0) = 0. z(n+1) = z(n)^2 + c\nIf the iteration exceeds 2 in magnitude, then quit and record the number of iterations.\nIf the iteration reaches the maximum, then assume the point is in the set; leave as max.\nNote that when determining c, pixel zero starts at the upper left of the image.''' \niterationArray = np.zeros([imgHeight,imgWidth])\nfor pixelX in range(imgWidth):\n for pixelY in range(imgHeight):\n c = complex(minX+pixelSize*pixelX,maxY-pixelSize*pixelY)\n Z = c\n i = 1\n while abs(Z) < 2:\n Z = Z*Z + c\n i += 1\n if i > maxIterations:\n break\n iterationArray[pixelY][pixelX] = i\n \n \ndef colorMapping(iterations,maxIterations):\n '''Takes in the number of iterations and transforms it into rgb values'''\n value = iterations / maxIterations\n if value > 1:\n value = 0\n return tuple(int(i*255) for i in hsv_to_rgb(1, 0, value))\n\n\ndef create_image(w,h,name):\n img = Image.new('RGB', size=(w, h))\n pix = img.load()\n for x in range(w):\n for y in range(h):\n #print(colorMapping(iterationArray[y][x],maxIterations))\n pix[x,y] = colorMapping(iterationArray[y][x],maxIterations)\n img.save(name)\n \ncreate_image(imgWidth,imgHeight,\"bigTest.png\")"
},
{
"alpha_fraction": 0.5464363098144531,
"alphanum_fraction": 0.5711542963981628,
"avg_line_length": 29.423357009887695,
"blob_id": "7f3227550e6894ee447bdd6502d11578db0e4007",
"content_id": "583fc753956fa432ddb6fd8046cdf70904b183e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4215,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 137,
"path": "/castleGenerator/castleGen.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 30 12:28:21 2019\n@author: robertwinslow\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom random import shuffle, choice\n\nsuits = [\"♥\",\"♦\",\"♣\",\"♠\"]\nranks = [\"A\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"X\",\"J\",\"Q\",\"K\"]\njokers = [\"★★\",\"☆☆\"]\n\ncards = [suit+rank for suit in suits for rank in ranks] + jokers\ndeck = cards.copy()\nshuffle(deck)\n\n#define two mappings to establish a bijection between position and card\ncard_to_pos = {}\npos_to_card = {}\n\ndef place_card(card,position):\n card_to_pos[card] = position\n pos_to_card[position] = card\n deck.remove(card)\n \n#initialize castle position\nplace_card(\"★★\",(0,0))\nplace_card(\"♥2\",(-1,0))\nplace_card(\"♣2\",(1,0))\nplace_card(\"♠2\",(0,-1))\nplace_card(\"♦2\",(0,1))\n\n#%%functions for manipulating card position\n\ndef find_valid_positions(newcard):\n possiblePositions = set()\n #chack adjacency of all cards already placed.\n for card in card_to_pos:\n if card[0] == newcard[0] or card[1] == newcard[1] or newcard == \"☆☆\" :\n #valid adjacency\n x = card_to_pos[card][0]\n y = card_to_pos[card][1]\n for newpos in [(x,y+1),(x,y-1),(x+1,y),(x-1,y)]:\n if newpos not in pos_to_card:\n possiblePositions.add(newpos)\n #add ground spaces\n minX = min([card_to_pos[card][0] for card in card_to_pos])\n maxX = max([card_to_pos[card][0] for card in card_to_pos])\n possiblePositions.add((maxX+2,0))\n possiblePositions.add((minX-2,0))\n \n return possiblePositions\n\n\n\ndef find_valid_positions_v2(newcard):\n possiblePositions = []\n \n #check for extendible floors\n for card in card_to_pos:\n if card[0] == newcard[0] or card[1] == newcard[1] or newcard == \"☆☆\" :\n #valid adjacency\n x = card_to_pos[card][0]\n y = card_to_pos[card][1]\n for newpos in [(x,y+1),(x,y-1),(x+1,y),(x-1,y)]:\n if newpos not in pos_to_card:\n possiblePositions.append(newpos)\n \n #check for staircases\n stairranks = \"234567\"\n if newcard[1] in stairranks:\n for card in card_to_pos:\n if card[1] in stairranks:\n if card[0] == newcard[0] or card[1] == newcard[1] or newcard == \"☆☆\" :\n x = card_to_pos[card][0]\n y = card_to_pos[card][1]\n for newpos in [(x,y+1),(x,y-1)]:\n if newpos not in pos_to_card:\n #I like stairs, so I'm giving this extra weight.\n possiblePositions.append(newpos)\n possiblePositions.append(newpos)\n \n \n #add ground spaces\n minX = min([card_to_pos[card][0] for card in card_to_pos])\n maxX = max([card_to_pos[card][0] for card in card_to_pos])\n possiblePositions.append((maxX+2,0))\n possiblePositions.append((minX-2,0))\n #possiblePositions.append((maxX+3,0))\n #possiblePositions.append((minX-3,0))\n \n \n return possiblePositions\n\n\n\ndef random_placement(newcard):\n positions = list(find_valid_positions_v2(newcard))\n shuffle(positions)\n return positions[0]\n\n#%%generate castle by placing cards\ncx = -15\nwhile deck != []:\n place_card(deck[0],random_placement(deck[0]))\n \n\n#%% Draw the castle by loading all the cards into a grid\n#get bounds\nminX = min([card_to_pos[card][0] for card in card_to_pos])\nmaxX = max([card_to_pos[card][0] for card in card_to_pos])\nminY = min([card_to_pos[card][1] for card in card_to_pos])\nmaxY = max([card_to_pos[card][1] for card in card_to_pos])\nXoffset = -minX\nYoffset = -minY\nwidth = (maxX-minX+1)\nheight = (maxY-minY+1)\n \n#draw the air/canvas\ncanvas = [[\" \" for x in range(width)] for y in range(height)]\n#draw the ground\nfor y in range(Yoffset):\n for x in range(width):\n 
canvas[y][x] = \"████\"\n \nfor card in card_to_pos:\n x = card_to_pos[card][0]+Xoffset\n y = card_to_pos[card][1]+Yoffset\n canvas[y][x] = \"[\"+card+\"]\"\n \n#render canvas\nfor row in reversed(canvas):\n for cell in row:\n print(cell, end='')\n print()"
},
{
"alpha_fraction": 0.46236559748649597,
"alphanum_fraction": 0.5913978219032288,
"avg_line_length": 14.333333015441895,
"blob_id": "facb0cf3e9e26861f907fc6c179927eb4d00f64e",
"content_id": "90c86ed5939c0d894c94889faf3b1504eeb68158",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/SpotifyScraper/spotifyVisualizer.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 2 01:37:14 2019\n\n@author: RobertWinslow\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.5797023773193359,
"alphanum_fraction": 0.5917159914970398,
"avg_line_length": 34.36942672729492,
"blob_id": "cdc448719292d4dbcccf648c871d7d5a5688e171",
"content_id": "dde2a394126fa4e16c81e458789c8af584db69d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5577,
"license_type": "no_license",
"max_line_length": 736,
"num_lines": 157,
"path": "/SpotifyScraper/spotifyScraper.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 13 16:32:10 2019\n\n@author: RobertWinslow\n\"\"\"\n\nimport urllib.request\nimport re\nimport datetime\nimport time\nfrom pandas import date_range\nfrom bs4 import BeautifulSoup as bSoup\nimport csv\nimport codecs\nfrom multiprocessing import Pool\nimport sys\n\ntoday = datetime.date.today()\ndates = date_range(start='2017-01-01', end=today)[:-1] #don't include today because that data doesn't exist yet.\n\ncountries = [\"global\",\"us\",\"gb\",\"ad\",\"ar\",\"at\",\"au\",\"be\",\"bg\",\"bo\",\"br\",\"ca\",\"ch\",\"cl\",\"co\",\"cr\",\"cy\",\"cz\",\"de\",\"dk\",\"do\",\"ec\",\"ee\",\"es\",\"fi\",\"fr\",\"gr\",\"gt\",\"hk\",\"hn\",\"hu\",\"id\",\"ie\",\"il\",\"in\",\"is\",\"it\",\"jp\",\"lt\",\"lu\",\"lv\",\"mc\",\"mt\",\"mx\",\"my\",\"ni\",\"nl\",\"no\",\"nz\",\"pa\",\"pe\",\"ph\",\"pl\",\"pt\",\"py\",\"ro\",\"se\",\"sg\",\"sk\",\"sv\",\"th\",\"tr\",\"tw\",\"uy\",\"vn\",\"za\"]\ncountryFullnames = [\"Global\",\"United States\",\"United Kingdom\",\"Andorra\",\"Argentina\",\"Austria\",\"Australia\",\"Belgium\",\"Bulgaria\",\"Bolivia\",\"Brazil\",\"Canada\",\"Switzerland\",\"Chile\",\"Colombia\",\"Costa Rica\",\"Cyprus\",\"Czech Republic\",\"Germany\",\"Denmark\",\"Dominican Republic\",\"Ecuador\",\"Estonia\",\"Spain\",\"Finland\",\"France\",\"Greece\",\"Guatemala\",\"Hong Kong\",\"Honduras\",\"Hungary\",\"Indonesia\",\"Ireland\",\"Israel\",\"India\",\"Iceland\",\"Italy\",\"Japan\",\"Lithuania\",\"Luxembourg\",\"Latvia\",\"Monaco\",\"Malta\",\"Mexico\",\"Malaysia\",\"Nicaragua\",\"Netherlands\",\"Norway\",\"New Zealand\",\"Panama\",\"Peru\",\"Philippines\",\"Poland\",\"Portugal\",\"Paraguay\",\"Romania\",\"Sweden\",\"Singapore\",\"Slovakia\",\"El Salvador\",\"Thailand\",\"Turkey\",\"Taiwan\",\"Uruguay\",\"Viet Nam\",\"South Africa\"]\n\n#%%\ndef urlToStr(url):\n #Some websites will refuse a connection without this metadata:\n hdr = {'User-Agent': 'Mozilla/5.0'}\n #Connect to the site:\n req = urllib.request.Request(url,headers=hdr) \n fp = urllib.request.urlopen(req)\n #Convert the stream of data into \n mybytes = fp.read()\n urlStr = mybytes.decode(\"utf8\")\n fp.close() \n return urlStr\n#%%\ndef spotifyRequest(country,date):\n page = urlToStr(\"https://spotifycharts.com/regional/\"+country+\"/daily/\"+date)\n return page\n#%%\ndef parseWebChart(page):\n soup = bSoup(page, 'html.parser') \n chartRows = soup.table.find_all(\"tr\")\n \n ranks = []\n titles = []\n artists = []\n streams = []\n links = []\n \n for row in chartRows[1:]:\n cells = row.find_all('td')\n ranks.append(int(cells[1].text))\n titles.append(cells[3].strong.text)\n artists.append(cells[3].span.text)\n streams.append(int(cells[4].text.replace(',', '')))\n links.append(cells[0].a[\"href\"])\n \n return list(zip(ranks,titles,artists,streams,links))\n#%%\ndef spotifyCSVRequest(country,date):\n page = urlToStr(\"https://spotifycharts.com/regional/\"+country+\"/daily/\"+date+\"/download\")\n return page\n#%%\ndef downloadSpotifyCSV(country,date):\n print(country,date)\n try:\n data = spotifyCSVRequest(country,date) \n data = '\\n'.join(data.split('\\n')[1:]) #strip out first row\n folder = countryFullnames[countries.index(country)]\n text_file = codecs.open(\"data\\\\\"+folder+\"\\\\\"+country+\"-\"+date+\".csv\", \"w\",\"utf-8\")\n text_file.write(data)\n text_file.close() \n return \"good\"\n except Exception as e:\n #print(\"ERROR IN:\",country,date, \"---\",e)\n return str(e)\n#%%\ndef mappable(inputs):\n \"\"\"Pool.map() only accepts one iterable argument.\n This 
function simply takes a list of two arguments\n and plugs them intot the function we actually want.\"\"\"\n result = downloadSpotifyCSV(inputs[0],inputs[1])\n if result == \"good\":\n return None\n else:\n return(\"ERROR IN: \"+inputs[0]+\" \"+inputs[1]+\" \"+result)\n#%% singlethread parsing\n#cioontinue at 348\n'''\ntic = time.clock()\nfor country in countries[0:3]:\n log = codecs.open(\"data\\\\\"+countryFullnames[countries.index(country)]+\"\\\\\"+country+\"-log.txt\", \"w\",\"utf-8\")\n for date in dates[-32:]:\n result = downloadSpotifyCSV(country,str(date)[:10])\n if result != \"good\":\n print(\"ERROR IN: \"+country+\" \"+str(date)[:10]+\" \"+result, end='\\r\\n', file=log)\n log.close()\n \ntoc = time.clock()\nprint(toc-tic)\n'''\n\nindex = 1\n\n\n#multithreaded parsing \nif __name__ == '__main__':\n for country in countries[index:index+1]:\n tic = time.clock()\n \n inputs = [(country,str(x)[:10]) for x in dates[0:100]]\n with Pool(6) as p:\n output = p.map(mappable, inputs)\n \n #record all of the problems. \n log = codecs.open(\"data\\\\\"+countryFullnames[countries.index(country)]+\"\\\\\"+country+\"-log.txt\", \"w\",\"utf-8\")\n for item in output:\n if item != None:\n print(item)\n print(item, end='\\r\\n', file=log)\n log.close()\n \n toc = time.clock()\n print(country, toc-tic)\n\n\n'''\nCurrentTarget = \"Canada\"\nindex = countryFullnames.index(CurrentTarget)\n\nindex = 7\n\n\n#multithreaded parsing \nif __name__ == '__main__':\n for country in countries:\n tic = time.clock()\n \n inputs = [(country,str(x)[:10]) for x in dates[-32:]]\n with Pool(6) as p:\n output = p.map(mappable, inputs)\n \n #record all of the problems. \n log = codecs.open(\"data\\\\\"+countryFullnames[countries.index(country)]+\"\\\\\"+country+\"-logmarch.txt\", \"w\",\"utf-8\")\n for item in output:\n if item != None:\n print(item)\n print(item, end='\\r\\n', file=log)\n log.close()\n \n toc = time.clock()\n print(country, toc-tic)\n\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.46236559748649597,
"alphanum_fraction": 0.5913978219032288,
"avg_line_length": 14.333333015441895,
"blob_id": "57e9b9a674c50218fc3d3233b44f929a44329c2d",
"content_id": "c9ea095e8824bb449fdb0e57469d871828f59dcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/Mandelbrot.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 00:24:02 2019\n\n@author: RobertWinslow\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.6760918498039246,
"alphanum_fraction": 0.6815509796142578,
"avg_line_length": 38.63333511352539,
"blob_id": "937c0a804e3cd38cb2feca1abbb8834fb84f85c1",
"content_id": "b403afb5e3800a4b5721a567a7c264926b765bed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7146,
"license_type": "no_license",
"max_line_length": 323,
"num_lines": 180,
"path": "/mazeGen/mazeGen.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 6 13:51:32 2019\nThis generates a maze. Lots of algorithms generate a maze.\nThe rub is that I want the maze to generate line by line and still be valid, in the sense that either the maze is connected or that the maze would be connected if the next line were to consist solely of passages.\n\n0 = open path, 1 = wall \n\"\"\"\nimport random\nfrom PIL import Image\nimport numpy as np\n\n#%%Drawing the maze:\n\nwallCharacter = \"█\"\npassageCharacter = \" \"\ncharacterMapping = {0:passageCharacter, 1:wallCharacter}\n\n \ndef printMazeLine(line):\n print(''.join([characterMapping[cell] for cell in line]))\n\ndef printMaze(maze):\n for line in maze:\n printMazeLine(line)\n\n#%% generating chunks of the maze\n\ndef genLine(length):\n '''This is the function for generating each new line of the maze. Right now, it just puts walls on the outside and flips a coin to determine whether an interior cell is a wall or an open path. Pretty basic. Makes it look like a cave or something.'''\n line = [1]*length\n for i in range(1,length-1): #shrunken bounds to account for the walls\n line[i] = random.randint(0,1)\n \n return line\n\ndef genFirstLine(length):\n '''This generates the first line of the maze. I think it looks nicer if there is only one entrance.'''\n line = [1]*length\n entrancePosition = random.randint(1,length-2)\n line[entrancePosition] = 0\n return line\n\n#%% New layer validity, and assorted graph theory stuff\n \n'''To check for a new layer's validity, we need to ensure that for each cell in the old maze, there is a path to a cell in the new row.\n The computationally easiest way to do this is to keep track of which frontier cells in the old maze are connected to each other within the old maze. This will be a partition of the columns. Then for each set within the partition, we need to check to see if there is a cell within that set that connects to the new edge.\n Additionally, we need to update the partition to account for the new edge.'''\n \n \n \ndef lineToPartition(line):\n '''This function simply takes a line and figures out which cells are connected *within* that line.\n First, the line is iterated through, every time we hit a wall, we add the column to the wall set, and then we create a new subset to add passage cells to. Every time we hit a passage, we add the column to the current passage subset.\n We do this using an array so we can look at the last element. After doing all this, we can convert the array to a set for... reasons. I like sets, okay. Each set represents a group of columns for which there exist a path between the two.'''\n wallList = []\n passageLists = [[]]\n for i,cell in enumerate(line):\n if cell == 0: #passage\n passageLists[-1].append(i)\n else: #wall\n wallList.append(i)\n passageLists.append([])\n \n #convert to sets of connected columns:\n partition = set()\n for subset in passageLists:\n if subset != []:\n partition.add(frozenset(subset))\n \n #convert to a mapping from columns onto sets\n partitionMapping = {}\n for subset in partition:\n for col in subset:\n partitionMapping[col] = subset\n\n return partition, partitionMapping\n\n\n\ndef bipartiteGraph(oldPartition,oldMapping, newPartition, newMapping,length):\n \"Figure out the connections between the old and new partitions of passageways. 
A connection exists between two subsets if there is any connection between an individual cell in one subset and an individual cell in the other.\"\n #initialize the graph\n oldToNewEdges = {subset:set() for subset in oldPartition}\n newToOldEdges = {subset:set() for subset in newPartition}\n #form graph connections\n for col in range(length):\n if (col in newMapping) and (col in oldMapping):\n oldSubset = oldMapping[col]\n newSubset = newMapping[col]\n oldToNewEdges[oldSubset].add(newSubset)\n newToOldEdges[newSubset].add(oldSubset)\n \n return oldToNewEdges, newToOldEdges\n\n\ndef validContinuation(oldPartition, oldToNewEdges):\n \"returns true iff all the old passage subsets are connected to the new frontier\"\n for subset in oldPartition:\n if oldToNewEdges[subset] == set():\n return False\n return True\n\n\ndef updatePartition(newPartition, oldToNewEdges, newToOldEdges):\n '''When we add a new layer to the maze, we need to update the information about which frontier cells are connected to each other. To do this, we take the new paritition, and join together any subsets which are connected.\n This is basically just path traversal through a very very simple graph.''' \n continuationPartition = set()\n for subset in newPartition:\n #Bounce back and forth in the bipartite graph until we stop picking up new pieces\n connections = {subset}\n while True:\n connectionsCopy = set(connections) \n for connectedSubset in connections:\n for oldSubset in newToOldEdges[connectedSubset]:\n for newSubset in oldToNewEdges[oldSubset]:\n connectionsCopy.add(newSubset) #union operation\n if connectionsCopy == connections:\n break\n else: \n connections = connectionsCopy\n #now take the union of those pieces and throw them in the stew.\n joinedSubset = frozenset()\n for subset in connections: joinedSubset = joinedSubset|subset\n continuationPartition.add(joinedSubset)\n \n continuationMapping = {}\n for subset in continuationPartition:\n for col in subset:\n continuationMapping[col] = subset\n \n return continuationPartition, continuationMapping\n \n#%%\n \ndef genValidLine(length,maze,frontierPartition,frontierMapping):\n newLine = []\n while True:\n newLine = genLine(length)\n newPartition, newMapping = lineToPartition(newLine)\n oldToNewEdges, newToOldEdges = bipartiteGraph(frontierPartition,frontierMapping, newPartition, newMapping,length)\n if validContinuation(frontierPartition, oldToNewEdges):\n break\n newFrontierPartition, newFrontierMapping = updatePartition(newPartition, oldToNewEdges, newToOldEdges)\n return newLine, newFrontierPartition, newFrontierMapping\n \n \n\n#%%\n\nglobalLength = 50\nmazeLayers = 100\n \nmaze = [genFirstLine(globalLength)]\nfrontierPartition, frontierMapping = lineToPartition(maze[0])\n\nfor _ in range(mazeLayers):\n newLine, frontierPartition, frontierMapping = genValidLine(globalLength,maze,frontierPartition, frontierMapping)\n maze.append(newLine)\n \nprintMaze(maze)\n \n\n\n\n#%% Image creation\n\ndef saveMazeImage(maze):\n w,h = len(maze[0]), len(maze)\n img = Image.new('L',size=(w,h))\n pix = img.load()\n for x in range(w):\n for y in range(h):\n if maze[y][x]:\n pix[x,y] = 0\n else: \n pix[x,y] = 255\n img.save('utterNonsense.png')\n img.show()\n \n \n"
},
{
"alpha_fraction": 0.48945146799087524,
"alphanum_fraction": 0.5611814260482788,
"avg_line_length": 14.866666793823242,
"blob_id": "1f4d997c48d215e6574c6593dd08d54a2972107e",
"content_id": "96e502c834d89cde73dc9748b75e4ab08a9d9293",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/SpotifyScraper/multithreadtest.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 13 20:53:37 2019\n\n@author: RobertWinslow\n\"\"\"\n\nfrom multiprocessing import Pool\n\ndef f(x):\n return x*x\n\nif __name__ == '__main__':\n with Pool(5) as p:\n print(p.map(f, [1, 2, 3]))"
},
{
"alpha_fraction": 0.4017331004142761,
"alphanum_fraction": 0.4259965419769287,
"avg_line_length": 34.844154357910156,
"blob_id": "3231497c3ef6a5958fd5ccb4a6902bebdc487630",
"content_id": "40cf706b53c19a655e9c2274a35f354543dcc4fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3136,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 77,
"path": "/castleGenerator/roomobjecformatter.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 4 14:33:16 2019\n\n@author: robertwinslow\n\"\"\"\n\nranks = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nsuits = '♠♥♦♣'\n\nroom_color = {'♠':\"spadeColor\",'♥':\"heartColor\",'♦':\"diamondColor\",'♣':\"clubColor\",}\nhallway_color = {'♠':\"spadeHallColor\",'♥':\"heartHallColor\",'♦':\"diamondHallColor\",'♣':\"clubHallColor\",}\n\nroom_label = {\"2♠\": [\"Hallway\", \"\", \"\"] ,\n\"2♥\": [\"Hallway\", \"\", \"\"] ,\n\"2♦\": [\"Hallway\", \"\", \"\"] ,\n\"2♣\": [\"Hallway\", \"\", \"\"] ,\n\"3♠\": [\"Hallway\", \"\", \"\"] ,\n\"3♥\": [\"Hallway\", \"\", \"\"] ,\n\"3♦\": [\"Hallway\", \"\", \"\"] ,\n\"3♣\": [\"Hallway\", \"\", \"\"] ,\n\"4♠\": [\"Hallway\", \"\", \"\"] ,\n\"4♥\": [\"Hallway\", \"\", \"\"] ,\n\"4♦\": [\"Hallway\", \"\", \"\"] ,\n\"4♣\": [\"Hallway\", \"\", \"\"] ,\n\"5♠\": [\"Hallway\", \"\", \"\"] ,\n\"5♥\": [\"Hallway\", \"\", \"\"] ,\n\"5♦\": [\"Hallway\", \"\", \"\"] ,\n\"5♣\": [\"Hallway\", \"\", \"\"] ,\n\"6♠\": [\"Hallway\", \"Must be in this house\", \"\"] ,\n\"6♥\": [\"Hallway\", \"Pitch Black\", \"🕯️\"] ,\n\"6♦\": [\"Hallway\", \"Hidden Door\", \"\"] ,\n\"6♣\": [\"Hallway\", \"Overgrown\", \"🌿\"] ,\n\"7♠\": [\"Hallway\", \"Shifting Exit\", \"\"] ,\n\"7♥\": [\"Hallway\", \"Precarious walkways\", \"\"] ,\n\"7♦\": [\"Hallway\", \"Riddle\", \"?\"] ,\n\"7♣\": [\"Hallway\", \"Guard Beast\", \"🐉\"] ,\n\"8♠\": [\"Lounge\", \"housenameS\", \"🛋️\"] ,\n\"8♥\": [\"Lounge\", \"housenameH\", \"🛋️\"] ,\n\"8♦\": [\"Lounge\", \"housenameD\", \"🛋️\"] ,\n\"8♣\": [\"Lounge\", \"housenameC\", \"🛋️\"] ,\n\"9♠\": [\"Study Room\", \"housenameS\", \"🖋️\"] ,\n\"9♥\": [\"Study Room\", \"housenameH\", \"🖋️\"] ,\n\"9♦\": [\"Study Room\", \"housenameD\", \"🖋️\"] ,\n\"9♣\": [\"Study Room\", \"housenameC\", \"🖋️\"] ,\n\"10♠\": [\"Classroom\", \"Rhetoric\", \"🗣️\"] ,\n\"10♥\": [\"Classroom\", \"Gymnasium\", \"🏋️\"] ,\n\"10♦\": [\"Classroom\", \"Mathematics\", \"🧮\"] ,\n\"10♣\": [\"Classroom\", \"Languages\", \"🗺️\"] ,\n\"J♠\": [\"Secret Society\", \"(Secret)\", \"\"] ,\n\"J♥\": [\"Vault\", \"(Secret)\", \"⚙️\"] ,\n\"J♦\": [\"Lost Library\", \"(Secret)\", \"📜\"] ,\n\"J♣\": [\"Exotic Garden\", \"(Secret)\", \"🌵\"] ,\n\"Q♠\": [\"Classroom\", \"Alchemy\", \"⚗️\"] ,\n\"Q♥\": [\"Dueling Arena\", \"\", \"⚔️\"] ,\n\"Q♦\": [\"Library\", \"\", \"📖\"] ,\n\"Q♣\": [\"Kitchens\", \"\", \"🍳\"] ,\n\"K♠\": [\"Office\", \"Enchantment\", \"🧠\"] ,\n\"K♥\": [\"Office\", \"Evocation\", \"⚡\"] ,\n\"K♦\": [\"Classroom\", \"Divination\", \"🔮\"] ,\n\"K♣\": [\"Office\", \"Necromancy\", \"️\"] ,\n\"A♠\": [\"Dorms\", \"housenameS\", \"🛏️\"] ,\n\"A♥\": [\"Dorms\", \"housenameH\", \"🛏️\"] ,\n\"A♦\": [\"Dorms\", \"housenameD\", \"🛏️\"] ,\n\"A♣\": [\"Dorms\", \"housenameC\", \"🛏️\"] ,}\n\n \nfor rank in ranks:\n for suit in suits:\n if rank in [str(num) for num in range(2,8)]:\n hallway=\"1\"\n bgcolor = hallway_color[suit]\n else:\n hallway =\"0\"\n bgcolor = room_color[suit]\n print('{rank:\"'+rank+'\", suit:\"'+suit+'\", hallway:'+hallway+' , bgcolor:'+bgcolor+', title:\"'+room_label[rank+suit][0]+'\", subtitle:\"'+room_label[rank+suit][1]+'\", symbol:\"'+room_label[rank+suit][2]+'\"},')\n \n \n \n \n \n \n \n \n \n \n \n \n \n "
},
{
"alpha_fraction": 0.6504883170127869,
"alphanum_fraction": 0.6564072370529175,
"avg_line_length": 30.542055130004883,
"blob_id": "00e6aeb3aa07eb419def75949c89deb19382f5a9",
"content_id": "96af37be146c1c721002b98ff914bee79703f2cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3380,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 107,
"path": "/SubstanceCategorizer/categorizer.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 2 21:55:47 2019\n\n@author: robertwinslow\n\nI want to have a big list of substances tagged with properties for a little doodle.\n\nThe substances are stored in an outside textfile with the following format.\n[tags] Name\nTags are:\n H=hot, h=cold\n D=dry, d=wet\n A=active, a=passive\n B=bright, b=dark\n L=lightweight, l=dense\n U=unyielding, u=yielding (hard vs soft)\n\nExamples:\nHDABLu Fire\nhdaLU Ice\n\nThis code is meant for messing with the sets using the interpreter.\n\"\"\"\nimport itertools\n#%% set up data structures and definitions\npdic = {} #properties dictionary. maps flags/properties to sets of substances\nflags = \"HhDdAaBbLlUu\" #same order as names below\nfullPropNames = ['hot', 'cold', 'dry', 'wet', 'active', 'passive',\n 'bright', 'dark', 'lightweight', 'dense', 'unyielding', 'yielding']\n\nsdic = {} #substances dictionary. Reverse of pdic. maps substance to set of flags\n\nfor i in range(len(flags)):\n #make two entries in the pdic for each property, pointing to the same set\n pdic[flags[i]] = set()\n pdic[fullPropNames[i]] = pdic[flags[i]] \n #now also map each variable name to the string, for faster typing in terminal\n #this is not best coding practices, but eh. \n exec(flags[i] + \"='\" + flags[i] + \"'\" )\n exec(fullPropNames[i] + \"='\" + flags[i] + \"'\" )\n \n#manual synonyms:\nhard = unyielding\nsoft = yielding\nsubtle = lightweight\n\nØ = frozenset() #just for fun.\n\n#%% Load the current list into memory\nsourcefile = open(\"substances.txt\")\ntxt = sourcefile.read().split('\\n')\nfor item in txt:\n properties, name = item.split(maxsplit=1)\n sdic[name] = set()\n #put the name into the substance dictionary and vice versa\n for prop in properties:\n pdic[prop].add(name)\n sdic[name].add(prop)\n \nsourcefile.close()\n\n#%% Functions for manipulating and adding to the sets\n\ndef add_thing(name, *properties):\n \"Example usage: add_thing('Potato', H, soft)\"\n if name not in sdic: sdic[name] = set()\n for prop in properties:\n pdic[prop].add(name)\n sdic[name].add(prop)\n \ndef remove_thing(name):\n \"purges this substance from the data\"\n if name in sdic:\n properties = sdic[name]\n del sdic[name]\n for prop in properties:\n pdic[prop].remove(name)\n\ndef find_match(*properties, quiet=False):\n \"prints a list of the items in the intersection of the sets\"\n sets = [pdic[prop] for prop in properties]\n intersection = set.intersection(*sets)\n #if not quiet: print(intersection)\n return intersection\n \ndef find_empty_triplets():\n \n \"iterates through all valid triplets and says which ones are missing substances\"\n pairs = [fullPropNames[2*i:2*i+2] for i in range(6)]\n for combo in itertools.combinations(pairs,3):\n for triplet in itertools.product(*combo):\n if len(find_match(*triplet, quiet=True))<3:\n print(triplet, find_match(*triplet))\n \nfind_empty_triplets()\n \n#%% function to save file. If you don't call this, the dictionaries won't be saved. \ndef save_substances(filename=\"substances.txt\"):\n file = open(filename,'w')\n lines = []\n for name in sdic:\n properties = \"\".join(sdic[name])\n lines.append(properties + \" \" + name)\n file.write(\"\\n\".join(lines))\n file.close()\n "
},
{
"alpha_fraction": 0.3226516544818878,
"alphanum_fraction": 0.571183979511261,
"avg_line_length": 33.36134338378906,
"blob_id": "47853e74289734c70f7d4773a46e917352a397d2",
"content_id": "cdfc0ff929eb83250ba7bf33eaa795ab85245c6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4138,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 119,
"path": "/statedata/stateplotter.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 7 15:27:09 2019\n\n@author: RobertWinslow\n\"\"\"\n\ndata = \"\"\"Alabama, 95.7, 9.5, 0.028, −11.9, 0.004, 1.3\nAlaska, 99.0, 6.1, 0.047, −0.8, 0.006, 3.3\nArizona, 97.4, 11.0, 0.032, −1.1, 0.005, 4.3\nArkansas, 97.5, 6.8, 0.027, −14.3, 0.005, 3.0\nCalifornia, 95.5, 10.6, 0.040, 4.9, 0.006, 2.7\nColorado, 101.6, 6.2, 0.041, 12.5, 0.004, 4.0\nConnecticut, 103.1, 7.4, 0.049, 15.6, 0.003, 3.0\nDelaware, 100.4, 11.8, 0.059, −2.3, 0.006, 6.7\nFlorida, 98.4, 12.2, 0.032, −9.9, 0.007, 4.7\nGeorgia, 98.0, 8.0, 0.036, −9.5, 0.005, 5.0\nHawaii, 95.6, 2.5, 0.036, 15.1, 0.003, 2.7\nIdaho, 101.4, 2.0, 0.029, 8.6, 0.002, 4.3\nIllinois, 99.9, 8.1, 0.039, 0.3, 0.006, 5.0\nIndiana, 101.7, 2.2, 0.034, 0.5, 0.003, 4.3\nIowa, 103.2, 2.4, 0.034, 14.5, 0.003, 6.3\nKansas, 102.8, 4.4, 0.033, 7.2, 0.004, 5.7\nKentucky, 99.4, 1.7, 0.030, −7.8, 0.003, 6.7\nLouisiana, 95.3, 13.7, 0.031, −20.5, 0.006, 5.3\nMaine, 103.4, 0.0, 0.030, 14.3, 0.001, 4.3\nMaryland, 99.7, 8.7, 0.037, −0.9, 0.007, 6.3\nMassachusetts, 104.3, 5.8, 0.046, 16.9, 0.005, 4.3\nMichigan, 100.5, 4.7, 0.035, 0.8, 0.005, 7.3\nMinnesota, 103.7, 5.2, 0.040, 23.3, 0.003, 6.3\nMississippi, 94.2, 13.4, 0.024, −20.7, 0.003, 4.0\nMissouri, 101.0, 4.5, 0.033, −3.3, 0.005, 7.0\nMontana, 103.4, 3.3, 0.026, 4.0, 0.003, 4.3\nNebraska, 102.3, 4.3, 0.036, 11.2, 0.003, 5.7\nNevada, 96.5, 8.5, 0.039, −5.5, 0.006, 4.0\nNew Hampshire, 104.2, −0.4, 0.037, 22.2, 0.002, 3.3\nNew Jersey, 102.8, 5.7, 0.044, 9.0, 0.004, 5.0\nNew Mexico, 95.7, 9.5, 0.029, −7.5, 0.007, 3.3\nNew York, 100.7, 7.1, 0.043, −0.4, 0.005, 3.7\nNorth Carolina, 100.2, 9.2, 0.036, −5.6, 0.005, 5.3\nNorth Dakota, 103.8, 2.4, 0.032, 14.5, 0.001, 5.0\nOhio, 101.8, 3.3, 0.034, 1.8, 0.003, 6.0\nOklahoma, 99.3, 9.2, 0.028, −11.2, 0.005, 3.3\nOregon, 101.2, 3.1, 0.033, 7.9, 0.003, 4.3\nPennsylvania, 101.5, 5.8, 0.035, 3.2, 0.004, 6.3\nRhode Island, 99.5, 7.6, 0.035, 11.6, 0.003, 3.0\nSouth Carolina, 98.4, 11.3, 0.030, −15.3, 0.008, 6.3\nSouth Dakota, 102.8, 1.5, 0.034, 8.9, 0.002, 4.7\nTennessee, 97.7, 6.8, 0.033, −13.8, 0.007, 4.7\nTexas, 100.0, 10.4, 0.036, −4.6, 0.006, 6.0\nUtah, 101.1, −0.5, 0.032, 18.2, 0.002, 8.0\nVermont, 103.8, −0.2, 0.032, 19.8, 0.001, 5.3\nVirginia, 101.9, 6.5, 0.040, 6.8, 0.003, 7.7\nWashington, 101.9, 4.5, 0.039, 11.5, 0.003, 7.7\nWest Virginia, 98.7, −0.1, 0.025, −10.1, 0.003, 3.7\nWisconsin, 102.9, 6.6, 0.035, 12.7, 0.002, 5.3\nWyoming, 102.4, 1.0, 0.041, 4.3, 0.003, 3.0\"\"\"\n\nstates = data.split('\\n')\nfor i,state in enumerate(states):\n states[i] = state.split(',')\n \nranked = sorted(states, key = lambda state: float(state[1]))\n\n\n\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport pandas as pd\n\ndf = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv')\n\nfor col in df.columns:\n df[col] = df[col].astype(str)\n\nscl = [\n [0.0, 'rgb(242,240,247)'],\n [0.2, 'rgb(218,218,235)'],\n [0.4, 'rgb(188,189,220)'],\n [0.6, 'rgb(158,154,200)'],\n [0.8, 'rgb(117,107,177)'],\n [1.0, 'rgb(84,39,143)']\n]\n\ndf['text'] = df['state'] + '<br>' + \\\n 'Beef ' + df['beef'] + ' Dairy ' + df['dairy'] + '<br>' + \\\n 'Fruits ' + df['total fruits'] + ' Veggies ' + df['total veggies'] + '<br>' + \\\n 'Wheat ' + df['wheat'] + ' Corn ' + df['corn']\n\ndata = [go.Choropleth(\n colorscale = scl,\n autocolorscale = False,\n locations = df['code'],\n z = df['total exports'].astype(float),\n locationmode = 'USA-states',\n text = df['text'],\n marker = go.choropleth.Marker(\n line 
= go.choropleth.marker.Line(\n color = 'rgb(255,255,255)',\n width = 2\n )),\n colorbar = go.choropleth.ColorBar(\n title = \"Millions USD\")\n)]\n\nlayout = go.Layout(\n title = go.layout.Title(\n text = '2011 US Agriculture Exports by State<br>(Hover for breakdown)'\n ),\n geo = go.layout.Geo(\n scope = 'usa',\n projection = go.layout.geo.Projection(type = 'albers usa'),\n showlakes = True,\n lakecolor = 'rgb(255, 255, 255)'),\n)\n\nfig = go.Figure(data = data, layout = layout)\npy.iplot(fig, filename = 'd3-cloropleth-map')"
},
{
"alpha_fraction": 0.6002087593078613,
"alphanum_fraction": 0.6299582719802856,
"avg_line_length": 21.85542106628418,
"blob_id": "55b36ea72eda49cf289c125463a9c084659d66de",
"content_id": "3aa6646fa4eb2034b16a70a4e67f39795b4b7db5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1928,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 83,
"path": "/MagicCardRobotSchoolShipGenerator/pigfarts generator.py",
"repo_name": "RMWinslow/LittleScripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 29 \n@author: RobertWinslow\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.colors as colors\n\nimport matplotlib.patches as patches\n\n#%% dictionaries defining house properties\n\n#suit to hex color code\nhouse_color = {\"♥\":\"#ff9999\",}\n\n#%%\n\ndef housesquare(x,y,house='♥'):\n return plt.Rectangle((x,y),1,1,facecolor=house_color[house],edgecolor='k',lw=2)\n\n#%%\n\ncards = [\"♥A\",\"♦2\",\"♣7\",\"♠K\"]\n\n\n#%% Draw the castle\n\npatches = []\n\nfig, ax = plt.subplots()\n\nplt.axis('equal')\nplt.plot([1,2,3],[3,5,2])\n\npatches.append(plt.Circle((0,0), radius= 1))\npatches.append(housesquare(2,2))\n\nfor eachpatch in patches:\n ax.add_patch(eachpatch)\n\nfig.set_size_inches(11,8)\n\nplt.show()\nfig.savefig(\"test.svg\")\n\n#%% General figure drawer with no boundaries\ndef make_image(data, outputname, size=(2, 2), dpi=80, viewbox=((-1,-1),(1,1)) ):\n \n plt.gca().set_aspect('equal', adjustable='box')\n plt.axis('scaled')\n \n \n fig = plt.figure()\n #fig.set_size_inches(size)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n #ax.set(xlim=(viewbox[0][0], viewbox[1][0]), ylim=(viewbox[0][1], viewbox[1][1])) #uncomment to control viewbox\n #ax.set_axis_off()\n fig.add_axes(ax)\n #---------------\n \n \n # Create a Rectangle patch\n rect = patches.Rectangle((1,1),1,2,linewidth=1,edgecolor='r',facecolor='none')\n \n # Add the patch to the Axes\n ax.add_patch(rect)\n \n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n\n # place a text box in upper left in axes coords\n ax.text(1, 1, \"FISH\", transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n\n \n #---------------\n plt.savefig(outputname, dpi=dpi)\n#%%\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
}
] | 11 |
forkiller/test_python
|
https://github.com/forkiller/test_python
|
0ab5f1cd19a8342523a562d039af477d99bbe3d3
|
656178a8db0608139f62d0eec37638e2ae821889
|
2050ecd585b7f53490b093edefb24ba4c7fdeb26
|
refs/heads/master
| 2023-01-10T20:55:42.440304 | 2020-11-07T16:49:20 | 2020-11-07T16:49:20 | 310,883,026 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6325757503509521,
"avg_line_length": 22.909090042114258,
"blob_id": "5548e7936f717fae1b971288cf3d7e4aa1229c87",
"content_id": "adde5ee134337c2d09c657ad3fc6890e051ab964",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 11,
"path": "/hello_git.py",
"repo_name": "forkiller/test_python",
"src_encoding": "UTF-8",
"text": "# D:\\Users\\Administrator\\Anaconda3\\python.exe\n# -*- coding: UTF-8 -*-\n# @Author : Steve\n# @File : hello_git.py\n# @Software: PyCharm\n# @Time : 2020-11-08 上午 12:16\n\nprint(\"Hello Git\")\nprint(\"this is a branch!\")\nprint(\"third commit\")\nprint(\"last commit!===\")\n\n"
}
] | 1 |
robbyt/drf-extensions
|
https://github.com/robbyt/drf-extensions
|
9f52996e4e379dc8aeeab888771a1134c2552738
|
6f07fe182e5a53c59761bb9d1106a17c70718abb
|
7bcc6e85ff09951a2db018b89d5f1df93d650cd7
|
refs/heads/master
| 2021-01-15T09:51:42.304848 | 2014-10-31T19:10:36 | 2014-10-31T19:10:36 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6535432934761047,
"alphanum_fraction": 0.6574802994728088,
"avg_line_length": 24.399999618530273,
"blob_id": "0a631a5b1c32258c7e5cd77f210158bcd2047aca",
"content_id": "63080b94d2904f6f2d417eb7415419499a7d66ba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 254,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 10,
"path": "/tests_app/tests/functional/cache/decorators/urls.py",
"repo_name": "robbyt/drf-extensions",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom .views import HelloView, HelloParamView\n\n\nurlpatterns = [\n url(r'^hello/$', HelloView.as_view(), name='hello'),\n url(r'^hello-param/$', HelloParamView.as_view(), name='hello-param'),\n]\n"
}
] | 1 |