Schema (one row per repository; each record carries repository-level metadata plus a `files` list with one entry per source file):

| column | dtype | range / cardinality |
| --- | --- | --- |
| repo_name | string | lengths 5 to 114 |
| repo_url | string | lengths 24 to 133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string (categorical) | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string (categorical) | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string (categorical) | 115 values |
| files | list | lengths 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
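The records below follow this schema. As a quick orientation, here is a minimal sketch of how such a split could be inspected with pandas; the parquet file name is a placeholder assumption, not a path taken from this card:

```python
import pandas as pd

# Placeholder shard name; substitute the actual data file of this split.
df = pd.read_parquet("train-00000-of-00001.parquet")

# Repository-level metadata: one row per repository.
print(df[["repo_name", "branch_name", "star_events_count", "num_files"]].head())

# `files` holds one dict per source file, carrying per-file statistics and
# the raw source under keys such as "path", "language", "length_bytes", "text".
for f in df.iloc[0]["files"]:
    print(f["path"], f["language"], f["length_bytes"])
```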
repo_name: AnaPana-zz/functools
repo_url: https://github.com/AnaPana-zz/functools
snapshot_id: a74674fa9733e8fe43e0735dca8d432531897544
revision_id: 97ca9ce31badaa088647e3e8020ff2bc188f3c84
directory_id: 295ecd6978206ebfd0235f5d39176a829b7db601
branch_name: refs/heads/master
visit_date: 2021-05-30T14:36:20.664661
revision_date: 2016-03-02T17:11:56
committer_date: 2016-03-02T17:11:56
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.47297295928001404, "alphanum_fraction": 0.55694979429245, "avg_line_length": 23.0930233001709, "blob_id": "0a9378deb98918297e5aeeed47f8f4ab7acfce28", "content_id": "ecdbccac631f7cac0e4811bbfc92905e7821edc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 80, "num_lines": 43, "path": "/reduce.py", "repo_name": "AnaPana-zz/functools", "src_encoding": "UTF-8", "text": "from functools import reduce\n\n\ndef calculate_sum(lst):\n \"\"\"\n Simple 'reduce' example.\n \"\"\"\n # lst=[1,2,3,4,5]\n # ((((1+2)+3)+4)+5)\n sum = reduce(lambda x,y: x+y, lst)\n return sum\n\n\ndef flatten_list(*lists):\n \"\"\"\n Turn ([1, 2, 3], [4, 5], [6, 7, 8]) into [1, 2, 3, 4, 5, 6, 7, 8]\n *lists -> read all positional arguments and save them into the tuple\n \"\"\"\n # Last argument is initializer\n # If the optional initializer is present, it is placed before the items of\n # the sequence in the calculation, and serves as a default when the sequence\n # is empty.\n lst = reduce(list.__add__, lists, [])\n return lst\n\n\ndef list_of_digitst_to_number(lst):\n \"\"\"\n Turn [1, 2, 3, 4, 5, 6, 7, 8] into 12345678\n\n 1*10+2=12\n 12*10+3=123\n 123*10+4=1234\n ...\n \"\"\"\n n = reduce(lambda a,d: 10*a+d, lst, 0)\n return n\n\n\nif __name__ == '__main__':\n print(calculate_sum([1,2,3,4,5]))\n print(flatten_list([1,2,3], [4,5,6], [7,8,9]))\n print(list_of_digitst_to_number([1,2,3,4]))\n" }, { "alpha_fraction": 0.6604342460632324, "alphanum_fraction": 0.6652593612670898, "avg_line_length": 17.840909957885742, "blob_id": "6eab69fa58834dda84ce5925f1652a5b26d4265c", "content_id": "325bd1354cb41e062cb4a775515027f71787e8e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1658, "license_type": "no_license", "max_line_length": 73, "num_lines": 88, "path": "/singledispatch.py", "repo_name": "AnaPana-zz/functools", "src_encoding": "UTF-8", "text": "\"\"\"\nSingle Dispatch\nA form of generic function dispatch where the implementation\nis chosen based on the type of a single argument.\n\nGeneric Function\nA function composed of multiple functions implementing the same operation\nfor different types.\nWhich implementation should be used during a call is determined\nby the dispatch algorithm.\n\"\"\"\n\nfrom functools import singledispatch\n\n\n@singledispatch\ndef fun(arg, verbose=False):\n if verbose:\n print(\"Let me just say,\", end=\" \")\n print(arg)\n\n\[email protected](int)\ndef _(arg, verbose=False):\n if verbose:\n print(\"Strength in numbers, eh?\", end=\" \")\n print(arg)\n\n\[email protected](list)\ndef _(arg, verbose=False):\n if verbose:\n print(\"Enumerate this:\")\n for i, elem in enumerate(arg):\n print(i, elem)\n\n\[email protected](None)\ndef nothing(arg, verbose=False):\n print(\"Nothing.\")\n\n\nfun(\"Hello, world.\")\n# Hello, world.\nfun(\"test.\", verbose=True)\n# Let me just say, test.\nfun(42, verbose=True)\n# Strength in numbers, eh? 
42\nfun(['spam', 'spam', 'eggs', 'spam'], verbose=True)\n# Enumerate this:\n# 0 spam\n# 1 spam\n# 2 eggs\n# 3 spam\nfun(None)\n# Nothing.\n\n# ---------------------------\n\nimport json\n\n\n@singledispatch\ndef get_json_value(data):\n raise Exception(\"This type is not supported\")\n\n\n@get_json_value.register(str)\ndef _(data):\n data = json.loads(data)\n print(data['my-app'])\n\n\n@get_json_value.register(dict)\ndef _(data):\n print(data['my-app'])\n\n\nmy_json_data = '{\"my-app\": \"The best App\"}'\nmy_dict_data = {'my-app': 'The best App'}\n\n\nget_json_value(my_json_data)\n# The best App\nget_json_value(my_dict_data)\n# The best App\nget_json_value(None)\n# Exception raised\n" }, { "alpha_fraction": 0.4727540612220764, "alphanum_fraction": 0.5095729231834412, "avg_line_length": 20.21875, "blob_id": "034bb7550c0e2784a23677356fd155e0bf0430ad", "content_id": "f762373c5592a09f75ea9b39038396a114bed469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 73, "num_lines": 32, "path": "/cmp_to_key.py", "repo_name": "AnaPana-zz/functools", "src_encoding": "UTF-8", "text": "from functools import cmp_to_key\n\n\n# Applicable to sorted(), min(), max(), heapq.ArithmeticErrornsmallest(),\n# itertools.groupby() ...\n\n\ntuple_to_sort = (\n ('bla', 3),\n ('aaa', 1),\n ('foo', 2),\n ('boo', 2)\n)\n\n\nprint(sorted(tuple_to_sort, key=lambda x: x[0]))\n# [('aaa', 1), ('bla', 3), ('boo', 2), ('foo', 2)]\n\nprint(sorted(tuple_to_sort, key=lambda x: x[1]))\n# [('aaa', 1), ('foo', 2), ('boo', 2), ('bla', 3)]\n\n\ndef my_compare(x, y):\n \"\"\"\n I want my results to be [2, 2, ..., everything else]\n \"\"\"\n if x[1] == 2 or y[1] == 2:\n return -1\n\n\nprint(sorted(tuple_to_sort, key=cmp_to_key(my_compare)))\n# [('boo', 2), ('foo', 2), ('aaa', 1), ('bla', 3)]\n" } ]
3
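The per-file `alpha_fraction` and `alphanum_fraction` statistics above are not defined on this card. A plausible reading, shown here as an assumption rather than the dataset builders' actual code, is the share of characters in `text` that are alphabetic or alphanumeric:

```python
def char_fractions(text: str) -> tuple[float, float]:
    # Assumed definitions of alpha_fraction / alphanum_fraction; treat this
    # as a sketch, since the card does not state how the fields were computed.
    n = len(text)
    if n == 0:
        return 0.0, 0.0
    return (sum(c.isalpha() for c in text) / n,
            sum(c.isalnum() for c in text) / n)
```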
repo_name: RafalWrzesniak/InstallingScript
repo_url: https://github.com/RafalWrzesniak/InstallingScript
snapshot_id: 17b58eab59e5ab0ea46ca8215baa79f70995c2d1
revision_id: 0ce46c233521756599f78899e0d7e5c545d51cbe
directory_id: 9093d5271299505bf522157246b969ad958b3005
branch_name: refs/heads/master
visit_date: 2020-04-03T07:03:51.717582
revision_date: 2018-11-01T19:51:02
committer_date: 2018-11-01T19:51:02
github_id: 155092448
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.564039409160614, "alphanum_fraction": 0.5718764066696167, "avg_line_length": 28.589040756225586, "blob_id": "b3e341d1c6b5ba853d079608a5a6ade80aefae24", "content_id": "233fdb3c5d4cbb4990a13f3b91822f5a3dbeb210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4468, "license_type": "no_license", "max_line_length": 92, "num_lines": 146, "path": "/InstallingScript.py", "repo_name": "RafalWrzesniak/InstallingScript", "src_encoding": "UTF-8", "text": "import time\r\nimport os\r\n#import reachArgs\r\n\r\nred = '\\033[91m' + '\\033[1m'\r\nend = '\\033[0m'\r\nbold = '\\033[1;34m' + '\\033[1m'\r\nex_cod = 0\r\nconf_file_path = ''\r\nrepository_path = ''\r\nreport_path = ''\r\nhelp_usage = ''\r\n# to delete\r\n#conf_file_path = 'C:\\\\Users\\\\Student\\\\Desktop'\r\n#repository_path = 'C:\\\\Users\\\\Student\\\\Desktop\\\\Target'\r\n#report_path = 'C:\\\\Users\\\\Student\\\\Desktop'\r\n\r\n\r\n# Get current time and date func\r\ndef get_time():\r\n localtime = time.localtime(time.time())\r\n cur_time = ''\r\n for i in range(0, 6):\r\n cur_time += str(localtime[i])\r\n if i < 2:\r\n cur_time += '-'\r\n elif i == 2:\r\n cur_time += '_'\r\n elif i < 5:\r\n cur_time += ';'\r\n return cur_time\r\n\r\n\r\n# Get configuration file path\r\nconf_file_path = input('Type configuration file path:\\n')\r\nif conf_file_path == '':\r\n conf_file_path = 'C:\\\\Users\\\\Rafal\\\\Desktop'\r\ntry:\r\n os.chdir(conf_file_path)\r\nexcept Exception as e:\r\n print(e)\r\n print('Configuration file path set to default')\r\n conf_file_path = r'C:\\\\Users\\\\Rafal\\\\Desktop'\r\nprint('Configuration file path: ' + conf_file_path)\r\n\r\n# Get repository path\r\nrepository_path = input('Type repository path:\\n')\r\nif repository_path == '':\r\n repository_path = 'F:\\\\Rafał\\\\Instalki\\\\Code Blocks'\r\ntry:\r\n os.chdir(repository_path)\r\nexcept Exception as e:\r\n print(e)\r\n print('Repository path set to default')\r\n repository_path = 'F:\\\\Rafał\\\\Instalki\\\\Code Blocks'\r\nprint('Repository folder path: ' + repository_path)\r\n\r\n# Get report file path\r\nreport_path = input('Type report file path:\\n')\r\nif report_path == '':\r\n report_path = 'C:\\\\Users\\\\Rafal\\\\Desktop'\r\ntry:\r\n os.chdir(report_path)\r\nexcept Exception as e:\r\n print(e)\r\n print('Report file path set to default')\r\n report_path = 'C:\\\\Users\\\\Rafal\\\\Desktop'\r\nprint('Report file path: ' + report_path)\r\n\r\n# help\r\nhelp_usage = input('Help? [T/F]\\n')\r\nif help_usage == 'T':\r\n print('Here is help')\r\nelse:\r\n print('-')\r\n\r\n# Creating file list to be install\r\nos.chdir(conf_file_path) # reachArgs.conf_file_path\r\ntry:\r\n file_conf = open('ConfFile.txt', 'r')\r\n target_list = file_conf.readlines()\r\n for i in range(len(target_list)):\r\n if target_list[i].endswith('\\n'):\r\n target_list[i] = target_list[i][0:-1]\r\n file_conf.close()\r\nexcept Exception as e:\r\n print(e)\r\n target_list = []\r\n# print(target_list)\r\n\r\n# Check if files exist\r\nos.chdir(repository_path)\r\nf = open('restart.txt', 'w')\r\nf.close()\r\nfiles_list = os.listdir(repository_path)\r\nfor name in target_list:\r\n if name not in files_list and name != 'restart':\r\n print(red + 'File \"' + name + '\" not found. 
It will not be installed' + end)\r\n target_list.remove(name)\r\n\r\n\r\n# Check if os restart interrupted installation\r\ntry:\r\n restart_info = open('restart_info.txt', 'r')\r\n leng = restart_info.readlines()\r\n restart_info.close()\r\n target_list = target_list[len(leng):]\r\nexcept Exception as e:\r\n print('Running for the first time')\r\nprt_target_list = [name for name in target_list if name != 'restart']\r\nprint('Files to install: ' + str(prt_target_list))\r\n\r\n\r\n# Run files\r\nfor name in target_list:\r\n if name != 'restart':\r\n try:\r\n start_time = get_time()\r\n print('Installing file \"' + bold + name + end + '\"')\r\n #ex_cod = os.system(msiexec /i repository_path + '\\\\' + name)\r\n print('\"' + bold + name + end + '\" installed')\r\n restart_info = open('restart_info.txt', 'a')\r\n restart_info.write('1\\n')\r\n restart_info.close()\r\n stop_time = get_time()\r\n except Exception as e:\r\n print(e)\r\n print(red + 'Failed to install \"' + name + '\"' + end)\r\n ex_cod = 2\r\n sto_time = get_time()\r\n\r\n rep_file = open(report_path + '\\\\' + get_time() + '_Report.txt', 'a')\r\n if ex_cod == 0:\r\n rep_file.write(name + ', ' + start_time + ', ' + stop_time + ', ' + 'SUCCESS\\n')\r\n else:\r\n rep_file.write(name + ', ' + start_time + ', ' + stop_time + ', ' + 'FAILED\\n')\r\n rep_file.close()\r\n\r\n else:\r\n restart_info = open('restart_info.txt', 'a')\r\n restart_info.write('1\\n')\r\n restart_info.close()\r\n print(red + 'Restarting system..' + end)\r\n # os.system(\"shutdown /r\")\r\nos.system(\"del restart_info.txt\")\r\nprint(bold + '\\nInstallation completed\\n' + end)\r\n" }, { "alpha_fraction": 0.8157303333282471, "alphanum_fraction": 0.8157303333282471, "avg_line_length": 147.3333282470703, "blob_id": "f01c136cf7a643a652a212482aa5f739f8b5e198", "content_id": "f44ed1fc8a51b17496c3458145d7fe38b5f62b8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 445, "license_type": "no_license", "max_line_length": 424, "num_lines": 3, "path": "/README.md", "repo_name": "RafalWrzesniak/InstallingScript", "src_encoding": "UTF-8", "text": "# InstallingScript\n\nThis program can be used for automatical install of programs working on windows platform. As input it is necessary to give the path were all files are stored on hard drive and the .txt file which is an instruction for the script. In the file user sets the instalation order and can force computer restart after any instalation. Script will automaticly restart the PC and will remember which program it has already installed.\n" } ]
2
repo_name: youngmoon-kang/kiwoomApi_practice
repo_url: https://github.com/youngmoon-kang/kiwoomApi_practice
snapshot_id: 55a55557ac146073b1b7b94bb973f66c0f0e2318
revision_id: d8f68edd969ac28b42fe8b1276402fefcc798edf
directory_id: e17811971dab7474073f13204ffd2860ee45756a
branch_name: refs/heads/master
visit_date: 2022-12-20T03:41:24.427754
revision_date: 2020-10-04T10:24:09
committer_date: 2020-10-04T10:24:09
github_id: 301099343
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5112359523773193, "alphanum_fraction": 0.5112359523773193, "avg_line_length": 15.181818008422852, "blob_id": "9b8ca894bfb19d228d672fa5fc39d14dfce150d4", "content_id": "2651cd5021e78480a95051328802c81c0ed872c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 27, "num_lines": 11, "path": "/__init__.py", "repo_name": "youngmoon-kang/kiwoomApi_practice", "src_encoding": "UTF-8", "text": "from ui.ui import *\nfrom kiwoom.kiwoom import *\n\nclass Main():\n def __init__(self):\n print(\"실행할 메인 클래스\")\n\n Ui_class()\n\nif __name__ == \"__main__\":\n m = Main()\n" }, { "alpha_fraction": 0.5796881318092346, "alphanum_fraction": 0.586936354637146, "avg_line_length": 43.77735900878906, "blob_id": "401c0889c360b68836ba3ee596ce4bd35f632047", "content_id": "c182b61326a7dcf29e39cc310cea3274f7cbc82f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12777, "license_type": "no_license", "max_line_length": 137, "num_lines": 265, "path": "/kiwoom/kiwoom.py", "repo_name": "youngmoon-kang/kiwoomApi_practice", "src_encoding": "UTF-8", "text": "from PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nfrom config.errorCode import *\nfrom PyQt5.QtTest import *\n\nclass Kiwoom(QAxWidget):\n def __init__(self):\n super().__init__()\n\n print(\"kiwoom클래스 입니다.\")\n\n ########event loop 모음\n self.login_event_loop = None\n self.detail_account_info_event_loop = QEventLoop()\n self.calculator_event_loop = QEventLoop()\n #################\n\n ####스크린 번호\n self.screen_my_info = \"2000\"\n self.screen_calculation_stock = \"4000\"\n ##################\n\n ########변수 모음\n self.account_num = None\n self.account_stock_dict = dict()\n self.not_account_stock_dict = dict()\n ##############\n\n ########계좌 관련 변수\n self.use_money = 0\n self.use_money_percent = 0.5\n ####################\n\n self.get_ocx_instance()\n self.event_slots()\n\n self.signal_login_commConnect()\n self.get_account_info()\n self.detail_account_info()\n self.detail_account_mystock()#계좌평가 잔고내역 요청\n self.not_concluded_account()\n\n self.calculator_fnc() #종목분석용, 임시용으로 실행\n\n def get_ocx_instance(self):\n self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")\n\n def event_slots(self):\n self.OnEventConnect.connect(self.login_slot)\n\n def signal_login_commConnect(self):\n self.dynamicCall(\"CommConnect()\")\n\n self.login_event_loop = QEventLoop()\n self.login_event_loop.exec_()\n\n def login_slot(self, errCode):\n print(errors(errCode))\n\n self.login_event_loop.exit()\n self.OnReceiveTrData.connect(self.trdata_slot)\n\n def get_account_info(self):\n account_list = self.dynamicCall(\"GetLoginInfo(String)\", \"ACCNO\")\n\n self.account_num = account_list.split(';')[0]\n print(\"나의 보유 계좌번호: {0}\".format(self.account_num))\n\n def detail_account_info(self):\n print(\"예수금을 요청하는 부분\")\n\n self.dynamicCall(\"SetInputValue(String, String)\", \"계좌번호\", self.account_num)\n self.dynamicCall(\"SetInputValue(String, String)\", \"비밀번호\", \"0000\")\n self.dynamicCall(\"SetInputValue(String, String)\", \"비밀번호입력매체구분\", \"00\")\n self.dynamicCall(\"SetInputValue(String, String)\", \"조회구분\", \"2\")\n self.dynamicCall(\"CommRqData(String, String, String, String)\", \"예수금상세현황요청\", \"opw00001\", \"0\", \"2000\")\n\n self.detail_account_info_event_loop = QEventLoop()\n self.detail_account_info_event_loop.exec_()\n\n def detail_account_mystock(self, sPrevNext = \"0\"):\n 
self.dynamicCall(\"SetInputValue(String, String)\", \"계좌번호\", self.account_num)\n self.dynamicCall(\"SetInputValue(String, String)\", \"비밀번호\", \"0000\")\n self.dynamicCall(\"SetInputValue(String, String)\", \"비밀번호입력매체구분\", \"00\")\n self.dynamicCall(\"SetInputValue(String, String)\", \"조회구분\", \"2\")\n self.dynamicCall(\"CommRqData(String, String, String, String)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info)\n\n self.detail_account_info_event_loop.exec_()\n\n def not_concluded_account(self, sPrevNext = \"0\"):\n self.dynamicCall(\"SetInputValue(String, String)\", \"계좌번호\", self.account_num)\n self.dynamicCall(\"SetInputValue(String, String)\", \"체결구분\", \"1\")\n self.dynamicCall(\"SetInputValue(String, String)\", \"매매구분\", \"0\")\n self.dynamicCall(\"CommRqData(String, String, String, String)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info)\n\n self.detail_account_info_event_loop.exec_()\n\n def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext):\n '''\n tr요청을 받는 구역이다! 슬롯이다!\n :param sScrNo: 스크린번호\n :param sRQName: 내가 요청했을 때 지은 이름\n :param sTrCode: 요청id, tr코드\n :param sRecordName: 사용안함\n :param sPrevNext: 다음 페이지가 있는지\n :return:\n '''\n\n if sRQName == \"예수금상세현황요청\":\n deposit = self.dynamicCall(\"GetCommData(String, String, int, String)\", sTrCode, sRQName, 0, \"예수금\")\n print(\"예수금: \", int(deposit))\n\n self.use_money = int(deposit) * self.use_money_percent\n self.use_money = self.use_money / 4\n\n ok_deposit = self.dynamicCall(\"GetCommData(String, String, int, String)\", sTrCode, sRQName, 0, \"출금가능금액\")\n print(\"출금가능금액: \", int(ok_deposit))\n\n self.detail_account_info_event_loop.exit()\n\n if sRQName == \"계좌평가잔고내역요청\":\n total_buy_money = self.dynamicCall(\"GetCommData(String, String, int, String)\", sTrCode, sRQName, 0, \"총매입금액\")\n print(\"총 매입금액: \", int(total_buy_money))\n\n total_profit_loss_rate = self.dynamicCall(\"GetCommData(String, String, int, String)\", sTrCode, sRQName, 0, \"총수익률(%)\")\n print(\"총 수익률(%): \", float(total_profit_loss_rate))\n\n rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)\n cnt = 0\n for i in range(rows):\n code = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"종목번호\")\n code_nm = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"종목명\")\n code = code.stripe()[1:]\n stock_quantity = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"보유수량\")\n buy_price = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"매입가\")\n learn_rate = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\")\n current_price = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"현재가\")\n total_chagual_price = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"매입금액\")\n possible_quantity = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\")\n\n if code in self.account_stock_dict:\n pass\n else:\n self.account_stock_dict.update({code: {}})\n\n code_nm = code_nm.strip()\n stock_quantity = int(stock_quantity.strip())\n buy_price = int(buy_price.strip())\n learn_rate = float(learn_rate.strip())\n current_price = int(current_price.strip())\n total_chagual_price = int(total_chagual_price)\n possible_quantity = int(possible_quantity)\n\n self.account_stock_dict[code].update({\"종목명\": code_nm})\n 
self.account_stock_dict[code].update({\"보유수량\": stock_quantity})\n self.account_stock_dict[code].update({\"매입가\": buy_price})\n self.account_stock_dict[code].update({\"수익률(%)\": learn_rate})\n self.account_stock_dict[code].update({\"현재가\": current_price})\n self.account_stock_dict[code].update({\"매입금액\": total_chagual_price})\n self.account_stock_dict[code].update({\"매매가능수량\": possible_quantity})\n\n cnt += 1\n\n print(\"계좌에 가지고 있는 종목: \", self.account_stock_dict)\n\n if sPrevNext == \"2\":\n self.detail_account_mystock(\"2\")\n else:\n self.detail_account_info_event_loop.exit()\n\n elif sRQName ==\"실시간미체결요청\":\n rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)\n for i in range(rows):\n code = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"종목번호\")\n code_nm = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"종목명\")\n order_no = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"주문번호\")\n order_status = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"주문상태\")\n order_quantity = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"수문주량\")\n order_price = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"주문가격\")\n order_gubun = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"주문구분\")\n not_quantity = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"미체결수량\")\n ok_quantity = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, i, \"체결량\")\n\n code = code.stripe()\n code_nm = code_nm.strip()\n order_no = int(order_no.strip())\n order_status = int(order_status.strip())\n order_quantity = int(order_quantity.strip())\n order_price = float(order_price.strip())\n order_gubun = order_gubun.lstrip('+').lstrip('-')\n not_quantity = int(not_quantity.strip())\n ok_quantity = int(ok_quantity.strip())\n\n if order_no in self.not_account_stock_dict:\n pass\n else:\n self.not_account_stock_dict[order_no] = {}\n\n self.not_account_stock_dict[order_no].update({\"종목코드\": code})\n self.not_account_stock_dict[order_no].update({\"종목명\": code_nm})\n self.not_account_stock_dict[order_no].update({\"주문번호\": order_no})\n self.not_account_stock_dict[order_no].update({\"주문상태\": order_status})\n self.not_account_stock_dict[order_no].update({\"주문수량\": order_quantity})\n self.not_account_stock_dict[order_no].update({\"주문가격\": order_price})\n self.not_account_stock_dict[order_no].update({\"주문구분\": order_gubun})\n self.not_account_stock_dict[order_no].update({\"미체결수량\": not_quantity})\n self.not_account_stock_dict[order_no].update({\"체결량\": ok_quantity})\n\n print(\"미체결 종목: \", self.not_account_stock_dict[order_no])\n\n self.detail_account_info_event_loop.exit()\n\n elif sRQName == \"주식일봉차트조회\":\n print(\"일봉 데이터 요청\")\n code = self.dynamicCall(\"GetCommData(QString, Qstring, int, QString)\", sTrCode, sRQName, 0, \"종목코드\")\n code = code.strip()\n print(\"{0} 일봉데이터 요청\".format(code))\n\n rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)\n print(rows)\n\n if sPrevNext ==\"2\":\n self.day_kiwoom_db(code=code, sPrevNext=sPrevNext)\n else:\n self.calculator_event_loop.exit()\n\n def get_code_list_by_market(self, market_code):\n '''\n 종목 코드들 반환\n :param market_code:\n :return:\n '''\n code_list = 
self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code)\n code_list = code_list.split(\";\")[:-1]\n\n return code_list\n\n def calculator_fnc(self):\n '''\n whdahr\n :return:\n '''\n code_list = self.get_code_list_by_market(\"10\")\n\n print(\"코스닥 갯수: \", len(code_list))\n\n for idx, code, in enumerate(code_list):\n self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock)\n print(\"{0} / {1} : KOSDAQ Stock Code : {2} is updating...\".format(idx + 1, len(code_list), code))\n self.day_kiwoom_db(code=code)\n\n\n\n def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"):\n\n QTest.qWait(3600)\n\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\")\n\n if date != None:\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date)\n\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock )\n self.calculator_event_loop.exec_()" } ]
2
repo_name: alonshmilo/MachineLearningJCE
repo_url: https://github.com/alonshmilo/MachineLearningJCE
snapshot_id: 9367d5dcfce4dad9b4a75a4d9570eda862032ea2
revision_id: ce5196061a961098ec80bd67eef9898716700eeb
directory_id: 71be73982e62055bf00b442b5db1e0f2f7eabd3c
branch_name: refs/heads/master
visit_date: 2020-12-24T20:52:26.428445
revision_date: 2018-08-08T17:17:16
committer_date: 2018-08-08T17:17:16
github_id: 56162615
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.3511348366737366, "alphanum_fraction": 0.43791723251342773, "avg_line_length": 16.83333396911621, "blob_id": "41f731948fe2e808a013387ec05e6711ed88242b", "content_id": "b94d902dd2357c5b4f11c1b3333f2203bdacc447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/classes/class02.py", "repo_name": "alonshmilo/MachineLearningJCE", "src_encoding": "UTF-8", "text": "#func\n\ndef func1(a, b=4, c=4):\n print(\"a:\", a, \"b:\", b, \"c:\",c)\nprint(\"#1:\")\nfunc1(2,c=56)\n\nprint(\"#2:\")\nfunc1(b=1, c=55, a=45)\n\nprint(\"-------------------------------------------------\")\n\ndef func2(a, L=[]):\n print(id(L))\n L.append(a)\n return L\n\nprint(func2(1))\nprint(func2(2))\nprint(func2(3))\nprint(\" \")\nprint(\"L is nottt initializing!!!\")\n\nprint(\"-------------------------------------------------\")\n\ndef func3(p1, *p2, **p3):\n print(p1)\n print(p2)\n print(p3)\n\nfunc3(123)\nfunc3(123, 'aaa', 1234)\nfunc3(123, 'aaa', 1234, v1=1, v2=2)\nfunc3(123, 'aaa', 'sgsdg', 2345, v1=1, v2=2)\n\n\nprint(\" \")\nprint(\"* is tuple, ** is map\")\n\nprint(\"-------------------------------------------------\")\nprint(\"test line\", \",\")\nprint(\"test lineee\")\n" }, { "alpha_fraction": 0.6838180422782898, "alphanum_fraction": 0.7039522528648376, "avg_line_length": 35.27027130126953, "blob_id": "edd5b8c79948e2f8a21c8e7f795d2a3f497cc383", "content_id": "d7a094c436d298a51d0c028702b7d27c4c087004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 94, "num_lines": 37, "path": "/classes/class05.py", "repo_name": "alonshmilo/MachineLearningJCE", "src_encoding": "UTF-8", "text": "from PIL import Image\nimport glob\nimport numpy as np\n\nimage_list = map (Image.open, glob.glob(\"c:/*ok*.png\"))\nok_image = np.zero(len(image_list), image_list[0].size[0]*image_list[0].size[1])\nfor (idx, im) in enumerate(image_list):\n ok_image[idx,:] = np.array(im,np.uint8).reshape(1,im.size[0]*im.size[1])\n\n\nimage_list = map (Image.open, glob.glob(\"c:/*cyst*.png\"))\ncyst_image = np.zero(len(image_list), image_list[0].size[0]*image_list[0].size[1])\nfor (idx, im) in enumerate(image_list):\n cyst_image[idx,:] = np.array(im,np.uint8).reshape(1,im.size[0]*im.size[1])\n\n#now 2 matrices:\nall_image = np.concatenation((ok_image,cyst_image))\n\n#now tagging what is ok and what is not\nimage_class = np.concatenation((np.zeros(ok_image.shape[0],1),np.ones(cyst_image.shape[0],1)))\n\n#now taking the tests: 20%, 10% from each \"list\"\nfrom sklearn.cross_validation import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(all_image, image_class, test_size=0.2)\n\n#now we throw it to the \"checker\"\n\n\nclassifier = NearestNeighbor(); #new \"object\"\nclassifier.train(x_train,y_train) #we are training it with method \"train\"\ny_pred=classifier.predict(x_test) # and we test the rest, using \"predict\"\n\nnum_of_correct = np.sum(y_pred==y_test)\naccuracy = num_of_correct/len(y_test)\n\n#accuracy depends on the input: how the input was divided." 
}, { "alpha_fraction": 0.6396946310997009, "alphanum_fraction": 0.6854962110519409, "avg_line_length": 14.95121955871582, "blob_id": "4d46c535c701ea8643d28c502890a69e98417745", "content_id": "ba0742d8c91d56a91eb2815e819991760ccafffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 58, "num_lines": 41, "path": "/classes/class03.py", "repo_name": "alonshmilo/MachineLearningJCE", "src_encoding": "UTF-8", "text": "#16/03/2016\n\n#packages: Numpy / scipy / matplotlib\n\n#from scipy.misc import imread,imsave,imresize\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#img = imread(\"/Users/alonshmilovich/Downloads/logo.png\")\n\n#print(img.dtype, img.shape)\n\n#img2 = img *[1,0,0]\n\n#imsave(\"/Users/alonshmilovich/Downloads/logo2.png\", img2)\n\n#print(img2.dtype, img2.shape)\n\n#plt.imshow(img2)\n#plt.show()\n\nx = np.arange(0,3*np.pi,0.1)\ny = np.sin(x)\nplt.plot(x,y)\n\ny1 = np.cos(x)\nplt.plot(x,y1)\n\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('title')\n\nplt.legend(['sin','cos'])\nplt.show()\n\n#spliting to 2 windows\nplt.subplot(1,2,1)\nplt.plot(x,y)\n\nplt.subplot(1,2,2)\nplt.plot(x,y)\n\n" }, { "alpha_fraction": 0.4990347623825073, "alphanum_fraction": 0.5284749269485474, "avg_line_length": 29.485294342041016, "blob_id": "630b63cbd168cc35a445a4c60b522977dd68dea1", "content_id": "097a17169d5219f32891477bbe6499c51bb0b0f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2072, "license_type": "no_license", "max_line_length": 124, "num_lines": 68, "path": "/classes/class06.py", "repo_name": "alonshmilo/MachineLearningJCE", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nclass TwoLayerNet():\n def __init__(self, input_size, hidden_size, output_size ):\n \"\"\":param input_size = how much pixels in img, hidden_size - how much perceptrons, output_size-how much classes-1\"\"\"\n std = 1*math.e-4;\n self.param = {'w1': np.random.rand(input_size,hidden_size),\n 'b1': np.zeros(hidden_size),\n 'w2': np.random.rand(hidden_size,output_size),\n 'b2': np.zeros(output_size)}\n\n def train(self, x, y, x_val, y_val):\n \"\"\"x_val - the validated group (20%) that was left aside\"\"\"\n learning_rate = 1*math.e - 3\n batch_size = 20 #how much data we take every iteration in training\n num_iters = 100\n num_train = x.shape[0]\n iteration_per_epoch = num_train/batch_size\n\n for it in xrange(num_iters):\n indices = np.randon.choice(x.shape[0], size=batch_size)\n x_batch = x[indices]\n y_batch = y[indices]\n loss, grads = self.loss(x_batch, y_batch)\n for param_name in grads:\n self.param[param_name] -=learning_rate * grads[param_name]\n\n\n\n\n\n\n def loss(self,x,y):\n w1, b1 = self.param['w1'], self.param['b1']\n w2, b2 = self.param['w2'], self.param['b2']\n N,D = x.shape\n\n q1 = x.dot(w1) + b1\n q2 = np.maximum(0,q1)\n q3 = q2.dot(w2) + b2\n scores = q3\n\n exp_score = np.exp(scores)\n probabilities = exp_score/np.sum(exp_score,axis=1)\n log_sigmoid = -np.log(probabilities)\n loss = np.sum(log_sigmoid)\n lamda = 0.5\n loss += lamda * np.sum(w1*w1)\n\n grads = []\n dscores = probabilities\n\n dw2 = q2.T.dot(dscores)\n dw2 += lamda *w2\n db2 = np.sum(dscores)\n\n dhidden = scores.dot(w1.T)\n\n dw1 = x.T.dot(dhidden)\n dw1 += lamda *w1\n db1 = np.sum(dhidden,axis=0)\n\n grads['w1'] = dw1\n grads['b2'] = db2\n grads['w2'] = dw2\n grads['b1'] = db1\n\n return (loss, grads)" } ]
4
repo_name: gdexlab/Cursive-OCR-for-Geneology
repo_url: https://github.com/gdexlab/Cursive-OCR-for-Geneology
snapshot_id: bd83ad00902bcdd09aa98dc3aefb2add9c775e34
revision_id: 4eb9ee56f1b493f686dc9a1b01740a3e3ef35800
directory_id: 3ff052751e7adb7d596e2cdff9c8833461ff81fd
branch_name: refs/heads/master
visit_date: 2021-03-19T18:54:43.142359
revision_date: 2018-04-19T04:42:01
committer_date: 2018-04-19T04:42:01
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5582095384597778, "alphanum_fraction": 0.5837878584861755, "avg_line_length": 33.303226470947266, "blob_id": "6a9607d5c3643f342a9139927c78e6cbeec86b64", "content_id": "068881ff7865da7ec4617d0f385b4471f1464cc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5317, "license_type": "no_license", "max_line_length": 206, "num_lines": 155, "path": "/historical/experiments.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport scipy.misc\nimport matplotlib\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport string\nimport re\nimport random\nimport os\n\n\nif os.name == 'nt':\n env = 0\nelse:\n env = 1\n\nif env == 1:\n path=\"/home/ubuntu/Cursive-OCR-for-Geneology/dataset\"\nelse:\n path=\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\"\n\n# for file in glob.glob(\"*/*.jpg\", recursive=True):\n# img = cv2.imread(file,0)\n# edges = cv2.Canny(img,100,200)\n\n # plt.subplot(121),plt.imshow(img,cmap = 'gray')\n # plt.title('Original Image'), plt.xticks([]), plt.yticks([])\n # plt.subplot(122),plt.imshow(edges,cmap = 'gray')\n # plt.title('Edge Image'), plt.xticks([]), plt.yticks([])\n\n # plt.show()\n\n'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport load_images_dataset\n\nskips = [\".jpg\", \" \",\n\"@\", \"+\", \"]\", \"[\", \")\", \"(\", \"_\",\n\"$\", \"z\", \"j\", \"b\", \"k\", \"v\", \"w\", # less than 50\n\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\",\n\"P\", \"Q\", \"R\",\"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\",\n \".\", \",\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\ndataset, y, n_classes, label_dict, SIZE = load_images_dataset.prepare_data(path, skips=skips)\n\n\nprint(\"Number of classes: {}\".format(n_classes))\n\n# to unorder samples\nrandom_seed = 4\nrandom.Random(random_seed).shuffle(y)\nrandom.Random(random_seed).shuffle(dataset)\n\nn_test = 8\nn = len(dataset) -(1+n_test)\nx_test = np.array(dataset[n:n + n_test])\nx_train = np.array(dataset[: n])\ny_test = np.array(y[n:n + n_test])\ny_train = np.array(y[: n])\n\nmodel = Sequential()\nmodel.add(Conv2D(32,\n kernel_size=(3, 3),\n activation='tanh',\n input_shape=SIZE))\n#tanh offering more specific vals, rather than 1 0\nmodel.add(Conv2D(64, (3, 3), activation='tanh')) # relu\n# print(x_train.shape)\n# model.add(keras.layers.ConvLSTM2D(32, (3,3), strides=(1, 1),\n# padding='valid',\n# dilation_rate=(1, 1), activation='tanh',\n# # data_format='channels_last',\n# # recurrent_activation='hard_sigmoid', use_bias=True,\n# # kernel_initializer='glorot_uniform',\n# # recurrent_initializer='orthogonal',\n# # bias_initializer='zeros', unit_forget_bias=True,\n# # kernel_regularizer=None, recurrent_regularizer=None,\n# # bias_regularizer=None, activity_regularizer=None,\n# # kernel_constraint=None, recurrent_constraint=None,\n# # bias_constraint=None,\n# # return_sequences=True,\n# # go_backwards=False, 
stateful=False, dropout=0.0,\n# # recurrent_dropout=0.0))\n# ))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\n#bonus\nmodel.add(Conv2D(64, (3, 3), activation='tanh')) # relu\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\n#double bonus\nmodel.add(Conv2D(32, (3, 3), activation='tanh')) # relu\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='tanh'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(n_classes, activation='sigmoid'))\n\n # 'categorical_crossentropy' <- supposedly for multi-class, not multi label: https://stats.stackexchange.com/questions/260505/machine-learning-should-i-use-a-categorical-cross-entropy-or-binary-cross-entro\nmodel.compile(loss='binary_crossentropy',\n#'binary_crossentropy' : supposedely ideal for multi label, current .5 test accuracy, but no letters predicted\n# 'mean_squared_error' : all same, 1s\n optimizer=keras.optimizers.Adam(), #.Adam(), Adadelta()\n metrics=['categorical_accuracy', 'accuracy', 'mae'])\n\nmodel.fit(x_train, y_train,\n batch_size=64, #128\n epochs=3,\n verbose=1,\n # validation_data=(x_test, y_test)\n validation_split=0.4\n )\n\nscore = model.evaluate(x_test, y_test, verbose=1)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint(score)\n\npred = model.predict(x_test)\nprint(\"predictions finished\")\n\nfor i in range (0, len(x_test)):\n actuals = \"\"\n # for label in y[n+i]:\n for index in np.where(y[n+i]==1)[0]:\n # print(index)\n actuals += \" {}\".format(label_dict[\"idx2word\"][index])\n print(\"---------------------------------------\\nActual: {}\".format(actuals))\n\n # label_dict[\"idx2word\"][s],y[n+i][s]) for s in y[n+i])\n # print(\"Prediction: {}\".format(pred[i]))\n print(\"Predicted letters: \")\n for i2 in range (0, len(label_dict[\"idx2word\"])):\n if pred[i][i2] > 0.2:\n print(\"\\\"{}\\\":{}\".format(label_dict[\"idx2word\"][i2], pred[i][i2]))\n print(\"--------------------------------------\")\n" }, { "alpha_fraction": 0.5508021116256714, "alphanum_fraction": 0.5799708366394043, "avg_line_length": 34.465518951416016, "blob_id": "f0109c9105389025fa326d977bf40c78588b97b9", "content_id": "127dbf8e57a2c654baa3b5ee734ee3019d7bdc44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2057, "license_type": "no_license", "max_line_length": 128, "num_lines": 58, "path": "/historical/rotate.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "import cv2\nimport os\nimport glob\nimport numpy as np\nimport imutils\n\nos.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\Past\\\\IS693R\\\\image_project\\\\images\\\\misc\\\\cleaner_selections\")\n\ndef longest_line_angle(img_bw):\n \"\"\"\n Expects image to have white background with black writing\n Function rotates image to various angles,\n iterates every vertical line of pixels in the images,\n line lengths of dark pixels are recorded and the angle returning the longest vertical line is returned\n\n Warning: slow performance on large images\n \"\"\"\n\n angles = [0, -5,-10, -15, -20, -25, -30, -35, -40, 10, 15, 20]\n vert_line_lengths = []\n for indx, angle in enumerate(angles):\n vert_line_lengths.append([angle, 0])\n img_warped = imutils.rotate_bound(img_bw, angle)\n h, w = img_warped.shape[:2]\n for x in range(w):\n line_length = 0\n for y in range(h):\n try:\n if img_warped[y][x] < 10 and (img_warped[y-1][x] <10 or img_warped[y-1][x-1] <10 or img_warped[y][x-1] <10):\n line_length += 
1\n else:\n if line_length > vert_line_lengths[indx][1]:\n vert_line_lengths[indx][1] = line_length\n line_length = 0\n except:\n None\n\n best_angle_weight = 0\n best_angle = 0\n for indx, val in enumerate(vert_line_lengths):\n # print(vert_line_lengths[indx][1])\n if vert_line_lengths[indx][1] > best_angle_weight+1:\n best_angle = vert_line_lengths[indx][0]\n best_angle_weight = vert_line_lengths[indx][1]\n # print(vert_line_lengths[indx])\n return best_angle\n\n\nfor file in glob.glob(\"*.jpg\"):\n print(\"reading img: \", file)\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # img = deskew(img)\n # print(long_contour_angle(img))\n img = imutils.rotate_bound(img, long_contour_angle(img))\n cv2.imshow('test', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.6107208728790283, "alphanum_fraction": 0.6295748353004456, "avg_line_length": 29.738636016845703, "blob_id": "6484f3566a768d567ad0362aedd7a69531e5398f", "content_id": "fcbc70e8e9a8e7e39c3dc4ecd711501b98d9f158", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2705, "license_type": "no_license", "max_line_length": 131, "num_lines": 88, "path": "/image_processing_tools/augmentation.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "from keras.preprocessing.image import ImageDataGenerator\nimport os\nfrom matplotlib import pyplot\nfrom keras import backend as K\nimport random\nimport numpy as np\nimport load_images_dataset\nimport custom_models\nfrom keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\nfrom keras.utils import to_categorical\nfrom matplotlib import pyplot\nimport cv2\nfrom numpy import array\n\n\nx, y, n_classes, label_dict, size = load_images_dataset.prepare_data(\n 'C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\\\\ready_singles')\n\nprint(label_dict)\nK.set_image_dim_ordering('tf')\nx = array(x)\nx.reshape(60, 25, 3, x.shape[0])\nx = x.astype('float32')\n\n# for z in range(0,2):\n# pyplt.imshow(np.uint8(x[z]))\n# pyplt.show()\n\n# define data preparation\ndatagen = ImageDataGenerator(\n # featurewise_center=False,\n # samplewise_center=False,\n # featurewise_std_normalization=False,\n # samplewise_std_normalization=False,\n # zca_whitening=True,\n # zca_epsilon=1e-6,\n rotation_range=12,\n width_shift_range=4,\n height_shift_range=10,\n # shear_range=0.,\n zoom_range=0.2,\n channel_shift_range=.8,\n # fill_mode='nearest',\n # cval=0.,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=0,\n # preprocessing_function=None,\n data_format=K.image_data_format())\n\n# print(label_dict.shape)\nprint('fitting augmentation model')\ndatagen.fit(x)\naug_cnt = 0\n\n# number of times to augment the dataset\nbatch_size = 1\naug_factor = len(x)/batch_size * 20\n\n\nprint('creating flow')\npath = 'C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\\\\01a_singles_augmented'\nfor x_batch, y_batch in datagen.flow(x, y, batch_size=batch_size):\n try:\n label = '{}\\\\{} ({}).jpg'.format(\n path,\n label_dict['idx2word'][int(np.where(y_batch[0]==1)[0][0])],\n aug_cnt\n )\n # print(label)\n \"\"\"\n pyplot has a hard time showing major changes, but you will still get an idea for the variety of the pixels.\n Output the images to actually see appearance.\n \"\"\"\n # pyplot.imshow(x_batch[0])\n # pyplot.show()\n aug_cnt += 1\n\n cv2.imwrite(label, x_batch[0])\n # print('writing 
{}\\\\{}{}.jpg'.format(path,os.path.split(y_batch[0])[1].strip('.jpg'), '({})'.format(aug_cnt)))\n # cv2.imwrite('{}\\\\{}{}.jpg'.format(path,os.path.split(y_batch[0])[1].strip('.jpg'), '({})'.format(random.getrandbits(8))),\n # X_batch.reshape(60, 70, 3))\n # print('wrote')\n except Exception as e:\n print(e)\n if aug_cnt > aug_factor:\n # flow will loop indefinitely\n break\n" }, { "alpha_fraction": 0.5632440447807312, "alphanum_fraction": 0.5766369104385376, "avg_line_length": 28.866666793823242, "blob_id": "a6ddd5e4e772eab7f2fddf38cd3defba7b6dd0f1", "content_id": "13505e3b3a6f17495647c51c278f742acdd96b53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1344, "license_type": "no_license", "max_line_length": 92, "num_lines": 45, "path": "/tester.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "from keras.models import model_from_yaml\nimport load_images_dataset\nimport numpy as np\nimport operator\n\np_data = load_images_dataset.PreparedData()\np_data.set_size((60, 25))\np_data.process_test_only()\n\nyaml_file = open('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\v4_cnn.yaml', 'r')\n\nloaded_model_yaml = yaml_file.read()\nyaml_file.close()\nmodel = model_from_yaml(loaded_model_yaml)\n\n\nmodel.load_weights(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\v5_model_0.8.h5\")\nmodel.compile(\nloss='categorical_crossentropy',\n optimizer='Adadelta',\n metrics=['accuracy'])\n\n\npred = model.predict(p_data.dataset['x_test'])\n# print(pred)\nprint(\"predictions finished\")\n\nvowels = ['a', 'e', 'i', 'o', 'u']\nfrom queue import Queue\nclean_actuals = Queue()\nfor d in p_data.dataset['y_test']:\n clean_actuals.put(vowels[np.where(d == 1)[0][0]])\n\nfor p in pred:\n prediction_dict = {'a': p[0],\n 'e': p[1],\n 'i': p[2],\n 'o': p[3],\n 'u': p[4],\n }\n sorted_preds = sorted(prediction_dict.items(), key=operator.itemgetter(1), reverse=True)\n print(\"-------------------\")\n print(\"Actual: {}\".format(clean_actuals.get()))\n print(\"Predicted: {}\".format(sorted_preds))\n print(\"-------------------\")\n" }, { "alpha_fraction": 0.6009981632232666, "alphanum_fraction": 0.6317310333251953, "avg_line_length": 30.71666717529297, "blob_id": "6ed8410d8b17395dad59137c4fe34ee3021b112c", "content_id": "ff388c4ed766566d634ae8909b5408b83a56e35f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3807, "license_type": "no_license", "max_line_length": 154, "num_lines": 120, "path": "/historical/vizualizer.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\n# from vizualizer import vizualize_layer\nimport scipy\nimport matplotlib.pyplot as plt\nimport cv2\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, Conv1D, MaxPooling2D,MaxPooling1D, AveragePooling2D, GlobalMaxPooling2D\nimport keras\nimport numpy as np\n# vizualize_layer(model, scipy.misc.imread('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\vizualize_examplery_images\\\\_lan.jpg').astype(np.float32))\nimg1 = cv2.imread('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\\\\single\\\\a\\\\a (28).jpg')\n# print(img1)\n# cv2.imshow('test', img1)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n# kernel = np.ones((5,9),np.uint8)\n# img1 = cv2.erode(img1,kernel,iterations = 1) # erosion is actually dilation in this case\nimg1 = 
cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n\n# print(img.shape)\n\n\nmodel = Sequential()\n# model.add(MaxPooling1D(pool_size=(2), input_shape=img1.shape))\nmodel.add(Conv1D(8, kernel_size=(3),\n activation='relu',\n input_shape=img1.shape))\nmodel.add(MaxPooling1D(pool_size=(2)))\nmodel.add(Conv1D(16, (3), activation='relu'))\nmodel.add(MaxPooling1D(pool_size=(2)))\nimg_batch = np.expand_dims(img1, axis=0)\nconv_img = model.predict(img_batch)\nprint(\"here 2\")\n\nprint(\"here2\")\ndef vizualize_layer(img_batch):\n print(img_batch)\n img2 = np.squeeze(img_batch, axis=0)\n print(\"\\n\\n\")\n # print(img)\n print(\"here 3\")\n # print(img.shape)\n fig=plt.figure(figsize=(4, 4))\n fig.add_subplot(1, 2, 1)\n plt.imshow(img1)\n fig.add_subplot(1, 2, 2)\n plt.imshow(img2)\n plt.show()\n\nvizualize_layer(conv_img)\n\n\n\n\n\n\n\n# import numpy as np\n# from keras import backend as K\n# from scipy.misc import imshow\n#\n#\n# def vizualize_layer(model, input_img, layer_name='conv2d_1', filter_index=1):\n#\n# # get the symbolic outputs of each \"key\" layer (we gave them unique names).\n# layer_dict = dict([(layer.name, layer) for layer in model.layers])\n#\n# # can be any integer from 0 to 511, as there are 512 filters in that layer\n# print(\"Layers: {}\".format(layer_dict))\n# # build a loss function that maximizes the activation\n# # of the nth filter of the layer considered\n# layer_output = layer_dict[layer_name].output\n#\n# print(layer_output)\n# print(layer_output[:, :, :, filter_index])\n#\n# loss = K.mean(layer_output[:, :, :, filter_index])\n# print(\"Loss = {}:\".format(loss))\n# # compute the gradient of the input picture wrt this loss\n# grads = K.gradients(layer_output, input_img)[0]\n#\n# # print(input_img)\n# print(\"Grads: {}\".format(grads))\n#\n# # normalization trick: we normalize the gradient\n# grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)\n# print(\"here\")\n#\n# # this function returns the loss and grads given the input picture\n# iterate = K.function([input_img], [loss, grads])\n# print(\"here2\")\n#\n# # we start from a gray image with some noise\n# input_img_data = np.random.random((1, 3, 70, 60)) * 20 + 128.\n# # run gradient ascent for 20 steps\n# for i in range(20):\n# loss_value, grads_value = iterate([input_img_data])\n# input_img_data += grads_value * step\n#\n#\n# # util function to convert a tensor into a valid image\n# def deprocess_image(x):\n# # normalize tensor: center on 0., ensure std is 0.1\n# x -= x.mean()\n# x /= (x.std() + 1e-5)\n# x *= 0.1\n#\n# # clip to [0, 1]\n# x += 0.5\n# x = np.clip(x, 0, 1)\n#\n# # convert to RGB array\n# x *= 255\n# x = x.transpose((1, 2, 0))\n# x = np.clip(x, 0, 255).astype('uint8')\n# return x\n#\n# img = input_img_data[0]\n# img = deprocess_image(img)\n# imshow('%s_filter_%d.png' % (layer_name, filter_index), img)\n" }, { "alpha_fraction": 0.5957124829292297, "alphanum_fraction": 0.6138713955879211, "avg_line_length": 32.04166793823242, "blob_id": "a543faeeafc7e731d4b1e08e0885a317dccfb555", "content_id": "5611676144b1cfdeede4eac1b65a1120b4b3d741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3965, "license_type": "no_license", "max_line_length": 159, "num_lines": 120, "path": "/image_processing_tools/window_slider.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\"\"\"\nProcess:\n 1. process entire image (adaptiveThreshold, contour detection, bounding rect, rotate, etc.)\n 2. Slide and slice based on tone and variety\n 3. 
predict from data set\n\"\"\"\n\nimport os\nimport glob\nimport cv2\nimport imutils\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nos.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\JSPapersBookofTheLawoftheLord\")\n\n\ndef variety_check(img, w):\n \"\"\"\n Scan the middle strip of the image for variety of tone\n Converts to black and white.\n Then verifies minimum of 2 changes to color tone\n Intended for small crops of images\n \"\"\"\n block_size = w\n while block_size %2 != 1:\n block_size -= 1\n\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # using 10 here to eliminate rows with faint lines\n img_bw = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,block_size,10)\n # cv2.imshow('img' ,img_bw)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n (horizontal, vertical) = img_bw.shape\n h_lower = int(.45 * horizontal)\n h_upper = int(.6 * horizontal)\n v_lower = int(0 * vertical)\n v_upper = int(1 * vertical)\n\n # print(h_lower, h_upper, v_lower, v_upper)\n # check_area = img_bw[h_lower:h_upper,v_lower:v_upper]\n #\n # cv2.imshow('img' ,check_area)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n # sum total changes accross the bw array. If none of middle x percentof arrays contain more than one color change, discard entire boundary\n contained_variety = 0\n for horiz in img_bw[h_lower:h_upper]:\n row_value_changes = 0\n value_in_row = horiz[0]\n for vert_value in horiz[v_lower:v_upper]:\n if vert_value != value_in_row:\n row_value_changes += 1\n value_in_row = vert_value\n if row_value_changes > 2: # a change and back (we want more than one line)\n contained_variety += 1\n if contained_variety > 0:\n # print(\"Contained variety: \", contained_variety)\n return True\n else:\n return False\n\ndef tone_check(crop_rgb_img, h, w, base_tone=100):\n \"\"\"\n Verify color tone of image passes minimum threshold.\n 255 is brightest\n 0 is darkest\n Intended for small crops of images\n \"\"\"\n crop_val = 0\n for line in crop_rgb_img:\n for pixel in line:\n for rgb_val in pixel:\n crop_val += rgb_val\n if crop_val > (h*w*3*base_tone):\n return True\n else:\n return False\n\ndef window_slicer(img, w, h, increment_percentage, file_name):\n \"\"\"\n params:\n img = color image\n w = desired crop width\n h = desired crop heigth\n increment percentage = amount frames will advance from w\n file_name = for output purposes\n \"\"\"\n w_increment = increment_percentage * w\n h_increment = increment_percentage * h\n img_count = 0\n\n width_slider = 0\n height_slider = 0\n max_height, max_width = img.shape[:2]\n\n while height_slider < max_height:\n # crop = img[width_slider:width_slider+h, height_slider:height_slider+w]\n while width_slider < max_width:\n crop = img[height_slider:height_slider+h, width_slider:width_slider+w]\n # print(height_slider, h, width_slider, w)\n if tone_check(crop, h, w) and variety_check(crop, w):\n # cv2.imshow('img', crop)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n out = \"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\JSPapersBookofTheLawoftheLord\\\\windows_wider\\\\{}_{}_from_{}\".format(height_slider, width_slider, file_name)\n # print(\"Writing file out {}\".format(out))\n # cv2.imwrite(out, crop)\n width_slider += int(w_increment)\n width_slider = 0\n height_slider += int(h_increment)\n\n\nfor file in glob.glob(\"*.jpg\"):\n print(\"reading img: \", file)\n img = cv2.imread(file)\n window_slicer(img, 70, 60, .3, str(file))\n" }, { "alpha_fraction": 0.65625, "alphanum_fraction": 0.75, "avg_line_length": 23.25, 
"blob_id": "6046987d1baaf8a7a1294c235fb32e1703b00d09", "content_id": "0a1d3be28b287f70eb677391c23cae67a0f97521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/historical/dataset/singles/todo.txt", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "Need to fine-tune augmentor. \nProduce 250 raw letters per vowel. \nAugment to 25X (6250). \nTRAIN." }, { "alpha_fraction": 0.5805196166038513, "alphanum_fraction": 0.6017321348190308, "avg_line_length": 41.604278564453125, "blob_id": "34ee5838a101739565bc851eaab712a7f579c439", "content_id": "b25fe73528769e8c48831c74412eb7b437c97d95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7967, "license_type": "no_license", "max_line_length": 185, "num_lines": 187, "path": "/historical/CHL_AutoTranscribe.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\"\"\"\nProcess:\n 1. process entire image (adaptiveThreshold, contour detection, bounding rect, etc.)\n 2. pull boundaries to be convolved\n 3. output predictions\n\"\"\"\n\nimport cv2\nimport pytesseract\nimport os\nimport glob\nimport cv2\nimport numpy as np\nimport time\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport math\nimport statistics\nimport imutils\n\nos.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\JSPapersBookofTheLawoftheLord\")\n\ndef longest_line_angle(img_bw):\n \"\"\"\n Expects image to have white background with black writing\n Function rotates image to various angles,\n iterates every vertical line of pixels in the images,\n line lengths of dark pixels are recorded and the angle returning the longest vertical line is returned\n\n Warning: slow performance on large images\n \"\"\"\n\n angles = [0, -5,-10, -15, -20, -25, -30, -35, -40, 10, 15, 20]\n vert_line_lengths = []\n for indx, angle in enumerate(angles):\n vert_line_lengths.append([angle, 0])\n img_warped = imutils.rotate_bound(img_bw, angle)\n h, w = img_warped.shape[:2]\n for x in range(w):\n line_length = 0\n for y in range(h):\n try:\n if img_warped[y][x] < 10 and (img_warped[y-1][x] <10 or img_warped[y-1][x-1] <10 or img_warped[y][x-1] <10):\n line_length += 1\n else:\n if line_length > vert_line_lengths[indx][1]:\n vert_line_lengths[indx][1] = line_length\n line_length = 0\n except:\n None\n\n best_angle_weight = 0\n best_angle = 0\n for indx, val in enumerate(vert_line_lengths):\n if vert_line_lengths[indx][1] > best_angle_weight+1:\n best_angle = vert_line_lengths[indx][0]\n best_angle_weight = vert_line_lengths[indx][1]\n\n return best_angle\n\ndef show_images(images, cols = 2, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed')\n plt.show()\n\ndef add_contours(img_source_bw, img_color, square_pixels):\n image,contours,hierarchy = cv2.findContours(img_source_bw,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n img_cont_unfiltered = cv2.cvtColor(img_color, 
cv2.COLOR_BGR2RGB)\n average_angle = 0\n angles = []\n applied_contour_count = 0\n for cnt in contours:\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(img_cont_unfiltered,(x,y),(x+w,y+h),(0,255,0),5)\n # First, check for shape of word. Ignoring single letters for now.\n if w > h:\n if (22 <= w <= 500) and (22 <= h <= 200):\n # this ensures that height is less than max_contour, and width, while width is greater than min_contour\n # if (h > min_contour and w < max_contour):\n # \"\"\"iterate through contours and check for variety in the images\"\"\"\n crop_img = img_source_bw[y:y+h, x:x+w]\n # horizontal = columns, vertical = rows\n (horizontal, vertical) = crop_img.shape\n h_lower = int(.2 * horizontal)\n h_upper = int(.8 * horizontal)\n v_lower = int(.2 * vertical)\n v_upper = int(.8 * vertical)\n\n # sum total changes accross the bw array. If none of middle sixty percentof arrays contain more than one color change, discard entire boundary\n contained_variety = 0\n for horiz in crop_img[h_lower:h_upper]:\n row_value_changes = 0\n value_in_row = horiz[0]\n for vert_value in horiz[v_lower:v_upper]:\n if vert_value != value_in_row:\n row_value_changes += 1\n value_in_row = vert_value\n if row_value_changes > 2: # a change and back (we want more than one line)\n contained_variety += 1\n if contained_variety > 0:\n applied_contour_count += 1\n cv2.rectangle(img_color,(x,y),(x+w,y+h),(0,255,0),5)\n\n if applied_contour_count % 5 == 0:\n angles.append(longest_line_angle(crop_img))\n # file_name = \"C:\\\\Users\\\\grant\\\\IS\\\\Past\\\\IS693R\\\\image_project\\\\images\\\\misc\\\\raw_area_crops\\\\\"+str(v_upper-v_lower+h/w)+\"{}.jpg\".format(contained_variety*(w/100))\n # print(x, y, h, w)\n # raw_output = img_color[y5:y+h, x:x+w] # If really wide, we need larger height for dataset\n # raw_output = img_color[y-h:y+h+w, x-15:x+w+15]\n # try:\n # raw_output_rotated_expanded = imutils.rotate(raw_output, -longest_line_angle(crop_img))\n # except:\n # raw_output_rotated_expanded = raw_output\n\n angles.sort()\n average_angle = statistics.median(angles)\n\n return img_color, img_cont_unfiltered, average_angle\n\nfor file in glob.glob(\"*.jpg\"):\n print(\"reading img: \", file)\n img = cv2.imread(file)\n\n print(\"creating duplicate images for immutable output.\")\n original_img = img.copy()\n height, width, channels = img.shape\n square_pixels = height * width\n\n # while square_pixels > 7000000:\n # print(\"reducing oversize image: {}\".format(square_pixels))\n # img = cv2.resize(img, (0,0), fx=0.9, fy=0.9)\n # height, width, channels = img.shape\n # square_pixels = height * width\n\n print(\"Dilating image to repair errosion\")\n kernel = np.ones((5,9),np.uint8) # was 5, 5, first is vert, then horizontal\n img_dilated = cv2.erode(img,kernel,iterations = 1)\n # erosion extracts the white from the image and replaces with surrounding dark.\n # In this case, the writing is dark, and therefore the erosion works as dilation\n # dilation = cv2.dilate(img,kernel,iterations = 1)\n # show_images([img, erosion, dilation])\n\n\n print(\"creating grayscale image.\")\n img_gray = cv2.cvtColor(img_dilated, cv2.COLOR_BGR2GRAY)\n print(height, width, (square_pixels))\n\n\n print(\"calculating block size for gaussian window:\")\n # The smaller this block size, the more area-sensitive the window will be\n #.75 isn't capturing all we need (but that was before -20 thresh)\n window_block_neighbors = int(.75*math.sqrt(square_pixels))\n\n while window_block_neighbors %2 != 1:\n window_block_neighbors += 1\n\n\n 
print(\"Adding adaptive threshold.\")\n # The last param is a manual input which subtracts from the threshold\n img_low_thresh = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,window_block_neighbors,-10)\n\n print(\"Finding contours.\")\n img_low_thresh,low_thresh_contours,low_thresh_hierarchy = cv2.findContours(img_low_thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n print(\"Filtering and adding contours.\")\n img_low, img_cont_unfiltered, angle = add_contours(img_low_thresh, img, square_pixels)\n\n # print(\"Rotating image to match writing angle\")\n # For creation of data set\n # rotated_img = imutils.rotate_bound(original_img, angle)\n # file_name = \"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\JSPapersBookofTheLawoftheLord\\\\RotationsApplied\\\\{}\".format(file)\n # print(\"Writing rotated image to file: {}\".format(file_name))\n # cv2.imwrite(file_name, rotated_img)\n\n print(\"Displaying images.\")\n show_images([img, img_low_thresh, img_cont_unfiltered, img_low])\n" }, { "alpha_fraction": 0.5630568861961365, "alphanum_fraction": 0.5849897265434265, "avg_line_length": 31.180147171020508, "blob_id": "f027b3ab2f3c6dd6e55e2749b7ecb993d95e422d", "content_id": "764fb8d5d8c1da6ba4b56b23f3fca01bbee2f141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8754, "license_type": "no_license", "max_line_length": 135, "num_lines": 272, "path": "/historical/multilabelclassifier.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport pandas as pd\nimport glob\nimport scipy.misc\nimport matplotlib\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport string\nimport os\nimport re\nimport random\n\nSIZE = (60, 70)\n\n\nif os.name == 'nt':\n env = 0\nelse:\n env = 1\n\nif env == 1:\n path=\"/home/ubuntu/Cursive-OCR-for-Geneology/dataset\"\nelse:\n path=\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\\\\augmented\"\n\nlabel_dict = {\"label2idx\": {},\n \"idx2label\": []}\n\n\nlabel_dict = {\"word2idx\": {}, \"idx2word\": []}\ndef prepare_data(imgs_dir):\n os.chdir(imgs_dir)\n y = []\n idx = 0\n skips = [\".jpg\", \" \", \"@\", \"+\", \"]\", \"[\", \")\", \"(\", \"_\",\n \"$\", \"z\", \"j\", \"b\", \"k\", \"v\", \"w\", # less than 50\n # \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\",\n # \"P\", \"Q\", \"R\",\"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\",\n \".\", \",\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n imgs = []\n clean_titles = []\n label_cardinality = {}\n for file in glob.glob(\"*.jpg\", recursive=True):\n img = scipy.misc.imread(file).astype(np.float32)\n\n if img.shape[0] == SIZE[0] and img.shape[1] == SIZE[1] and img.shape[2] == 3:\n clean_title = file #str(file.split('\\\\')[1])\n clean_title = re.sub(r\"\\([\\d+]*\\)\", \"\", clean_title)\n for lb in skips:\n clean_title = clean_title.replace(lb, \"\")\n\n if len(clean_title) > 0:\n imgs.append(img)\n # print(clean_title)\n clean_titles.append(clean_title)\n else:\n print(\"img size mismatch: {}\".format(img.shape))\n\n\n # Add all file labels to dict, with indexes\n for title in clean_titles:\n for l in list(title): #.split('|'):\n if l in label_cardinality:\n label_cardinality[l] += 1\n else:\n label_cardinality[l] = 1\n if l in label_dict[\"idx2word\"]:\n pass\n else:\n label_dict[\"idx2word\"].append(l)\n label_dict[\"word2idx\"][l] = idx\n idx += 1\n\n\n 
n_classes = len(label_dict[\"idx2word\"])\n # add multi-hot labels to overall labels?\n for title in clean_titles:\n letters = list(title)\n l = np.sum([np.eye(n_classes, dtype=\"uint8\")[label_dict[\"word2idx\"][s]]\n for s in letters], axis=0)\n # print(\"letters: {}\\nlabel: {}\".format(letters, l))\n y.append(l)\n\n # print(label_cardinality)\n for l in sorted(label_cardinality):\n print(l, \": \", label_cardinality[l])\n\n return imgs, y, n_classes\n\n# dataset = imgs, label_dict = word2indx:, indx2word:, ids = img titles, y = list of sumed classes and label indexes\n# dataset, y, label_dict, ids = prepare_data(data, img_dict, size=SIZE)\ndataset, y, n_classes = prepare_data(path)\n\n\nprint(\"shuffling dataset\")\nprint(\"Number of classes: {}\".format(n_classes))\n# to unorder samples\nrandom_seed = 4\nrandom.Random(random_seed).shuffle(y)\nrandom.Random(random_seed).shuffle(dataset)\n\n\n# def show_images(ids, cols = 2, titles = None):\n# fig = plt.figure()\n# n_images = len(ids)\n# n = 0\n# for id in ids:\n# plt.imshow(dataset[id])\n# a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n# # print(y[id])\n# # print(label_dict[\"idx2word\"])\n# # print(np.where(y[id]>0))\n# actuals =\"\"\n# for index in np.where(y[id]==1)[0]:\n# # print(index)\n# actuals += \" {}\".format(label_dict[\"idx2word\"][index])\n# a.set_title(actuals)\n# n+=1\n# # for n, (image, title) in enumerate(zip(ids, titles)):\n# # if image.ndim == 2:\n# # plt.gray()\n# # plt.imshow(image)\n# fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n# mng = plt.get_current_fig_manager()\n# mng.window.state('zoomed')\n# plt.show()\n\n\ndef return_labels(id):\n labels = \"\"\n for index in np.where(y[id]>0)[0]:\n # print(index)\n labels += \" {}\".format(label_dict[\"idx2word\"][index])\n # print(\"Returning labels: \", labels)\n return labels\n\ndef show_img(id):\n plt.suptitle(return_labels(id))\n plt.imshow(dataset[id])\n plt.ylabel(return_labels(id))\n plt.show()\n\n# show_img(0)\n# show_img(1)\n# show_img(2)\n# show_img(3)\n\n# ------------------------------------\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n\nkernel_size = 3 #3\npool_size = 2 #2\n\nmodel = Sequential()\nmodel.add(Conv2D(n_classes*2, kernel_size=(kernel_size, kernel_size),\n activation='relu',\n input_shape=(SIZE[0], SIZE[1], 3)))\n\n\n# ------------------------------\n# model.add(Conv2D(n_classes*2, (kernel_size, kernel_size), activation='relu'))\n# # model.add(MaxPooling2D(pool_size=(2, 2)))\n# # model.add(Dropout(0.25))\n#\n# model.add(Flatten()) # sets to single dimension\n# model.add(Dense(n_classes*4, activation='relu')) # fully connected layer\n# # model.add(Dropout(0.5)) # random removal to prevent overfit\n# model.add(Dense(n_classes, activation='sigmoid')) # modified from softmax\n\n# ---------------\n# check out neural attention models (it is moving accross the word (aka attention)) LSTM\n# could still include some thresholding (use multiple routes and compare results--one model doesn't need to do it all!)\n# visualize between layers\n\n\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\nmodel.add(Conv2D(64, (3, 3), 
activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\n# model.add(Dense(29, activation='sigmoid'))\n\n\n\n\n\n\n\n\n# model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n# model.add(Conv2D(int(n_classes/2), (kernel_size, kernel_size), activation='relu'))\n# model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n# # model.add(Dropout(0.25))\n# model.add(Conv2D(n_classes, kernel_size=(kernel_size, kernel_size), activation='relu'))\n# model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n# model.add(Conv2D(n_classes, (kernel_size, kernel_size), activation='relu'))\n# model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n# model.add(Conv2D(n_classes, (kernel_size, kernel_size), activation='relu'))\n# model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n# model.add(Flatten())\n\n# model.add(Conv2D(n_classes, (kernel_size, kernel_size), activation='relu'))\n# model.add(Conv2D(n_classes, (kernel_size, kernel_size), activation='relu'))\n# model.add(Conv2D(n_classes, (kernel_size, kernel_size), activation='relu'))\n# model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) # could add average pooling, best to have between each convolutional layer.\n# capsule networks overcome the shortcomings of pooling\n# model.add(Dropout(0.25))\n# model.add(Dense(n_classes*3, activation='relu')) # *3 because dimensions were 3, now flattened\n# model.add(Dropout(0.3))\n\n\n# -----------------------------------\nmodel.add(Dense(n_classes, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy',\n optimizer=keras.optimizers.Adam(), #keras.optimizers.Adadelta()(),\n metrics=['accuracy', 'mae'])\n\n# current 547 train and vaildate on 500, test examples for predictions on 47\n# total imgs?\nn_test = 8\nn = len(dataset) -(1+n_test)\n\nprint(\"Beginning fit...\")\nmodel.fit(np.array(dataset[: n]), np.array(y[: n]), batch_size=64, epochs=13,\n verbose=1, validation_split=0.3)\n\n\n\n\n\n\nX_test = dataset[n:n + n_test]\ny_test = y[n:n + n_test]\n\nprint(\"model.fit DONE. 
Moving on to pred...\")\npred = model.predict(np.array(X_test))\nprint(\"predictions finished\")\n\nfor i in range (0, len(X_test)):\n actuals = \"\"\n # for label in y[n+i]:\n for index in np.where(y[n+i]==1)[0]:\n # print(index)\n actuals += \" {}\".format(label_dict[\"idx2word\"][index])\n print(\"---------------------------------------\\nActual: {}\".format(actuals))\n\n # label_dict[\"idx2word\"][s],y[n+i][s]) for s in y[n+i])\n # print(\"Prediction: {}\".format(pred[i]))\n print(\"Predicted letters: \")\n for i2 in range (0, len(label_dict[\"idx2word\"])):\n if pred[i][i2] > 0.4:\n print(\"\\\"{}\\\":{}\".format(label_dict[\"idx2word\"][i2], pred[i][i2]))\n print(\"--------------------------------------\")\n" }, { "alpha_fraction": 0.635026752948761, "alphanum_fraction": 0.6697860956192017, "avg_line_length": 34.619049072265625, "blob_id": "b9f967bb1ab62d5efca152b95491fb1556323666", "content_id": "a824e32765728ac8608aa533b567805a31eff297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "no_license", "max_line_length": 89, "num_lines": 21, "path": "/historical/data_expansion.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "import cv2\nimport os, glob\n\n\npath=\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\\\\X - enlarged, rotated\"\nos.chdir(path)\nfor file in glob.glob(\"*.jpg\"):\n image = cv2.imread(file)\n larger = cv2.resize(image, (80, 69))\n rows,cols = larger.shape[:2]\n M = cv2.getRotationMatrix2D((cols/2,rows/2),7,1)\n larger_rotated = cv2.warpAffine(larger,M,(cols,rows))\n larger_rotated_cropped = larger_rotated[4:64, 5:75]\n # print(larger.shape)\n # print(larger_rotated.shape)\n # print(larger_rotated_cropped.shape)\n # = warpAffine(larger, dst, r, Size(src.cols, src.rows))\n # cv2.imshow('output', larger_rotated_cropped)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n cv2.imwrite(file, larger_rotated_cropped)\n" }, { "alpha_fraction": 0.6253508925437927, "alphanum_fraction": 0.6445560455322266, "avg_line_length": 44.12666702270508, "blob_id": "8114cb6077d492cfed2a3d050e56d19f90379f1b", "content_id": "8d1c109f32ea2c7799ad4d0dfea1ba6aa71c8da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6769, "license_type": "no_license", "max_line_length": 155, "num_lines": 150, "path": "/image_processing_tools/Character_Segmentation.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\"\"\"RATHER THAN...\nsegmenting out so you can convolve, just jump to the convolutional model now!\nYou can add the rotated and resized individual letters to your data set for variation ( do this step after building the dataset ).\nMay need some high-level segmentation, like record type for increased accuracy, but just get as accurate as you can at a generic level first.\nSave the rawest of the raw crops (with ligatures reaching edge) for model training.\n\"\"\"\n\n\n\n# Could use text area detection for average letter size\n# TODO experiment with accuracy from searching entire image for each letter\n# TODO for additional accuracy, ask user to select one of each common letter?\n\n\n# templates work, but must be the same size as the letters in the image\n# also, many templates would be necessary for each letter. 
A comparison could be made for the highest thresh in that section of the image\n    # TODO COMBINE TECHNIQUES FOR HIGHEST ACCURACY?\n\n# TODO could use the convolving template to identify characters, and then crop those to be passed through the convolutional network in original form!\n# could use ten templates of each character to segment and divide\n\n# a few hard-and-fast rules to help:\n    # any space with two separate lines in the same vertical is a letter, not a ligature.\n    # all outgoing (besides B's) connectors are from the top or bottom of the letter\n    # round or straight, every character has walls\n    # eroding horizontal lines will leave character walls\n\n\n\n# Make the best splits you can, but give multiple options. Then train your algorithm to see which words make the most sense based on splits.\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport glob\nimport numpy as np\nimport imutils\n# os.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\Past\\\\IS693R\\\\image_project\\\\images\\\\misc\\\\rotated_crops\\\\clean_selections\")\nos.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\JSPapersBookofTheLawoftheLord\\\\RotationsApplied\")\ndef cv_imshow(img):\n    cv2.imshow('output', img)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\n\ndef show_images(images, cols = 1, titles = None):\n    \"\"\"Display a list of images in a single figure with matplotlib.\n\n    Parameters\n    ---------\n    images: List of np.arrays compatible with plt.imshow.\n\n    cols (Default = 1): Number of columns in figure (number of rows is\n    set to np.ceil(n_images/float(cols))).\n\n    titles: List of titles corresponding to each image. Must have\n    the same length as images.\n    \"\"\"\n    assert((titles is None)or (len(images) == len(titles)))\n    n_images = len(images)\n    if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n    fig = plt.figure()\n    for n, (image, title) in enumerate(zip(images, titles)):\n        a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n        if image.ndim == 2:\n            plt.gray()\n        plt.imshow(image)\n        a.set_title(title)\n    fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n    mng = plt.get_current_fig_manager()\n    mng.window.state('zoomed')\n    plt.show()\n\n\ndef get_location_list_position(locations_list, point):\n    position = len(locations_list)\n    for indx, val in enumerate(locations_list):\n        position = indx\n        # If the new coordinates differ by more than 4 pixels in either coordinate, then advance the position\n        if abs(point[0] - val[0]) > 4 or abs(point[1] - val[1]) > 4:\n            position += 1\n        else:\n            break\n\n    return position\n\nfor file in glob.glob(\"*.jpg\"):\n    '''based on cropped image height, set 3 letter sizes, around 1/3 the cropped image height\n    convolve over the image comparing templates (one of each size) with letters\n    Establish votes for each letter\n\n    original img remains 3 channels the entire time.\n\n\n    '''\n    print(\"reading img: \", file)\n    img = cv2.imread(file)\n\n    img_height, img_width = img.shape[:2]\n    # img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n    img_count = 0\n    sizes = [.98, 1, 1.02]\n    rotations = [0,358, 2]\n    template_imgs = []\n    a_votes = [0]\n    a_locations = []\n    # for template_img in glob.glob(\"C:\\\\Users\\\\grant\\\\IS\\\\Past\\\\IS693R\\\\image_project\\\\images\\\\misc\\\\rotated_crops\\\\hhlettersrotated\\\\nadaNADA*.jpg\"):\n    for template_img in glob.glob(\"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\JSPapersBookofTheLawoftheLord\\\\templates\\\\*.jpg\"):\n        img_count += 1\n        print(img_count)\n        template = cv2.imread(template_img,0)\n        # template = 
cv2.adaptiveThreshold(template,255,cv2.cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,21,2)\n # ret,template = cv2.threshold(template,127,255,cv2.THRESH_BINARY)\n # template = cv2.cvtColor(template,cv2.COLOR_BGR2GRAY)\n # img_low_thresh3 = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,window_block_neighbors,2)\n template_height, template_width = template.shape[:2]\n # print(\"Template shape: \", template.shape)\n # print(\"Img gray shape: \", img_gray.shape)\n for angle in rotations:\n template_rotated = imutils.rotate(template, angle=angle)\n for resize in sizes:\n # print(\"new width: \",int((resize*img_height)/(template_height/template_width)))\n # print(\"New height: \", int(resize*img_height))\n template_rotated = cv2.resize(template_rotated, (int(resize*template_width), int(resize*template_height)), interpolation = cv2.INTER_CUBIC)\n resized_template_w, resized_template_h = template_rotated.shape[:2]\n res = cv2.matchTemplate(img,template_rotated,cv2.TM_CCOEFF_NORMED)\n # print(img.shape)\n threshold = 0.8\n loc = np.where( res >= threshold)\n for pt in zip(*loc[::-1]):\n list_location_postition = get_location_list_position(a_locations, pt)\n # print(pt)\n # print(l ist_location_postition)\n if len(a_votes) <= list_location_postition:\n a_votes.append(1)\n else:\n a_votes[list_location_postition] +=1\n if len(a_locations) <= list_location_postition:\n a_locations.append(pt)\n cv2.rectangle(img, pt, (pt[0] + resized_template_w, pt[1] + resized_template_h), (0,0,255-10*img_count), 2)\n # print(img.shape)\n # if img_count %10 == 1:\n # template_imgs.append(template)\n # cv2.imshow('img', template_rotated)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n cv2.putText(img, str(a_votes), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2, cv2.LINE_AA)\n template_imgs.append(img)\n show_images(template_imgs, 1)\n" }, { "alpha_fraction": 0.6105306148529053, "alphanum_fraction": 0.6325454115867615, "avg_line_length": 47.08292770385742, "blob_id": "87fe592f77775fd000894a8de3b270f23325a763", "content_id": "ae18b8a6a3cc1fee1e6de6567d15331c9ec16557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9857, "license_type": "no_license", "max_line_length": 191, "num_lines": 205, "path": "/image_processing_tools/Area_Recognition.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\"\"\"\nProcess:\n 1. process entire image (adaptiveThreshold, contour detection, bounding rect, etc.)\n 2. pull boundaries to be convolved\n 3. 
output predictions\n\"\"\"\n\nimport cv2\nimport pytesseract\nimport os\nimport glob\nimport cv2\nimport numpy as np\nimport time\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport math\nimport imutils\n\n# os.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\Past\\\\IS693R\\\\image_project\\\\images\\\\misc\\\\original_bigs\")\nos.chdir(\"C:\\\\Users\\\\grant\\\\IS\\\\IS552\\\\test\")\n\ndef longest_line_angle(img_bw):\n \"\"\"\n Expects image to have white background with black writing\n Function rotates image to various angles,\n iterates every vertical line of pixels in the images,\n line lengths of dark pixels are recorded and the angle returning the longest vertical line is returned\n\n Warning: slow performance on large images\n \"\"\"\n\n angles = [0, -5,-10, -15, -20, -25, -30, -35, -40, 10, 15, 20]\n vert_line_lengths = []\n for indx, angle in enumerate(angles):\n vert_line_lengths.append([angle, 0])\n img_warped = imutils.rotate_bound(img_bw, angle)\n h, w = img_warped.shape[:2]\n for x in range(w):\n line_length = 0\n for y in range(h):\n try:\n if img_warped[y][x] < 10 and (img_warped[y-1][x] <10 or img_warped[y-1][x-1] <10 or img_warped[y][x-1] <10):\n line_length += 1\n else:\n if line_length > vert_line_lengths[indx][1]:\n vert_line_lengths[indx][1] = line_length\n line_length = 0\n except:\n None\n\n best_angle_weight = 0\n best_angle = 0\n for indx, val in enumerate(vert_line_lengths):\n if vert_line_lengths[indx][1] > best_angle_weight+1:\n best_angle = vert_line_lengths[indx][0]\n best_angle_weight = vert_line_lengths[indx][1]\n # print(\"Rotating image to: {}\".format(best_angle))\n return best_angle\n\ndef show_images(images, cols = 2, titles = None):\n \"\"\"Display a list of images in a single figure with matplotlib.\n\n Parameters\n ---------\n images: List of np.arrays compatible with plt.imshow.\n\n cols (Default = 1): Number of columns in figure (number of rows is\n set to np.ceil(n_images/float(cols))).\n\n titles: List of titles corresponding to each image. Must have\n the same length as titles.\n \"\"\"\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed')\n plt.show()\n\ndef add_contours(img_source_bw, img_color, square_pixels):\n image,contours,hierarchy = cv2.findContours(img_source_bw,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n img_cont_unfiltered = cv2.cvtColor(img_color, cv2.COLOR_BGR2RGB)\n\n for cnt in contours:\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(img_cont_unfiltered,(x,y),(x+w,y+h),(0,255,0),5)\n # First, check for shape of word. 
Ignoring single letters for now.\n if w > h:\n if (22 <= w <= 500) and (22 <= h <= 200):\n # this ensures that height is less than max_contour, and width, while width is greater than min_contour\n # if (h > min_contour and w < max_contour):\n # \"\"\"iterate through contours and check for variety in the images\"\"\"\n crop_img = img_source_bw[y:y+h, x:x+w]\n # horizontal = columns, vertical = rows\n (horizontal, vertical) = crop_img.shape\n h_lower = int(.2 * horizontal)\n h_upper = int(.8 * horizontal)\n v_lower = int(.2 * vertical)\n v_upper = int(.8 * vertical)\n\n # sum total changes accross the bw array. If none of middle sixty percentof arrays contain more than one color change, discard entire boundary\n contained_variety = 0\n for horiz in crop_img[h_lower:h_upper]:\n row_value_changes = 0\n value_in_row = horiz[0]\n for vert_value in horiz[v_lower:v_upper]:\n if vert_value != value_in_row:\n row_value_changes += 1\n value_in_row = vert_value\n if row_value_changes > 2: # a change and back (we want more than one line)\n contained_variety += 1\n if contained_variety > 0:\n cv2.rectangle(img_color,(x,y),(x+w,y+h),(0,255,0),5)\n # file_name = \"C:\\\\Users\\\\grant\\\\IS\\\\Past\\\\IS693R\\\\image_project\\\\images\\\\misc\\\\raw_area_crops\\\\\"+str(v_upper-v_lower+h/w)+\"{}.jpg\".format(contained_variety*(w/100))\n # print(x, y, h, w)\n # raw_output = img_color[y5:y+h, x:x+w] # If really wide, we need larger height for dataset\n # raw_output = img_color[y-h:y+h+w, x-15:x+w+15]\n # try:\n # raw_output_rotated_expanded = imutils.rotate(raw_output, -longest_line_angle(crop_img))\n # except:\n # raw_output_rotated_expanded = raw_output\n # cv2.imwrite(file_name, raw_output_rotated_expanded)\n # print(\"writing image {}\".format(file_name))\n # cv2.imshow('crop', raw_output_rotated)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n return img_color, img_cont_unfiltered\n\nfor file in glob.glob(\"*.jpg\"):\n print(\"reading img: \", file)\n img = cv2.imread(file)\n height, width, channels = img.shape\n square_pixels = height * width\n\n # reduce oversize images\n while square_pixels > 7000000:\n print(\"reducing oversize image.\")\n img = cv2.resize(img, (0,0), fx=0.9, fy=0.9)\n # print(\"reducing image size...\")\n height, width, channels = img.shape\n square_pixels = height * width\n\n print(\"creating duplicate images for unrefined output.\")\n # unrefined_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # unrefined_img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n unrefined_img3 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n print(\"creating grayscale image.\")\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n print(height, width, (square_pixels))\n\n print(\"calculating block size for gaussian window:\")\n window_block_neighbors = int(.75*math.sqrt(square_pixels))\n\n while window_block_neighbors %2 != 1:\n window_block_neighbors += 1\n print(window_block_neighbors)\n\n print(\"Adding adaptive threshold.\")\n # img_low_thresh = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,window_block_neighbors,20)\n # img_low_thresh2 = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,window_block_neighbors,10)\n img_low_thresh3 = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,window_block_neighbors,2)\n\n print(\"Finding contours.\")\n # img_low_thresh,low_thresh_contours,low_thresh_hierarchy = cv2.findContours(img_low_thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # 
img_low_thresh2,low_thresh_contours2,low_thresh_hierarchy2 = cv2.findContours(img_low_thresh2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n    img_low_thresh3,low_thresh_contours3,low_thresh_hierarchy3 = cv2.findContours(img_low_thresh3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n    print(\"Filtering and adding contours.\")\n    # img_low, img_cont_unfiltered = add_contours(img_low_thresh, unrefined_img)\n    # img_low2, img_cont_unfiltered2 = add_contours(img_low_thresh2, unrefined_img2)\n    img_low3, img_cont_unfiltered3 = add_contours(img_low_thresh3, unrefined_img3, square_pixels)\n\n    print(\"Displaying images.\")\n    # show_images([img_low_thresh, img_low_thresh2, img_low_thresh3, img_cont_unfiltered, img_low])\n    # show_images([img_low_thresh, img_low_thresh2, img_low_thresh3, img_low, img_low2, img_low3])\n    # show_images([img_low_thresh, img_low_thresh2, img_low, img_low2])\n    show_images([img, img_low_thresh3, img_cont_unfiltered3, img_low3])\n\n\n# TODO split words to characters\n# TODO homogenize character sizes\n\n# use machine learning to decide which filters are applicable for contours?\n# ignore every contour fully contained within another contour\n# everything interesting is generally captured. Try combining overlapping contours when similar height\n# try decreasing threshold for bw vs bolding black (adding one to each array)\n# rotating rectangles will help ( for rotated records ) - OpenCV has this built in\n# ALMOST EVERY SPOT WE WANT TO FIND HAS A HORIZONTAL LINE (sometimes dotted) GOING THROUGH IT with x amount of writing pixels above it, and sometimes some below\n# check into repeating patterns to identify the dotted line, and slightly shifted solids (fuzzy) for the straight line. Bold may help for this task too.\n# look into manual contour creation by checking for curved, connected lines - can do for just black and white. 
Probably should decrease the threshold rather than thickening pixels, but try both\n# see how OpenCV does contours\n    # could use ratio for contour filter\n# next, need to refine boxes to include whole words, where possible.\n# maybe combine horizontally similar boxes\n# maybe move forward with current results to better understand next steps before spending too much time refining\n" }, { "alpha_fraction": 0.5734902024269104, "alphanum_fraction": 0.5927847623825073, "avg_line_length": 35.13393020629883, "blob_id": "0230809238cd294ebe82c1ea7c791cfac4d51002", "content_id": "b7bb7a7ed769b4d2ef997d64d63441407a0891d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4047, "license_type": "no_license", "max_line_length": 210, "num_lines": 112, "path": "/custom_models.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv1D, Conv2D, MaxPooling2D, MaxPooling1D, AveragePooling2D\n\n\ndef bw_cnn(x_train, y_train, x_val, y_val,\n           input_shape, n_classes,\n           epochs=12, batch_size=64):\n    print(\"input shape: {}\".format(input_shape))\n    print(\"Number of classes: {}\".format(n_classes))\n\n    model = Sequential()\n\n    model.add(Conv1D(32, kernel_size=(3),\n             activation='relu',\n             input_shape=input_shape))\n    model.add(Conv1D(64, (3), activation='relu'))\n    model.add(MaxPooling1D(pool_size=(2)))\n    model.add(Dropout(0.25))\n    model.add(Flatten())\n    model.add(Dense(64, activation='relu'))\n    model.add(Dropout(0.25))\n    model.add(Dense(128, activation='relu'))\n    model.add(Dropout(0.25))\n    model.add(Dense(n_classes, activation='softmax'))\n\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=keras.optimizers.Adadelta(),\n                  metrics=['accuracy'])\n\n    model.fit(x_train, y_train,\n              batch_size=batch_size,\n              epochs=epochs,\n              verbose=1,\n              # validation_split=0.4,\n              validation_data=(x_val, y_val)\n              )\n    return model\n\ndef cursive_cnn(x_train, y_train, x_val, y_val,\n                input_shape, n_classes,\n                epochs=12, batch_size=64):\n\n    print(\"input shape: {}\".format(input_shape))\n    print(\"Number of classes: {}\".format(n_classes))\n\n    model = Sequential()\n\n    model.add(Conv2D(32, kernel_size=(3, 3),\n             activation='relu',\n             input_shape=input_shape))\n    model.add(Conv2D(64, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.25))\n    model.add(Flatten())\n    model.add(Dense(128, activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(n_classes, activation='softmax'))\n\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=keras.optimizers.Adadelta(),\n                  metrics=['accuracy'])\n\n    model.fit(x_train, y_train,\n              batch_size=batch_size,\n              epochs=epochs,\n              verbose=1,\n              # validation_split=0.4,\n              validation_data=(x_val, y_val)\n              )\n    return model\n\n\ndef seven_layer_cnn(activation_1, activation_2, loss, x_train, y_train, \\\n    input_shape, n_classes, base_layers, epochs=3):\n\n    model = Sequential()\n    model.add(Conv2D(int(n_classes/2),\n                  kernel_size=(3, 3),\n                  activation=activation_1,\n                  input_shape=input_shape))\n    #tanh offering more specific vals, rather than 1 0\n    model.add(Conv2D(n_classes, (3, 3), activation=activation_1)) # relu\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.25))\n\n    #bonus\n    model.add(Conv2D(n_classes, (3, 3), activation=activation_1)) # relu\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.25))\n\n    model.add(Flatten())\n    
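# Head sizing below is a heuristic, not a tuned choice: widen to 2x the class\n    # count after flattening, then project down to n_classes for the final\n    # activation. Any width >= n_classes is a reasonable starting point here.\n    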
model.add(Dense(n_classes*2, activation=activation_1))\n model.add(Dropout(0.5))\n model.add(Dense(n_classes, activation=activation_2))\n\n # 'categorical_crossentropy' <- supposedly for multi-class, not multi label: https://stats.stackexchange.com/questions/260505/machine-learning-should-i-use-a-categorical-cross-entropy-or-binary-cross-entro\n model.compile(loss=loss,\n #'binary_crossentropy' : supposedely ideal for multi label, current .5 test accuracy, but no letters predicted\n # 'mean_squared_error' : all same, 1s\n optimizer=keras.optimizers.Adam(), #.Adam(), Adadelta()\n metrics=['categorical_accuracy', 'accuracy', 'mae'])\n\n model.fit(x_train, y_train,\n batch_size=learning_rate, #128\n epochs=epochs,\n verbose=1,\n # validation_data=(x_test, y_test)\n validation_split=0.4\n )\n return model\n" }, { "alpha_fraction": 0.8054136633872986, "alphanum_fraction": 0.8105209469795227, "avg_line_length": 121.375, "blob_id": "4118e8fabec57bf81b488c0d3866897ffd05f674", "content_id": "bf441f332435e33c5b9378b0c297fd3104103ddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1958, "license_type": "no_license", "max_line_length": 479, "num_lines": 16, "path": "/README.md", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "# Cursive-OCR-for-Geneology\nResearch findings and trained models associated with cursive handwriting recognition of antiquated records. Created to store and propagate findings from research at BYU during capstone semester, 2018. Emphasis given to genealogical records which need indexing.\n\n\nIf you are interested in contributing to this project, please reach out with a personalized invitation to https://www.linkedin.com/in/d-grant-dexter/.\n\n# Overview\nThe attached dataset includes (1) full images, (2) sliced windows (from window_slider.py function), and (3) individual characters. You will also find custom modules for reading, vizualizing, and modeling with these image datasets. \n\n# Tips\nFor binary classification, use the sigmoid activiation on the final output layer, and a simple loss function like mean_squared_error (unless using one-hot encoding [0, 1] then softmax will work with categorical_crossentropy loss function). For multi-class models, use a softmax activation on the final output layer with a categorical_crossentropy loss. For multi-label classification, use a sigmoid function (because of independent probabilities) with a binary_crossentropy loss.\n\nExpect to need, at a minimum, 5K images per class for mult-class solutions. Raise that number to the power of as many labels you may have for multi-label classification. If the model is converging on a solution which assigns all images to the most common labels, consider enlarging the dataset (optionally through augmentation).\n\n# Tools\nThe Keras preprocessor can be used to augment datasets. Take advantage of vizualizer.py when designing a network to vizualize the filters and pooling layers of your CNN. load_images_dataset.py has functions for reading in images from a folder with image names as labels, or reading a csv with image paths and labels on each row. 
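A minimal usage sketch (hedged: it assumes CSVs with X/Y columns, a comma delimiter, and grayscale 60x70 crops, with the function signatures as they stand in this repo):\n\n```python\nimport load_images_dataset as lid\n\n# Hypothetical CSV paths; column X = image path, column Y = integer label.\ndata = lid.read_my_csv('train.csv', 'val.csv', input_shape=(60, 70), delimiter=',')\ntrain_x, train_y, val_x, val_y, names, n_classes, shape = data\nsplits = lid.divide_data_with_val(train_x, train_y, val_x, val_y, n_test=10)\nx_train, x_val, x_test, y_train, y_val, y_test, n_test, n = splits\n```\n\n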
Also use the divide_dataset function for shuffling your data and spliting a training, validation, and test dataset.\n" }, { "alpha_fraction": 0.5734902024269104, "alphanum_fraction": 0.597865641117096, "avg_line_length": 36.48181915283203, "blob_id": "b693076ab56d912fd884468cdf1dcfebfdd3e07b", "content_id": "2096e3cf43f183637342f9622602fa789bd76fec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16492, "license_type": "no_license", "max_line_length": 140, "num_lines": 440, "path": "/historical/CNN.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\"\"\"Convolutional Neural Network Estimator for Cursive, built with tf.layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport glob, os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.contrib.learn.python.learn.datasets import base\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import random_seed\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nif os.name == 'nt':\n env = 0\nelse:\n env = 1\n\nCLASS_N = 9\nSZ_W = 40\nSZ_H = 60\n\ndef cnn_model_fn(features, labels, mode):\n \"\"\"Model function for CNN.\"\"\"\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # MNIST images are 28x28 pixels, and have one color channel\n input_layer = tf.reshape(features[\"x\"], [-1, SZ_W, SZ_H, 1])\n print(input_layer)\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 26, 80, 1]\n # Output Tensor Shape: [batch_size, 26, 80, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n print(conv1)\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 26, 80, 32]\n # Output Tensor Shape: [batch_size, 13, 40, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n print(pool1)\n # Convolutional Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 13, 40, 32]\n # Output Tensor Shape: [batch_size, 13, 40, 64]\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n print(conv2)\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 13, 40, 64]\n # Output Tensor Shape: [batch_size, 7, 20, 64]\n # was pool size 2, 2\n pool2 = conv2 #tf.layers.max_pooling2d(inputs=conv2, pool_size=[1, 2], strides=2)\n print(pool2)\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 7, 20, 64?]\n # Output Tensor Shape: [batch_size, 7 * 20 * 64]\n flattened_size = (int(pool2.shape[1]) * int(pool2.shape[2]) * int(pool2.shape[3]))\n pool2_flat = tf.reshape(pool2, [-1, flattened_size])\n print(pool2_flat)\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 20 * 64]\n # Output Tensor Shape: [batch_size, 8960]\n dense = tf.layers.dense(inputs=pool2_flat, units=flattened_size, activation=tf.nn.relu)\n print(dense)\n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, 
rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n print(dropout)\n # Logits layer\n # Input Tensor Shape: [batch_size, 16640]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=CLASS_N)\n print(\"logits: \", logits)\n\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n print(predictions)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n print(1.0)\n # Calculate Loss (for both TRAIN and EVAL modes)\n onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=CLASS_N)\n print(\"1.0.1\")\n print(onehot_labels)\n loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)\n print(1.1)\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n print(1.2)\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\ndef read_data_sets(train_dir,\n one_hot=False,\n dtype=dtypes.float32,\n reshape=True,\n validation_size=5000,\n seed=None):\n\n\n # Pass in the images here.\n # TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'\n # TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'\n # TEST_IMAGES = 't10k-images-idx3-ubyte.gz'\n # TEST_LABELS = 't10k-labels-idx1-ubyte.gz'\n #\n # local_file = base.maybe_download(TRAIN_IMAGES, train_dir,\n # SOURCE_URL + TRAIN_IMAGES)\n # with open(local_file, 'rb') as f:\n # train_images = extract_images(f)\n #\n # local_file = base.maybe_download(TRAIN_LABELS, train_dir,\n # SOURCE_URL + TRAIN_LABELS)\n # with open(local_file, 'rb') as f:\n # train_labels = extract_labels(f, one_hot=one_hot)\n # print('mnist train labels: ', train_labels.shape)\n # print('example 1: ', train_labels[1])\n #\n # local_file = base.maybe_download(TEST_IMAGES, train_dir,\n # SOURCE_URL + TEST_IMAGES)\n # with open(local_file, 'rb') as f:\n # test_images = extract_images(f)\n #\n # local_file = base.maybe_download(TEST_LABELS, train_dir,\n # SOURCE_URL + TEST_LABELS)\n # with open(local_file, 'rb') as f:\n # test_labels = extract_labels(f, one_hot=one_hot)\n #\n # # if not 0 <= validation_size <= len(train_images):\n # # raise ValueError(\n # # 'Validation size should be between 0 and {}. 
Received: {}.'\n # # .format(len(train_images), validation_size))\n #\n # validation_images = train_images[:validation_size]\n # validation_labels = train_labels[:validation_size]\n # train_images = train_images[validation_size:]\n # train_labels = train_labels[validation_size:]\n\n print(\"loading windows...\")\n # This is where the auto_transcribe functions will input sliced windows\n\n if env == 0:\n os.chdir(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\dataset\")\n else:\n os.chdir(\"/home/ubuntu/Cursive-OCR-for-Geneology/dataset\")\n windows = []\n labels = []\n label_names = [\"a\", \"e\", \"i\", \"o\", \"u\", \"h\", \"n\", \"t\", \"other\"]\n total_imgs = 0\n for file in glob.glob(\"*.jpg\"):\n # if label_names in str(file):\n img = cv2.imread(file)\n\n # Convert img to grayscale to prep for black and white\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #grey increased accuracy by .3%\n # Convert to black and white based on automatice OTSU threshold\n # (thresh, img) = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) # b&w increased it by 20%\n\n windows.append(img)\n\n str_label = str(file)\n\n # instanciate int_label\n # int_label = [0, 0, 0, 0, 0, 0, 0]\n # if str_label == 'll':\n # int_label = [1, 0, 0, 0, 0, 0, 0]\n # elif str_label == 'ly':\n # int_label = [0, 1, 0, 0, 0, 0, 0]\n # elif str_label == 'lh':\n # int_label = [0, 0, 1, 0, 0, 0, 0]\n # elif str_label == 'lo':\n # int_label = [0, 0, 0, 1, 0, 0, 0]\n # elif str_label == 'le':\n # int_label = [0, 0, 0, 0, 1, 0, 0]\n # elif str_label == 'la':\n # int_label = [0, 0, 0, 0, 0, 1, 0]\n # elif str_label == 'ln':\n # int_label = [0, 0, 0, 0, 0, 0, 1]\n # labels.append(int_label)\n int_label = 0\n if \"a\" in str_label:\n int_label = 0\n elif \"e\" in str_label:\n int_label = 1\n elif \"i\" in str_label:\n int_label = 2\n elif \"o\" in str_label:\n int_label = 3\n elif \"u\" in str_label:\n int_label = 4\n elif \"h\" in str_label:\n int_label = 5\n elif \"n\" in str_label:\n int_label = 6\n elif \"t\" in str_label:\n int_label = 7\n elif \"noise\" in str_label:\n int_label = 8\n labels.append(int_label)\n total_imgs += 1\n\n\n windows = np.array(windows)\n labels = np.array(labels)\n print('my labels: ', labels.shape)\n windows = windows.reshape(-1, SZ_H, SZ_W) # was originally reversed. Correct is H then W\n\n # shuffle digits\n rand = np.random.RandomState(5)\n shuffle = rand.permutation(181) # len of windows turns out larger than 84?? 252??\n # print(rand, '\\n\\n\\n',len(windows),'\\n\\n\\n', shuffle)\n windows, labels = windows[shuffle], labels[shuffle]\n # print(windows, labels)\n\n # total sie = 181 with 7 classes currently\n # total_imgs = 547\n val_size = int(.2 * total_imgs)\n\n validation_images = windows[:val_size]\n validation_labels = labels[:val_size]\n train_images = windows[val_size:]\n train_labels = labels[val_size:]\n\n # if I validate with training data, I am still only getting 26% accuracy\n # try with black and white images next to see what changes.\n\n # come back and address the 20 test/training later? 
is okay that they are validation?\n test_images = train_images #validation_images # windows[:90]\n test_labels = train_labels #validation_labels # labels[:90]\n\n options = dict(dtype=dtype, reshape=reshape, seed=seed)\n\n train = DataSet(train_images, train_labels, **options)\n validation = DataSet(validation_images, validation_labels, **options)\n test = DataSet(test_images, test_labels, **options)\n\n return base.Datasets(train=train, validation=validation, test=test), total_imgs\n\n\ndef images_to_tensors(train_dir=\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for_Geneology\"):\n if env == 1:\n train_dir=\"/home/ubuntu/Cursive-OCR-for-Geneology/\"\n return read_data_sets(train_dir)\n\nclass DataSet(object):\n \"\"\"Construct a DataSet.\n one_hot arg is used only if fake_data is true. `dtype` can be either\n `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into\n `[0, 1]`. Seed arg provides for convenient deterministic testing.\n \"\"\"\n def __init__(self,\n images,\n labels,\n fake_data=False,\n one_hot=False,\n dtype=dtypes.float32,\n reshape=True,\n seed=None):\n # If op level seed is not set, use whatever graph level seed is returned\n seed1, seed2 = random_seed.get_seed(seed)\n np.random.seed(seed1 if seed is None else seed2)\n dtype = dtypes.as_dtype(dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError('Invalid image dtype %r, expected uint8 or float32' %\n dtype)\n if fake_data:\n self._num_examples = 10000\n self.one_hot = one_hot\n else:\n assert images.shape[0] == labels.shape[0], (\n 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))\n self._num_examples = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n\n if reshape:\n print('shape: ', images.shape)\n # assert images.shape[3] == 1\n images = images.reshape(images.shape[0],\n images.shape[1] * images.shape[2])\n print('reshaped: ', images.shape)\n\n if dtype == dtypes.float32:\n # Convert from [0, 255] -> [0.0, 1.0].\n images = images.astype(np.float32)\n images = np.multiply(images, 1.0 / 255.0)\n self._images = images\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0\n\n @property\n def images(self):\n return self._images\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_examples(self):\n return self._num_examples\n\n @property\n def epochs_completed(self):\n return self._epochs_completed\n\n def next_batch(self, batch_size, fake_data=False, shuffle=True):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n if fake_data:\n fake_image = [1] * 784\n # if self.one_hot:\n # fake_label = [1] + [0] * 9\n # else:\n # fake_label = 0\n # return [fake_image for _ in xrange(batch_size)], [\n # fake_label for _ in xrange(batch_size)\n # ]\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self._num_examples)\n np.random.shuffle(perm0)\n self._images = self.images[perm0]\n self._labels = self.labels[perm0]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n images_rest_part = self._images[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n 
np.random.shuffle(perm)\n self._images = self.images[perm]\n self._labels = self.labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self._images[start:end]\n labels_new_part = self._labels[start:end]\n return np.concatenate((images_rest_part, images_new_part), axis=0) , np.concatenate((labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]\n\n\ndef main(unused_argv):\n # Load training and eval data\n dataset, total_imgs = images_to_tensors()\n print(\"print dataset: \", dataset)\n train_data = dataset.train.images # Returns np.array\n train_labels = np.asarray(dataset.train.labels, dtype=np.int32)\n eval_data = dataset.test.images # Returns np.array\n eval_labels = np.asarray(dataset.test.labels, dtype=np.int32)\n # images = input_data.read_data_sets(\"C:\\\\Users\\\\grant\\\\IS\\\\IS693R\\\\image_project\\\\images\\\\CensusRecords\\\\1900s\\\\isolated\", one_hot=True)\n # print(\"print images: \", images)\n # train_data = \"\"\n # train_labels = \"\"\n # eval_data = \"\"\n # eval_labels = \"\"\n\n # Create the Estimator\n if env == 1:\n model_dir=\"/home/ubuntu/Cursive-OCR-for-Geneology/adjust_{}_rev1\".format(total_imgs)\n else:\n model_dir=\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\adjust_{}_rev1\".format(total_imgs)\n\n cursive_classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn, model_dir=model_dir)\n\n # Set up logging for predictions\n # Log the values in the \"Softmax\" tensor with label \"probabilities\"\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=50)\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": train_data},\n y=train_labels,\n batch_size=50,\n num_epochs=None,\n shuffle=True)\n cursive_classifier.train(\n input_fn=train_input_fn,\n steps=100, #20000\n hooks=[logging_hook])\n\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False)\n eval_results = cursive_classifier.evaluate(input_fn=eval_input_fn)\n print(eval_results)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" }, { "alpha_fraction": 0.7613636255264282, "alphanum_fraction": 0.8125, "avg_line_length": 34.400001525878906, "blob_id": "ee4d437724e331a0d2ffb14db6bcd7b12e300b72", "content_id": "70a9edb74ad3b6f1c60603a14e7c2f8aa4f8f7e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 176, "license_type": "no_license", "max_line_length": 94, "num_lines": 5, "path": "/historical/NOTES.txt", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "Currently, accuracy levels are dipping (quick pass only reached 26% accuracy with 9 classes). 
\nSwitching to multiple-hot encoding\n\n\ndataset contruction only reached bll_100-106" }, { "alpha_fraction": 0.5700308680534363, "alphanum_fraction": 0.579070508480072, "avg_line_length": 37.53688430786133, "blob_id": "95ad1c69968b5f2f726b50b77d9f6605118386c6", "content_id": "748290fcc3338af6bf1527064cd4ffa2804c4351", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9403, "license_type": "no_license", "max_line_length": 123, "num_lines": 244, "path": "/load_images_dataset.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport pandas as pd\nimport glob\nimport scipy.misc\nimport matplotlib\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport string\nimport os\nimport re\nimport random\n\n\nclass PreparedData:\n n_classes = 0\n size=(60,25,3)\n channels = 3\n skips=[\".jpg\", \" \"]\n\n label_dict = {\"word2idx\": {}, \"idx2word\": []}\n idx = 0\n\n dataset = {'x_train': [], 'x_val': [], 'x_test': [],\n 'y_train': [], 'y_val': [], 'y_test': []}\n\n def set_size(self, size):\n self.size = size\n self.channels = size[-1] if size[-1] < 4 else 0\n\n def read(self, path, channels, tvt='train'):\n print(\"Processing '{}' dataset\".format(tvt))\n\n label_cardinality = {}\n clean_titles = []\n os.chdir(path)\n\n for file in glob.glob(\"*.jpg\".format(path), recursive=True):\n if channels == 0:\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n elif channels == 3:\n img = scipy.misc.imread(file) #.astype(np.unit8)\n else:\n print(\"Unexpected channels: {}\".format(channels))\n break\n\n if img.shape[0] == self.size[0] and img.shape[1] == self.size[1]:\n try:\n # if subfolder, this will split\n clean_title = str(file.split('\\\\')[1])\n except:\n clean_title = str(file)\n clean_title = re.sub(r\"\\([\\d+]*\\)\", \"\", clean_title)\n\n for lb in self.skips:\n clean_title = clean_title.replace(lb, \"\")\n\n if len(clean_title) > 0:\n self.dataset['x_{}'.format(tvt)].append(img)\n clean_titles.append(clean_title)\n else:\n print(\"{} size mismatch: {}\".format(file, img.shape))\n\n # # Add all file labels to dict, with indexes\n for title in clean_titles:\n for l in list(title):\n if l in label_cardinality:\n label_cardinality[l] += 1\n else:\n label_cardinality[l] = 1\n if l in self.label_dict[\"idx2word\"]:\n pass\n else:\n self.label_dict[\"idx2word\"].append(l)\n self.label_dict[\"word2idx\"][l] = self.idx\n self.idx += 1\n\n # this must be the same for train, val, and test\n self.n_classes = len(self.label_dict[\"idx2word\"])\n\n for title in clean_titles:\n letters = list(title)\n l = np.sum([np.eye(self.n_classes, dtype=\"uint8\")[self.label_dict[\"word2idx\"][s]]\n for s in letters], axis=0)\n self.dataset['y_{}'.format(tvt)].append(l)\n for l in sorted(label_cardinality):\n print(l, \": \", label_cardinality[l])\n\n print(\"Shuffling\")\n # print(self.dataset['x_{}'.format(tvt)])\n # print(self.dataset['y_{}'.format(tvt)])\n random_seed = 113\n random.Random(random_seed).shuffle(self.dataset['x_{}'.format(tvt)])\n random.Random(random_seed).shuffle(self.dataset['y_{}'.format(tvt)])\n self.dataset['x_{}'.format(tvt)] = np.array(self.dataset['x_{}'.format(tvt)])\n # self.dataset['x_{}'.format(tvt)].reshape(self.dataset['x_{}'.format(tvt)].shape[0], 60, 25)\n self.dataset['y_{}'.format(tvt)] = np.array(self.dataset['y_{}'.format(tvt)])\n\n\n\n def process(self):\n 
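# NOTE: the dataset roots below are hard-coded to one Windows machine (an\n        # assumption of this repo's layout); point them at your own\n        # train/validate/test folders before calling process().\n        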
self.read('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_dataset\\\\all', self.channels, tvt='train')\n self.read('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_dataset\\\\validate', self.channels, tvt='val')\n self.read('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_dataset\\\\test', self.channels, tvt='test')\n\n def process_test_only(self):\n self.read('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_dataset\\\\test', self.channels, tvt='test')\n\n\ndef read_my_csv(train_file_name, val_file_name, input_shape=(60, 70, 3), delimiter='/', channels=3, one_hot=True):\n \"\"\"\n This function is used to pull specific label atrributes from a file,\n in addition to processing the input images.\n \"\"\"\n print(\"Reading training data from csv: {} with delimiter '{}'\".format(train_file_name, delimiter))\n\n # path = os.getcwd() + \"\\\\{}\".format(file_name)\n train_df = pd.read_csv(train_file_name, delimiter=delimiter)\n\n print(\"Reading validation data from csv: {} with delimiter '{}'\".format(val_file_name, delimiter))\n val_df = pd.read_csv(val_file_name, delimiter=delimiter)\n\n train_imgs = []\n train_labels = []\n val_imgs = []\n val_labels = []\n names = []\n flatten = True if channels == 2 else False\n\n print(\"Detecting distinct classes\")\n n_classes = train_df.apply(pd.Series.nunique)['Y']\n val_n_classes = val_df.apply(pd.Series.nunique)['Y']\n assert n_classes == val_n_classes, \"Number of classes in training and validation data mismatch\"\n eyes = np.eye(n_classes, dtype=\"uint8\")\n\n kernel = np.ones((3,3),np.uint8) # was 5, 5, first is vert, then horizontal\n # print(expected_shape, delimiter)\n print(\"Iterating training rows\")\n print(\"-Reading images\")\n print(\"-Dilating images to repair errosion\")\n print(\"-Storing labels\")\n for index, row in train_df.iterrows():\n try:\n name = row.X\n # img = scipy.misc.imread(name, flatten=flatten).astype(np.uint8)\n img = cv2.imread(name)\n img = cv2.erode(img,kernel,iterations = 2)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n assert img.shape == input_shape, \"Image shape {} does not match input shape {}\".format(img.shape, input_shape)\n train_imgs.append(img)\n\n # names.append(name)\n if one_hot:\n train_labels.append(eyes[(row.Y)-1])\n else:\n train_labels.append(row.Y)\n except Exception as e:\n print(\"Failed img error: {} : {}\".format(name, e))\n\n print(\"Iterating validation rows\")\n print(\"-Reading images\")\n print(\"-Dilating images to repair errosion\")\n print(\"-Storing labels\")\n for index, row in val_df.iterrows():\n try:\n name = row.X\n # img = scipy.misc.imread(name, flatten=flatten).astype(np.uint8)\n img = cv2.imread(name)\n img = cv2.erode(img,kernel,iterations = 2)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n assert img.shape == input_shape, \"Image shape {} does not match input shape {}\".format(img.shape, input_shape)\n val_imgs.append(img)\n\n # names.append(name)\n if one_hot:\n val_labels.append(eyes[(row.Y)-1])\n else:\n val_labels.append(row.Y)\n except Exception as e:\n print(\"Failed img error: {} : {}\".format(name, e))\n\n print(\"Total training imgs: {}\".format(len(train_imgs)))\n print(\"Total validation imgs: {}\".format(len(val_imgs)))\n print(\"Total training labels: {}\".format(len(train_labels)))\n print(\"Total validation labels: {}\".format(len(val_labels)))\n # print(\"Total names: {}\".format(len(names)))\n\n assert len(train_imgs) == len(train_labels), \"Training images 
mismatch with training labels\"\n    assert len(val_imgs) == len(val_labels), \"Validation images mismatch with validation labels\"\n\n    return train_imgs, train_labels, val_imgs, val_labels, names, n_classes, input_shape\n\ndef divide_data_with_val(train_imgs, train_labels, val_imgs, val_labels, n_test=10):\n    # shuffle samples to remove ordering\n    random_seed = 4\n    random.Random(random_seed).shuffle(train_imgs)\n    random.Random(random_seed).shuffle(train_labels)\n    # random.Random(random_seed).shuffle(train_name_labels)\n    random.Random(random_seed).shuffle(val_imgs)\n    random.Random(random_seed).shuffle(val_labels)\n    # random.Random(random_seed).shuffle(val_name_labels)\n    n = len(val_imgs) - (1+n_test)\n\n    x_val = np.array(val_imgs[:n])\n    y_val = np.array(val_labels[:n])\n    # train_name_labels = np.array(val_name_labels[: n])\n\n    x_train = np.array(train_imgs)\n    y_train = np.array(train_labels)\n    # train_name_labels = np.array(train_name_labels)\n\n    x_test = np.array(val_imgs[n:n + n_test])\n    y_test = np.array(val_labels[n:n + n_test])\n    # test_name_labels = np.array(val_name_labels[n:n + n_test])\n\n\n    # train_name_labels, test_name_labels, val_name_labels\n    return x_train, x_val, x_test, y_train, y_val, y_test, n_test, n\n\n\ndef divide_data(imgs, labels, name_labels, n_test=10):\n    # shuffle samples to remove ordering\n    random_seed = 4\n    random.Random(random_seed).shuffle(labels)\n    # random.Random(random_seed).shuffle(name_labels)\n    random.Random(random_seed).shuffle(imgs)\n    n = len(imgs) - (1+n_test)\n\n    x_train = np.array(imgs[: n])\n    y_train = np.array(labels[: n])\n    # train_name_labels = np.array(name_labels[: n])\n\n    x_test = np.array(imgs[n:n + n_test])\n    y_test = np.array(labels[n:n + n_test])\n    # test_name_labels = np.array(name_labels[n:n + n_test])\n\n    # first_img = np.squeeze(x_train[5])\n    # print(\"Graph label: \", y_train[5])\n    # plt.imshow(first_img)\n    # plt.show()\n    return n_test, n, x_test, x_train, y_test, y_train #, train_name_labels, test_name_labels\n" }, { "alpha_fraction": 0.567103922367096, "alphanum_fraction": 0.5804742574691772, "avg_line_length": 31.491804122924805, "blob_id": "829e51398f59e7de48fc949173533ffc57f55480", "content_id": "a25c1dab9daf0c6ad4afba6cbc3eca14eb2927ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3964, "license_type": "no_license", "max_line_length": 124, "num_lines": 122, "path": "/historical/bite-size.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "\"\"\"\nThe goal of this file is to establish smaller classification methods which can be used as inputs to a more intelligent model\n\nAttempt to segment letters into general categories to help with classification\nUse attributes like:\n    Contains tall letters or no\n    Contains numbers\n    Is single letter or multiple letters\n    Has space or no (two word parts or all one)\n    Has Capital letter or no\n\n\"\"\"\nimport numpy as np\nimport random\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimport load_images_dataset\nimport custom_models\n\nfrom keras.datasets import mnist\n\n\npath = os.getcwd() + \"/dataset/single\"\n\n# n_classes = 2\n# base_layers = 3\nepochs = 20\nbatch_size = 24\n# conv_size = 3\n# pool_size = 2\n\nimgs, labels, n_classes, label_dict, SIZE = load_images_dataset.prepare_data(path,\n                                                                             SIZE=(60,25),\n                                                                             skips=[\".jpg\", \" \"])\n\n    # train_name_labels, test_name_labels\nn_test, n, x_test, x_train, y_test, y_train = \\\n    load_images_dataset.divide_data(imgs, labels, label_dict, n_test=10)\n\n# 
train_imgs, train_labels, val_imgs, val_labels, name_labels, n_classes, input_shape = \\\n# load_images_dataset.read_my_csv(\"train_sameheight.txt\", \"val_sameheight.txt\", \\\n# input_shape=(60, 70), channels=2, one_hot=True)\n\n\n\n# x_train, x_val, x_test, y_train, y_val, y_test, n_test, n = \\\n# load_images_dataset.divide_data_with_val(train_imgs, train_labels, val_imgs, val_labels)\n\"\"\"Make my a vs e dataset so big that it is like mnist (70K total, about 8K per class)\"\"\"\n\n# -----------MNIST DATA TEST------------\n# (x_train, y_train), (x_test, y_test) = mnist.load_data()\n#\n# eyes=np.eye(10, dtype=\"uint8\")\n#\n# new_y_train = []\n# new_y_test = []\n#\n# for i, v in enumerate(y_train):\n# new_y_train.append(eyes[v])\n# for i, v in enumerate(y_test):\n# new_y_test.append(eyes[v])\n#\n# new_y_test = np.array(new_y_test)\n# new_y_train = np.array(new_y_train)\n\n\n\nmodel = custom_models.basic_cnn('relu', 'mean_squared_error', \\\n x_train, y_train, (60, 25), 2, \\\n epochs=epochs, batch_size=batch_size)\n\n\n# n_test, n, x_test, x_train, y_test, y_train, train_name_labels, test_name_labels = \\\n# n_test, n, x_test, x_train, y_test, y_train = load_images_dataset.divide_data(imgs, labels, name_labels)\n\n# print(x_train[:5])\n\n# for x in range(1, 10):\n# print(labels[x], name_labels[x])\n\n# model = custom_models.basic_cnn('relu', 'mean_squared_error', \\\n# x_train, y_train, x_val, y_val, input_shape, n_classes, \\\n# epochs=epochs, batch_size=batch_size)\n\nscore = model.evaluate(x_test, y_test, verbose=1)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint(score)\n\npred = model.predict(x_test)\n\nprint(\"predictions finished\")\nprint(pred)\nprint(y_test)\n#\n# for i in range (0, len(x_test)):\n# actuals = \"\"\n# # for label in y[n+i]:\n# for index in np.where(val_labels[n+i]==1)[0]:\n# # actuals += \" {}\".format(label_dict[\"idx2word\"][index])\n# actuals += str(index+1)\n# print(\"---------------------------------------\\nActual: {}\".format(actuals))\n#\n# # label_dict[\"idx2word\"][s],y[n+i][s]) for s in y[n+i])\n# # print(\"Prediction: {}\".format(pred[i]))\n#\n# preds = pred[i]\n# formatted_preds = []\n# for ind, val in enumerate(preds):\n# # print(ind)\n# # print(val)\n# formatted_preds.append(\"{} probability of label: {}\".format(val, ind+1))\n# formatted_preds.sort()\n# for x in formatted_preds:\n# print(x)\n# # print(\"Predicted: {}\".format(formatted_preds.sort()))\n# # for i2 in range (0, len(label_dict[\"idx2word\"])):\n# # if pred[i][i2] > 0.2:\n# # print(\"\\\"{}\\\":{}\".format(label_dict[\"idx2word\"][i2], pred[i][i2]))\n# print(\"--------------------------------------\")\n" }, { "alpha_fraction": 0.595371663570404, "alphanum_fraction": 0.6065918803215027, "avg_line_length": 40.94117736816406, "blob_id": "77ab8ad884c9fde2091d8cbb8de7e86048927ee8", "content_id": "34d92b814ac2a47c350200de41b76397bb946a0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4278, "license_type": "no_license", "max_line_length": 138, "num_lines": 102, "path": "/line_slider_classification_3.py", "repo_name": "gdexlab/Cursive-OCR-for-Geneology", "src_encoding": "UTF-8", "text": "import custom_models\nimport load_images_dataset\nfrom keras.models import model_from_yaml\n# from keras.utils import plot_model\n\n\np_data = load_images_dataset.PreparedData()\np_data.set_size((60, 25))\np_data.process()\n\nepochs = 30\nbatch_size = 64\n# print(p_data.dataset['y_val'][:7])\ntest_loss = 13\ntest_accuracy 
= 0\ncurrent_epoch = 0\n# model = custom_models.cursive_cnn(p_data.dataset['x_train'], p_data.dataset['y_train'],\n#                                   p_data.dataset['x_val'], p_data.dataset['y_val'],\n#                                   p_data.size, p_data.n_classes,\n#                                   epochs=epochs, batch_size=batch_size)\n\n\nwhile test_loss > .7:\n    print(\"Current epochs: {}\".format(current_epoch))\n    if (current_epoch > 50 and test_accuracy < 0.25 and test_loss > 10) or current_epoch == 0:\n\n        print(\"loading previous model\")\n\n        yaml_file = open('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_cnn_current.yaml', 'r')\n        loaded_model_yaml = yaml_file.read()\n        yaml_file.close()\n        model = model_from_yaml(loaded_model_yaml)\n        print(\"Loaded.\")\n        # load weights into new model\n        model.load_weights(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\model2_current.h5\")\n        print(\"Loaded weights from disk\")\n        model.compile(loss='categorical_crossentropy',\n                      optimizer='Adadelta',\n                      metrics=['accuracy'])\n        print(\"Compiled\\nFitting...\")\n        model.fit(p_data.dataset['x_train'], p_data.dataset['y_train'],\n                  batch_size=batch_size,\n                  epochs=epochs,\n                  verbose=1,\n                  # validation_split=0.4,\n                  validation_data=(p_data.dataset['x_val'], p_data.dataset['y_val'])\n                  )\n        print(\"Fit\")\n        # print(\"(re)Initializing model\")\n        # # if sufficient attempts, and poor accuracy, re-initialize, or if first time\n        # model = custom_models.bw_cnn(p_data.dataset['x_train'], p_data.dataset['y_train'],\n        #                              # p_data.dataset['x_val'], p_data.dataset['y_val'],\n        #                              p_data.size, p_data.n_classes,\n        #                              epochs=epochs, batch_size=batch_size)\n        # current_epoch = 0\n\n    else:\n        print(\"Saving and loading progress\")\n        # if the model is performing well, or less than 100 epochs, save the weights, and keep training\n        # serialize model to YAML\n        model_yaml = model.to_yaml()\n        with open(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_cnn_3.yaml\", \"w\") as yaml_file:\n            yaml_file.write(model_yaml)\n        # serialize weights to HDF5\n        model.save_weights(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\model2_{}_{}.h5\".format(test_loss, test_accuracy))\n        print(\"Saved model to disk\")\n\n        # load YAML and create model\n        yaml_file = open('C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_cnn_3.yaml', 'r')\n        loaded_model_yaml = yaml_file.read()\n        yaml_file.close()\n        model = model_from_yaml(loaded_model_yaml)\n        # load weights into new model\n        model.load_weights(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\model2_{}_{}.h5\".format(test_loss, test_accuracy))\n        print(\"Loaded model from disk\")\n        model.compile(loss='categorical_crossentropy',\n                      optimizer='Adadelta',\n                      metrics=['accuracy'])\n        model.fit(p_data.dataset['x_train'], p_data.dataset['y_train'],\n                  batch_size=batch_size,\n                  epochs=epochs,\n                  verbose=1,\n                  # validation_split=0.4,\n                  validation_data=(p_data.dataset['x_val'], p_data.dataset['y_val'])\n                  )\n\n\n    score = model.evaluate(p_data.dataset['x_test'], p_data.dataset['y_test'], verbose=1)\n    test_loss = score[0]\n    test_accuracy = score[1]\n    print('Test accuracy:', test_accuracy)\n    print('Test loss:', test_loss)\n    current_epoch += epochs\n\n\n\nmodel_yaml = model.to_yaml()\nwith open(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\slider_cnn3{}_{}.yaml\".format(test_loss, test_accuracy), \"w\") as yaml_file:\n    yaml_file.write(model_yaml)\n# serialize weights to HDF5\nmodel.save_weights(\"C:\\\\Users\\\\grant\\\\Repos\\\\Cursive-OCR-for-Geneology\\\\model3{}_{}.h5\".format(test_loss, 
test_accuracy))\nprint(\"Saved model to disk\")\n" } ]
19
anthony1110/web_scraper
https://github.com/anthony1110/web_scraper
2b4f9c640c5e4a3ae0cea2c5733580bb606453d0
86eeb5400062f151984996c09d1b68365597bf48
bdaa97dfcd807d9e4fd0a980f6dfe7ca76e5738d
refs/heads/master
2021-07-05T07:00:00.658247
2017-10-01T11:51:13
2017-10-01T11:51:13
105,350,459
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7329192757606506, "alphanum_fraction": 0.7639751434326172, "avg_line_length": 30.600000381469727, "blob_id": "e6eaad46de2e656c8dc945519961641ef56924a6", "content_id": "9d874fdedbb6c2ded10e80b5d922400a066c0630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 46, "num_lines": 5, "path": "/web_scrapper/mongo_client.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\r\n\r\nMONGO_CLIENT = MongoClient('localhost', 27017)\r\nNEWS_DB = MONGO_CLIENT['news']\r\nNEWS_CONTENT_COLLECTION = NEWS_DB.news_content" }, { "alpha_fraction": 0.5965664982795715, "alphanum_fraction": 0.7510729432106018, "avg_line_length": 20.272727966308594, "blob_id": "4db3bece89bc79ad3e22850239f6ad804f28dd83", "content_id": "aaaac41b85a682361d0386923f14bc480c2e3eb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 233, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/requirements.txt", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "Django==1.11.5\ndjangorestframework==3.5.3\ndjango-extra-fields==0.9\ndjango-rest-framework-mongoengine==3.3.1\nGunicorn==19.6.0\nmongoengine==0.13.0\nmysqlclient==1.3.10\nMySQL-python==1.2.5\nscrapy==1.4.0\ncryptography==2.0\nrequests==2.18.4" }, { "alpha_fraction": 0.6239476203918457, "alphanum_fraction": 0.6323667168617249, "avg_line_length": 45.4782600402832, "blob_id": "d4425ca7901f804c76cf8f70999cbeb56f3b270b", "content_id": "78a9fcac65cb7bb67b271aa971bbe85f41a0fdec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 117, "num_lines": 23, "path": "/crawl_bot/crawl_bot/pipelines.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom news_scrappy.models import NewsContent\nfrom news_scrappy.utils import beautify_text, correct_bbc_article_link_to_full_path\n\n\nclass CrawlBotPipeline(object):\n def process_item(self, item, spider):\n article_text = beautify_text(item.get('summary')[0]) if len(item.get('summary')) > 0 else ''\n article_headline = beautify_text(item.get('title')[0]) if len(item.get('title')) > 0 else ''\n article_url = correct_bbc_article_link_to_full_path(item.get('link')[0]) if len(item.get('link')) > 0 else ''\n article_tag = beautify_text(item.get('tag')[0]) if len(item.get('tag')) > 0 else ''\n news = NewsContent(article_headline=article_headline,\n article_text=article_text,\n article_url=article_url,\n article_tag=article_tag)\n news.save()\n\n return item\n" }, { "alpha_fraction": 0.7076923251152039, "alphanum_fraction": 0.7107692360877991, "avg_line_length": 24, "blob_id": "bf097366131843c92c5166d03ac1fadbc7e8cf67", "content_id": "ae4a9d5c306bc8b583ad7c2fb047d604ca68d87c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/news_scrappy/models.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom 
django.db import models\nfrom mongoengine import Document, StringField\n\n\nclass NewsContent(Document):\n    # visible field\n    article_text = StringField()\n    article_headline = StringField()\n    article_url = StringField()\n    article_tag = StringField()\n" }, { "alpha_fraction": 0.6625000238418579, "alphanum_fraction": 0.6625000238418579, "avg_line_length": 22.615385055541992, "blob_id": "2b57db94a53fd6796a1c908ce2a43f14e54ab602", "content_id": "3b8a802c5b3cb707bdb97cf9ebba48864754a266", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 75, "num_lines": 13, "path": "/news_scrappy/urls.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\r\nfrom rest_framework import routers\r\n\r\nfrom news_scrappy import views\r\nfrom news_scrappy.api import api_views\r\n\r\nurlpatterns = [\r\n    # Main page\r\n    url(r'^$', views.main_page),\r\n\r\n    # API URL\r\n    url(r'^api/query/$', api_views.NewsQuery.as_view(), name='news_query'),\r\n]\r\n" }, { "alpha_fraction": 0.5722714066505432, "alphanum_fraction": 0.5752212405204773, "avg_line_length": 20.600000381469727, "blob_id": "8f7308c4e66b7b6c11577b009083ed537ef9c118", "content_id": "33518574c0bb3cab57c3cd7d4abead0f281fe2f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 47, "num_lines": 15, "path": "/news_scrappy/utils.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\n\r\ndef beautify_text(text):\r\n    text = str(text).replace(\"\\n\", \" \").strip()\r\n    return text\r\n\r\n\r\ndef correct_bbc_article_link_to_full_path(url):\r\n\r\n    url = str(url).strip()\r\n    if not url.startswith('http'):\r\n        url = 'http://www.bbc.com' + url\r\n    return url\r\n" }, { "alpha_fraction": 0.6270213723182678, "alphanum_fraction": 0.6744914054870605, "avg_line_length": 37.979591369628906, "blob_id": "42de7a22e3af7c99b99c12472a3dd0eff9eb71ff", "content_id": "ef9cd45d4ea374f4dba9eac6e36cf44a12cf4e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1917, "license_type": "no_license", "max_line_length": 336, "num_lines": 49, "path": "/README.md", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "# web_scraper\n\nThis web_scraper is used to crawl news info and it is built on top of the django and Scrapy frameworks. It is used to crawl \"www.bbc.com\" news at the moment. Currently hosted at Amazon instance http://13.229.116.155/news_scrappy/.\n\nProject Requirements\n----------------------------\n1. django & django rest framework for API. \n2. scrapy\n3. mongoDB\n4. mysql\n\n\nHow to Use\n---------------\n1. git clone the project.\n2. go to the project directory and prepare a virtual environment.\n    - virtualenv venv\n3. install application requirements\n    - pip install -r requirements.txt \n4. install mysql and mongodb.\n\n\nHow to crawl news\n----------------------\n1. go to the web_scraper/crawl_bot directory.\n2. run the crawling command \n    - scrapy crawl news_crawling \n \n \nHow to retrieve/search articles by keyword\n--------------------------------------\n1. go to the web_scraper directory.\n2. 
run the django application command below.\n\n    - Query by Article Text \n        - python manage.py mongo_api --host=http://13.229.116.155 --query_article_text=Rugby\n    - Query by Article Headline\n        - python manage.py mongo_api --host=http://13.229.116.155 --query_article_headline=Rugby\n    - Query by Article Tag\n        - python manage.py mongo_api --host=http://13.229.116.155 --query_article_tag=Rugby\n    - Query based on Keyword for article_tag, article_text, article_headline\n        - python manage.py mongo_api --host=http://13.229.116.155 --query_any=Rugby \n    - Help \n        - python manage.py mongo_api --help\n    \n    - API response Example\n        * Query result = \"[{\"id\":\"59d0c3413f6f67258f1c3576\",\"article_text\":\"New Zealand cap their fifth Rugby Championship victory in six years with a comprehensive win over Argentina.\",\"article_headline\":\"NZ beat Argentina after retaining title\",\"article_url\":\"http://www.bbc.com/sport/rugby-union/41458218\",\"article_tag\":\"Rugby Union\"}]\"\n        * Query status code = \"200\"\n        * Query number of results = \"1\"\n    \n" }, { "alpha_fraction": 0.5307262539863586, "alphanum_fraction": 0.5321229100227356, "avg_line_length": 35.78947448730469, "blob_id": "17fcea511194f5231d88529c4ef6f635d1e70a43", "content_id": "d2832667ad213851145f85ba1136f886bd268f3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", "max_line_length": 65, "num_lines": 19, "path": "/crawl_bot/crawl_bot/spiders/news_crawling.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "from scrapy.spiders import BaseSpider\r\n\r\nclass NewsSpider(BaseSpider):\r\n    name = \"news_crawling\"\r\n    start_urls = ['http://www.bbc.com/']\r\n\r\n    def parse(self, response):\r\n        for brick in response.css('div.media__content'):\r\n            TITLE_SELECTOR = 'h3 a ::text'\r\n            SUMMARY_SELECTOR = 'p ::text'\r\n            LINK_SELECTOR = '.media__title a ::attr(href)'\r\n            TAG_SELECTOR = '.media__tag ::text'\r\n\r\n            yield {\r\n                'title': brick.css(TITLE_SELECTOR).extract(),\r\n                'summary': brick.css(SUMMARY_SELECTOR).extract(),\r\n                'link': brick.css(LINK_SELECTOR).extract(),\r\n                'tag': brick.css(TAG_SELECTOR).extract(),\r\n            }" }, { "alpha_fraction": 0.6263736486434937, "alphanum_fraction": 0.7032967209815979, "avg_line_length": 28, "blob_id": "98cb3eb150826d22e4519bf4f7a06fae0642b522", "content_id": "bc7b179b2181e0a13acc78035c29e340924ac06d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 56, "num_lines": 6, "path": "/web_scrapper/configurations/gunicorn.conf.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "\r\nbind = \"127.0.0.1:29004\"\r\nlogfile = \"/data/projects/logs/web_scraper.gunicorn.log\"\r\nNUM_WORKERS=4\r\nDJANGODIR='/data/projects/web_scraper'\r\ntimeout = 60\r\nproc_name = \"web_scraper\"\r\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 15, "blob_id": "f8bf337d582a9f90c43f29cedc1d485b011fb4ee", "content_id": "00b97c682a0b195e0d0e08cf48d5776f6c80023d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/web_scrapper/settings_local.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "\r\nPRODUCTION = False\r\nDEBUG = True" }, { "alpha_fraction": 0.6489361524581909, 
"alphanum_fraction": 0.6489361524581909, "avg_line_length": 57.53333282470703, "blob_id": "ec06c15d5b9349dd1642c6a53e4606691a6766a2", "content_id": "563b086d489a7408492001ce5f6be8b1be08423d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 108, "num_lines": 30, "path": "/news_scrappy/management/commands/mongo_api.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "import requests\r\nfrom django.core.management.base import BaseCommand, CommandError\r\n\r\n\r\nclass Command(BaseCommand):\r\n help = 'Scrapy API in command.'\r\n\r\n def add_arguments(self, parser):\r\n parser.add_argument('--host', type=str, help=\"URL that need to be query.\")\r\n parser.add_argument('--query_any', type=str, help=\"keyword to query any word in for crawling info.\")\r\n parser.add_argument('--query_article_text', type=str, help=\"keyword to query article text\")\r\n parser.add_argument('--query_article_headline', type=str, help=\"keyword to query article headline\")\r\n parser.add_argument('--query_article_tag', type=str, help=\"keyword to query article tag.\")\r\n\r\n def handle(self, *args, **options):\r\n default_url = options['host'] + \"/news_scrappy/api/query/\"\r\n\r\n if 'query_any' in options and options['query_any']:\r\n url = default_url + \"?query_any=\" + options['query_any']\r\n elif 'query_article_text' in options and options['query_article_text']:\r\n url = default_url + \"?query_article_text=\" + options['query_article_text']\r\n elif 'query_article_headline' in options and options['query_article_headline']:\r\n url = default_url + \"?query_article_headline=\" + options['query_article_headline']\r\n elif 'query_article_tag' in options and options['query_article_tag']:\r\n url = default_url + \"?query_article_tag=\" + options['query_article_tag']\r\n\r\n response = requests.get(url)\r\n self.stdout.write(self.style.SUCCESS('Query result = \"%s\"' % response.text))\r\n self.stdout.write(self.style.SUCCESS('Query status code = \"%s\"' % response.status_code))\r\n self.stdout.write(self.style.SUCCESS('Query number of results\"%s\"' % len(response.json())))\r\n" }, { "alpha_fraction": 0.7202572226524353, "alphanum_fraction": 0.7202572226524353, "avg_line_length": 29.100000381469727, "blob_id": "bbfa1ffc570653a0b62634385a32c52b59ec01a2", "content_id": "28b562566464c2ba46c3c8ec231d04d4ad5b7e60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 89, "num_lines": 10, "path": "/news_scrappy/api/serializers.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "from rest_framework_mongoengine.serializers import DocumentSerializer\r\n\r\nfrom news_scrappy.models import NewsContent\r\n\r\n\r\nclass NewsContentSerializer(DocumentSerializer):\r\n\r\n class Meta:\r\n model = NewsContent\r\n fields = ('id', 'article_text', 'article_headline', 'article_url', 'article_tag')\r\n" }, { "alpha_fraction": 0.6998867392539978, "alphanum_fraction": 0.6998867392539978, "avg_line_length": 38.1363639831543, "blob_id": "943a5466805a0c2e711a093e22affb6ea2b2c4ad", "content_id": "96df238da71501fa38b2b271b115e278dd18baa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1766, "license_type": "no_license", "max_line_length": 155, "num_lines": 44, "path": "/news_scrappy/api/api_views.py", "repo_name": 
"anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "import datetime\r\nimport pprint\r\n\r\nimport pymongo\r\nfrom django.contrib.auth.models import User\r\nfrom django.utils import log\r\nfrom mongoengine import Q\r\nfrom rest_framework import status\r\nfrom rest_framework import viewsets\r\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\n\r\nfrom news_scrappy.api.serializers import NewsContentSerializer\r\nfrom news_scrappy.models import NewsContent\r\n\r\n\r\nclass NewsQuery(APIView):\r\n queryset = NewsContent.objects.all()\r\n serializer_class = NewsContentSerializer\r\n\r\n def get(self, request, format=None):\r\n get_dict = request.GET.copy()\r\n query_any_value = get_dict.get('query_any')\r\n query_article_text_value = get_dict.get('query_article_text')\r\n query_article_headline_value = get_dict.get('query_article_headline')\r\n query_article_tag_value = get_dict.get('query_article_tag')\r\n\r\n query = Q()\r\n if query_any_value:\r\n query = Q(article_text__icontains=query_any_value) | Q(article_headline__icontains=query_any_value) | Q(article_tag__icontains=query_any_value)\r\n elif query_article_text_value:\r\n query = Q(article_text__icontains=query_article_text_value)\r\n elif query_article_headline_value:\r\n query = Q(article_headline__icontains=query_article_headline_value)\r\n elif query_article_tag_value:\r\n query = Q(article_tag__icontains=query_article_tag_value)\r\n\r\n print query\r\n news_obj = NewsContent.objects.filter(query)\r\n serializer = NewsContentSerializer(news_obj, many=True)\r\n\r\n # return JsonResponse(serializer.data, safe=False)\r\n return Response(serializer.data)\r\n" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7492063641548157, "avg_line_length": 23.30769157409668, "blob_id": "f6a457d60521896c5f7768219d82751c409feaa2", "content_id": "7a01efdb7f363b4866a4e4bd0ec5d7ad2de18ab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/news_scrappy/views.py", "repo_name": "anthony1110/web_scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, render_to_response\n\n# Create your views here.\nfrom django.template import RequestContext\n\n\ndef main_page(request):\n context = {}\n return JsonResponse(context, safe=False)" } ]
14
MMatlacz/HackUPC2
https://github.com/MMatlacz/HackUPC2
2f7b3d23c6613173cc9ad0e55e48f0ed3d9510b1
d12b8a9f891c8734f4015c3b47d8f0109a76e76e
bf1835e09bdf278b5142169fd4d9269de3268db1
refs/heads/master
2020-04-11T11:06:07.654335
2017-06-02T06:34:49
2017-06-02T06:34:49
52,140,558
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7837837934494019, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 18, "blob_id": "85a3e553bd6ba807c7c7282ca683212f8488cc55", "content_id": "0a7def19667479e3fcd0bf65a87359bf95d86472", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/backend/run.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "from app import setup_app\nsetup_app()" }, { "alpha_fraction": 0.6227390170097351, "alphanum_fraction": 0.633074939250946, "avg_line_length": 23.1875, "blob_id": "1e0a916742a70eff734c293faf164c159662a901", "content_id": "ac4af30eb15e053d9befcc9db32b163f530b2f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 387, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/backend/app.ini", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "[uwsgi]\n# Staging configuration hardcoded\nplugins\t\t= python\n\nchdir = /home/niespodd/groovescanner/backend\nmodule = app\ncallable\t= app\n# home = /home/niespodd/.virtualenvs/upc\n\nmaster = true\nprocesses = 1\nsocket = /home/niespodd/groovescanner/backend/app.sock\nchmod-socket = 777\n\n# clear environment on exit\nvacuum = true\n" }, { "alpha_fraction": 0.8440860509872437, "alphanum_fraction": 0.8548387289047241, "avg_line_length": 61, "blob_id": "069f84e59462b34501ef22add4cc6f84665bf42b", "content_id": "cfc0a80aeadcfbbc0fe37acc79ad7fffae05497b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "no_license", "max_line_length": 118, "num_lines": 3, "path": "/README.md", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "Application written during 36hours hackathon HACKUPC in Barcelona\n\nIt allows user to choose event from facebook and automatically looks up cheapest airplane connection and accomodation.\n" }, { "alpha_fraction": 0.5487805008888245, "alphanum_fraction": 0.707317054271698, "avg_line_length": 12.666666984558105, "blob_id": "cec80804d2a904750cef9dfa84e82571f16914d6", "content_id": "84eba56361664bcea8f6714e842da9b2c0d72dda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 82, "license_type": "no_license", "max_line_length": 19, "num_lines": 6, "path": "/requirements.txt", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "ipdb\nfacebook\nFlask==0.10.1\nFlask-Cors==2.1.2\nrequests==2.9.1\nfacebook-sdk==0.4.0\n" }, { "alpha_fraction": 0.4570135772228241, "alphanum_fraction": 0.5565611124038696, "avg_line_length": 15.923076629638672, "blob_id": "d73637b70e3f5bd29f7d4dd7c2bf8cee5ccdc27b", "content_id": "bdce10ef81ee06339019f790b9c2d20f7e2e5536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/backend/config.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "FACEBOOK = {\n 'APPLICATION_ID': '',\n 'APPLICATION_SECRET': ''\n}\n\nEVENT = {\n 'FIELDS': ['id', 'name', 'start_time', 'end_time'],\n 'LIMIT': 15\n}\n\nAIRPORT = {\n 'API_KEY': '574ba58133fe3a81f705561f2d65abff'\n}\n\n" }, { "alpha_fraction": 0.6317430138587952, "alphanum_fraction": 0.6374970078468323, "avg_line_length": 30.59848403930664, "blob_id": 
"b5cd83d612f4e04fb3f3392fe7ad0163876065e2", "content_id": "4144944d9d16ea9f63c13bfcd8b7587604ec62f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4171, "license_type": "no_license", "max_line_length": 121, "num_lines": 132, "path": "/backend/flyscanner.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "import urllib2\n\nfrom flask import json\n\nmarket = \"UK\"\ncurrency = \"EUR\"\nlocale = \"en-GB\"\n\n\ndef get_api_key():\n return 'api-key'\n\n\ndef return_grid(market, currency, locale, originPlace, destinationPlace, outboundPartialDate, inboundPartialDate):\n api_key = get_api_key()\n url = 'http://partners.api.skyscanner.net/apiservices/browsegrid/v1.0/{}/{}/{}/{}/{}/{}/{}?apiKey={}'.format(\n market, currency, locale, originPlace, destinationPlace, outboundPartialDate, inboundPartialDate, api_key\n )\n print url\n grid = urllib2.urlopen(url).read()\n grid = json.loads(grid)\n\n return json.dumps(grid)\n\n\ndef get_locales():\n api_key = get_api_key()\n url = 'http://partners.api.skyscanner.net/apiservices/reference/v1.0/locales?apiKey={}'.format(\n api_key\n )\n locales = urllib2.urlopen(url).read()\n locales = json.loads(locales)\n\n return json.dumps(locales)\n\n\ndef get_markets(locale):\n api_key = get_api_key()\n url = 'http://partners.api.skyscanner.net/apiservices/reference/v1.0/countries/{}?apiKey={}'.format(\n locale, api_key\n )\n markets = urllib2.urlopen(url).read()\n markets = json.loads(markets)\n\n return json.dumps(markets)\n\n\ndef get_currencies():\n api_key = get_api_key()\n url = 'http://partners.api.skyscanner.net/apiservices/reference/v1.0/currencies?apiKey={}'.format(\n api_key\n )\n currencies = urllib2.urlopen(url).read()\n currencies = json.loads(currencies)\n\n return json.dumps(currencies)\n\n\ndef get_airports(query):\n api_key = get_api_key()\n url = 'http://partners.api.skyscanner.net/apiservices/autosuggest/v1.0/{}/{}/{}/?query={}&apiKey={}'.format(\n market, currency, locale, query, api_key\n )\n currencies = urllib2.urlopen(url).read()\n currencies = json.loads(currencies)\n\n return json.dumps(currencies)\n\n\ndef get_hotels(query, checkin_date, checkout_date, guests, rooms):\n hotels = json.loads(get_hotels_ids(query))\n ids = []\n results = hotels['results']\n for result in results:\n ids.append(result['individual_id'])\n hotels = {}\n for id in ids:\n try:\n hotels['id'] = json.loads(get_hotels_list(id, checkin_date, checkout_date, guests, rooms))\n except TypeError:\n continue\n\n min_price = None\n id = None\n _hotel = None\n for price in hotels['id']['hotels_prices']:\n if price['agent_prices'][0]['price_total'] < min_price or min_price is None:\n min_price = price['agent_prices'][0]['price_total']\n id = price['id']\n\n for hotel in hotels['id']['hotels']:\n if hotel['hotel_id'] == id:\n _hotel = hotel\n _hotel['price'] = min_price\n return json.dumps(_hotel)\n\n\ndef get_hotels_ids(query): # query is city name\n api_key = get_api_key()\n url = \"http://partners.api.skyscanner.net/apiservices/hotels/autosuggest/v2/{}/{}/{}/{}?apikey={}\".format(\n market, currency, locale, query, api_key\n )\n hotels = urllib2.urlopen(url).read()\n hotels = json.loads(hotels)\n\n return json.dumps(hotels)\n\n\n# hotels have unique entity_id\ndef get_hotels_list(entity_id, checkin_date, checkout_date, guests, rooms):\n # create session\n api_key = get_api_key()\n url = 
\"http://partners.api.skyscanner.net/apiservices/hotels/liveprices/v2/{}/{}/{}/{}/{}/{}/{}/{}?apiKey={}\".format(\n market, currency, locale, entity_id, checkin_date, checkout_date, guests, rooms, api_key\n )\n try:\n hotels_list = urllib2.urlopen(url).read()\n except urllib2.HTTPError:\n hotels_list = None\n # next_poll = \"http://partners.api.skyscanner.net\" + hotels_list.info().getheader('Location')\n # hotels_list = hotels_list.read()\n '''\n # polling session\n extended_hotels_list = [hotels_list]\n while hotels_list['status'] != \"COMPLETE\":\n hotels_list = urllib2.urlopen(next_poll)\n next_poll = \"http://partners.api.skyscanner.net\" + hotels_list.info().getheader('Location')\n hotels_list = hotels_list.read()\n hotels_list = json.loads(hotels_list)\n extended_hotels_list.append(hotels_list)\n return json.dumps(extended_hotels_list)'''\n return hotels_list\n" }, { "alpha_fraction": 0.6208869814872742, "alphanum_fraction": 0.6251788139343262, "avg_line_length": 28.04166603088379, "blob_id": "be19f4309e2adc2f068c3760a1805347e5a1b17e", "content_id": "bf7fe266217d0fb69727cd5628a750f9f8743e91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 90, "num_lines": 24, "path": "/backend/airport.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "import json\n\nimport requests\n\nimport config\nimport urllib\n\n\nclass AirportAPI:\n api_key = config.AIRPORT.get('API_KEY')\n endpoint = 'https://airport.api.aero/airport/?user_key={token}'\n\n @staticmethod\n def get_airports(long=None, lat=None):\n args = {'user_key': AirportAPI.api_key}\n url = 'https://airport.api.aero/airport/'\n if long and lat:\n url += \"nearest/{0}/{1}/\".format(long, lat)\n url += '?' 
+ urllib.urlencode(args)\n\n airports_request = requests.get(url, headers={'content-type': 'application/json'})\n response_text = airports_request.text.replace(\"callback(\", \"\")[:-1]\n\n return json.loads(response_text)['airports']\n\n\n" }, { "alpha_fraction": 0.5472049713134766, "alphanum_fraction": 0.5583850741386414, "avg_line_length": 33.27659606933594, "blob_id": "d7d07e2efbdf78977e81f07da253a08859d8dcfc", "content_id": "be3cc5cbba8fed47814279f53e7893479a6ec742", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1610, "license_type": "no_license", "max_line_length": 124, "num_lines": 47, "path": "/backend/alpha.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "import urllib\nimport urllib2\nfrom xml.etree import ElementTree as etree\n#pod\n\nclass wolfram(object):\n def __init__(self, appid):\n self.appid = appid\n self.base_url = 'http://api.wolframalpha.com/v2/query?'\n self.headers = {'User-Agent':None}\n\n ip = \"Barcelona International Airport\"\n\n def get_airport_details(self, name):\n #details = urllib2.urlopen(\"http://api.wolframalpha.com/v2/query?input=\" + name + \"&appid=GK3LPL-YW5RJ662W4\").read()\n ip = name\n xml = self._get_xml(ip)\n result_dics = self._xmlparser(xml)\n\n print 'Available Titles', '\\n'\n titles = dict.keys(result_dics)\n for ele in titles : print '\\t' + ele\n print '\\n'\n print result_dics\n\n def _get_xml(self, ip):\n url_params = {'input':ip, 'appid':self.appid}\n data = urllib.urlencode(url_params)\n print data\n req = urllib2.Request(self.base_url, data, self.headers)\n xml = urllib2.urlopen(req).read()\n return xml\n\n def _xmlparser(self, xml):\n data_dics = {}\n tree = etree.fromstring(xml)\n #retrieving every tag with label 'plaintext'\n for e in tree.findall('pod'):\n for item in [ef for ef in list(e) if ef.tag=='subpod']:\n for it in [i for i in list(item) if i.tag=='plaintext']:\n if it.tag=='plaintext':\n data_dics[e.get('title')] = it.text\n return data_dics\n\nappid = 'GK3LPL-YW5RJ662W4'\nw = wolfram(appid)\nw.get_airport_details(\"BHX\")" }, { "alpha_fraction": 0.6017354726791382, "alphanum_fraction": 0.6110113859176636, "avg_line_length": 28.83035659790039, "blob_id": "f11bb5943092d0458d89d2d10cbeb584daa642c2", "content_id": "28b8a428547217f233e5a28c2b55ad854bdf3cc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3342, "license_type": "no_license", "max_line_length": 123, "num_lines": 112, "path": "/backend/app.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "import HTMLParser\nimport json\nimport urllib2\n\nfrom flask import Flask, request\n\nimport alpha\nimport config\nimport facebook\nimport flyscanner\nfrom cors import CORS\nfrom transits import get_connections\n\napp = Flask(__name__)\napp.debug = True\nCORS(app)\n\napp_url = '/api'\n\nairports_db = open('airports.dat', 'rb+')\nairports = airports_db.read()\n\n# Facebook client\nfacebook_app_id = config.FACEBOOK.get('APPLICATION_ID')\nfacebook_app_secret = config.FACEBOOK.get('APPLICATION_SECRET')\n\n# facebook_access_token = facebook.get_app_access_token(app_id=facebook_app_id, app_secret=facebook_app_secret)\nfacebook_client = facebook.GraphAPI(access_token='')\n\n# temporary acc_token hack\nfacebook_client.access_token = 
'CAAYcd83pBg4BAKV6wvZAJH4xv2U1WE7qDSo0RPe4XP5mcZBwwaORiAmjKj95SCxIXntDeBDGGnIgSWS9nuuzqO3udyxvenzFaF9wmcKucxTZAgp4TwUYfMbYpH1aSTT5yNA2uu2ZAasf3vanyz9V2R8XNcRK9meMgULAV7MGJHStJYWjM2cWk7ZA41LVAJ1UZD'\n\n\[email protected](app_url + '/event/')\ndef get_event():\n if request.method == 'GET':\n query_string = request.args['q']\n params = {\n 'q': query_string,\n 'type': 'event'\n }\n\n search_results = facebook_client.request(path='search', args=params)\n parsed_results = list()\n\n for event in search_results.get('data', []):\n parsed_event = {\n 'thumbnail': 'https://graph.facebook.com/{0}/picture?access_token={1}'.format(event['id'],\n facebook_client.access_token)\n }\n for key in config.EVENT.get('FIELDS', []):\n if event.has_key(key):\n parsed_event.update({key: event[key]})\n\n if parsed_event != {}:\n parsed_results.append(parsed_event)\n return json.dumps(parsed_results[:config.EVENT.get('LIMIT', 10)])\n\n\[email protected](app_url + '/event/<id>/')\ndef get_event_by_id(id):\n if request.method == 'GET':\n return json.dumps(facebook_client.get_object(id=id, args={'access_token': facebook_client.access_token}))\n\n\[email protected](app_url + '/hotel/')\ndef hotels():\n if request.method == 'GET':\n city = request.args['city']\n checkin = request.args['checkin']\n checkout = request.args['checkout']\n guests = request.args['guests']\n rooms = request.args['rooms']\n return flyscanner.get_hotels(city, checkin, checkout, guests, rooms)\n\n\[email protected](app_url + '/to/')\ndef transit():\n if request.method == 'GET':\n event_city = request.args['event_city']\n event_country = request.args['event_country']\n start_city = request.args['start_city']\n start_country = request.args['start_country']\n out_time = request.args['out_time']\n in_time = request.args['in_time']\n return get_connections(event_city, event_country, start_city, start_country, out_time, in_time)\n\n\[email protected](app_url + '/airports/')\ndef return_airports():\n return airports\n\n\[email protected](app_url + '/wolfram/<id>/')\ndef get_airport_detais_wolfram(id):\n if request.method == 'GET':\n return json.dumps(alpha.wolfram.get_airport_details(id))\n\n\[email protected](app_url + '/airports/<id>/')\ndef airport(id):\n city = urllib2.urlopen(\"https://www.wolframcloud.com/objects/caf4da56-9cc9-4673-8bc0-63a1371180ac?code=\"+id).read()\n city = city[17:]\n city = city[:-3]\n\n city = city.split(\"\\\"\")\n print city\n\n return json.dumps({'city': city[3], 'country': city[-2]})\n\nif __name__ == '__main__':\n app.run()\n\n" }, { "alpha_fraction": 0.5744216442108154, "alphanum_fraction": 0.5818420052528381, "avg_line_length": 33.712120056152344, "blob_id": "dc2228276667c1cfdc43d7d81724e0b56592dbd2", "content_id": "094f57156ed40981ded1886f4c8cda10fde69902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2291, "license_type": "no_license", "max_line_length": 117, "num_lines": 66, "path": "/backend/transits.py", "repo_name": "MMatlacz/HackUPC2", "src_encoding": "UTF-8", "text": "import itertools\n\nfrom flask import json\n\nimport flyscanner\nfrom flyscanner import return_grid\n\n\ndef get_connections(event_city, event_country, start_city, start_country, out_time, in_time):\n outairports = []\n destairports = []\n airports = json.loads(flyscanner.get_airports(event_city))\n for airport in airports['Places']:\n if str(airport['CountryName']).lower() == str(event_country).lower() and str(\n airport['PlaceName']).lower() == str(event_city).lower():\n 
destairports.append(airport)\n airports = json.loads(flyscanner.get_airports(start_city))\n for airport in airports['Places']:\n if str(airport['CountryName']).lower() == str(start_country).lower() and str(\n airport['PlaceName']).lower() == str(start_city).lower():\n outairports.append(airport)\n\n connections = []\n for start in outairports:\n for dest in destairports:\n connections.append(\n json.loads(return_grid(flyscanner.market, flyscanner.currency, flyscanner.locale, start['PlaceId'],\n dest['PlaceId'],\n out_time, in_time)))\n\n for conn in connections:\n conn = conn\n carriers = conn['Carriers']\n dates = conn['Dates']\n currencies = conn['Currencies']\n places = conn['Places']\n output = {'flights': {}}\n i = 0\n d = []\n for date in dates[0]:\n if date is not None:\n d.append(date)\n for inbound in dates[1:]:\n for d1, d2 in itertools.izip(d, inbound[1:]):\n if d2 is None or d1 is None or inbound[0] is None:\n continue\n i += 1\n output['flights'][i] = {\"Out\": d1['DateString'], \"In\": inbound[0]['DateString'], \"Price\": d2['MinPrice'],\n \"QuoteDateTime\": d2['QuoteDateTime']}\n\n\n byprice = []\n for flight in output['flights']:\n byprice.append(output['flights'][flight])\n\n byprice.sort(key=lambda tup: tup['Price'])\n\n #output['places'] = places\n\n cheapest = byprice[:4]\n #find hotels:\n for flight in cheapest:\n print flight\n flight['hotel'] = json.loads(flyscanner.get_hotels(event_city, flight['Out'], flight['In'], 1, 1))\n\n return json.dumps(cheapest)\n" } ]
10
Anon-txt/Hash-Algorithms-Python
https://github.com/Anon-txt/Hash-Algorithms-Python
3966edc0282f253d5092525cd3cba9732e9a0f73
e6c55aa5a036db4d7bd9d233a7b0f06d3cefa8a5
0001d455e95dfeeddba5303c7dabef4788eef96f
refs/heads/master
2022-04-13T01:17:25.871509
2020-03-28T18:05:44
2020-03-28T18:05:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5594855546951294, "alphanum_fraction": 0.6768488883972168, "avg_line_length": 17.02898597717285, "blob_id": "7149bad5b02bf2b8403713e828efedfe915493e8", "content_id": "f2b463995cc83a21d57c6f0f70053acc9722f290", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1244, "license_type": "permissive", "max_line_length": 47, "num_lines": 69, "path": "/src/hash_algorithms.py", "repo_name": "Anon-txt/Hash-Algorithms-Python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport hashlib\n\n\ndef sha1(string):\n sha1 = hashlib.sha1(string).hexdigest()\n return sha1\n\n\ndef sha224(string):\n sha224 = hashlib.sha224(string).hexdigest()\n return sha224\n\n\ndef sha256(string):\n sha256 = hashlib.sha256(string).hexdigest()\n return sha256\n\n\ndef sha384(string):\n sha384 = hashlib.sha384(string).hexdigest()\n return sha384\n\n\ndef sha512(string):\n sha512 = hashlib.sha512(string).hexdigest()\n return sha512\n\n\ndef sha3_224(string):\n sha3_224 = hashlib.sha3_224()\n sha3_224.update(string)\n return sha3_224.hexdigest()\n\n\ndef sha3_256(string):\n sha3_256 = hashlib.sha3_256()\n sha3_256.update(string)\n return sha3_256.hexdigest()\n\n\ndef sha3_384(string):\n sha3_384 = hashlib.sha3_384()\n sha3_384.update(string)\n return sha3_384.hexdigest()\n\n\ndef sha3_512(string):\n sha3_512 = hashlib.sha3_512()\n sha3_512.update(string)\n return sha3_512.hexdigest()\n\n\ndef md5(string):\n md5 = hashlib.md5(string).hexdigest()\n return md5\n\n\ndef blake2b(string):\n blake2b = hashlib.blake2b()\n blake2b.update(string)\n return blake2b.hexdigest()\n\n\ndef blake2s(string):\n blake2s = hashlib.blake2s()\n blake2s.update(string)\n return blake2s.hexdigest()\n" }, { "alpha_fraction": 0.4371257424354553, "alphanum_fraction": 0.628742516040802, "avg_line_length": 10.133333206176758, "blob_id": "d5537fa0964195766109ec7a6ff207e169cdddf3", "content_id": "4286bd1518984cae110c52a1e5340ff4aad51f9b", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 170, "license_type": "permissive", "max_line_length": 24, "num_lines": 15, "path": "/README.md", "repo_name": "Anon-txt/Hash-Algorithms-Python", "src_encoding": "UTF-8", "text": "# Secure Hash Algorithm \n- Sha1\n- Sha224\n- Sha256\n- Sha384\n- Sha512\n- Sha3_224\n- Sha3_256\n- Sha3_384\n- Sha3_512\n- Md5\n- Blake2b\n- Blake2s \n \n_(Python Version)_🐍\n" } ]
2
Zachary-Summers/Special-Circles-2
https://github.com/Zachary-Summers/Special-Circles-2
79c953e79962332db50911cc8b7218b420f9a6e6
25f2f97807796d29815c658f6618529571e764a4
3e84986525856247a0279dec9712ea831130bc30
refs/heads/master
2022-12-10T08:57:18.362039
2020-09-08T15:16:49
2020-09-08T15:16:49
293,847,440
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5038700699806213, "alphanum_fraction": 0.570759654045105, "avg_line_length": 31.703125, "blob_id": "612c4cb58c58c759b9ade841b23c8d0c5e28d30b", "content_id": "5bc0d90d3eccd078f04dd7fecc839a74068c2469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10465, "license_type": "no_license", "max_line_length": 789, "num_lines": 320, "path": "/main.py", "repo_name": "Zachary-Summers/Special-Circles-2", "src_encoding": "UTF-8", "text": "import pygame, random, time, sys, os\nos.system('clear')\nprint(\"\\033[\\n36mSpecial Circles \\033[0m\")\nprint(\"\\033[36mBy Zachary And Dane \\n\\033[0m\")\nprint(\"\\033[34mYou are the green ball. The red ball is trying to touch you to make you lose a life. The blue ball will give you a life. The two same colored balls are portals that you can teleport between. the grey ball will make you faster and yellow. The big pink ball will also kill you but is immobile. The pink balls at the top are your lives. If you get enough lives your level goes up. If you lose all your lives it goes to one. The white balls are your level. The arrow keys and WASD keys will move your charecter. The e key will quit, the r key will restart everything, the p key will pause, the u key will unpause, the g key will pause the red ball for playtesting, the b key will unpause the red ball, c will clear the terminal, and z will use your power if possible.\\n\\033[0m\")\nhighScore = 0\npygame.init()\nscreen = pygame.display.set_mode([800,600])\ngreen = (255, 0, 166) \nblue = (0, 0, 0) \nsquare = False \nfont = pygame.font.SysFont(\"comicsansms\", 50)\ntext = font.render('Special Circles', True, green, blue) \ntextRect = text.get_rect() \ntextRect.center = (650, 35) \nradius = 10\nCOLOR = (0,255,0)\nBAD_COLOR = (255,0,0)\nEND_COLOR = (0, 13, 255)\nLIFE_COLOR = (255, 120, 196)\nISLAND_COLOR = (255, 0, 255)\nBAD_ISLAND_COLOR = (255, 0, 136)\nSLOW_COLOR = (128, 62, 57)\nclock = 3 \nwNum = 10\nlevel = 1\nspeed = 2\nb = True\nprint('level:',level)\nx_change = 0\ny_change = 0\nx = random.randint(0,800)\ny = random.randint(0,600)\nbx = random.randint(0,800)\nby = random.randint(0,600)\nfx = random.randint(0,800)\nfy = random.randint(0,600)\nix = random.randint(0,800)\niy = random.randint(0,600)\nbix = random.randint(0,800)\nbiy = random.randint(0,600)\nitx = random.randint(0,800)\nity = random.randint(0,600)\nsx = random.randint(0,800)\nsy = random.randint(0,600)\nrun = True\nBLACK = (0,0,0)\nlives = 5\nprint(lives)\nex = random.randint(0,800)\ney = random.randint(0,600)\ndo = False\ndo2 = False\ndo3 = True\ndo4 = True\ndo5 = False\nwhile run:\n if level % 2 == 0:\n square == True\n if level > highScore:\n highScore = level\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n x_change = -speed\n elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n x_change = speed\n elif event.key == pygame.K_UP or event.key == pygame.K_w:\n y_change = -speed\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n y_change = speed\n elif event.key == pygame.K_p:\n clock = 1\n elif event.key == pygame.K_u:\n clock = 3\n elif event.key == pygame.K_e:\n pygame.quit()\n quit()\n elif event.key == pygame.K_z and level % 2 == 1 and speed == 3 and do3 == True:\n lives += 1\n do3 = False\n elif event.key == pygame.K_z and level % 2 == 0 and speed >= 2 and do4 == True:\n wNum -= 1\n do4 = False\n elif event.key == pygame.K_r:\n do3 = True\n do4 = True\n 
clock = 3\n wNum = 10\n level = 1\n print('level:',level)\n x_change = 0\n y_change = 0\n x = random.randint(0,800)\n y = random.randint(0,600)\n bx = random.randint(0,800)\n by = random.randint(0,600)\n ix = random.randint(0,800)\n iy = random.randint(0,600)\n bix = random.randint(0,800)\n biy = random.randint(0,600)\n ity = random.randint(0,600)\n itx = random.randint(0,800)\n run = True\n lives = 5\n print(lives)\n ex = random.randint(0,800)\n ey = random.randint(0,600)\n elif event.key == pygame.K_g:\n b = False\n elif event.key == pygame.K_b:\n b = True\n elif event.key == pygame.K_c:\n os.system('clear')\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN or event.key == pygame.K_a or event.key == pygame.K_s or event.key == pygame.K_d or event.key == pygame.K_w:\n x_change = 0\n y_change = 0\n if event.type == pygame.MOUSEBUTTONDOWN:\n Mouse_x, Mouse_y = pygame.mouse.get_pos()\n do5 = True\n if Mouse_x > x:\n x_change = speed\n elif Mouse_x < x:\n x_change = -speed\n elif event.type == pygame.MOUSEBUTTONUP:\n x_change = 0\n y_change = 0\n do5 = False\n elif do5 == True:\n if Mouse_y > y:\n y_change = speed\n elif Mouse_y < y:\n y_change = -speed \n if clock == 3:\n time.sleep(0.01)\n if bx < x:\n bx_change = 1\n if bx > x:\n bx_change = -1\n if by < y:\n by_change = 1\n if by > y:\n by_change = -1\n if ex < x:\n ex_change = -1\n if ex > x:\n ex_change = 1\n if ey < y:\n ey_change = -1\n if ey > y:\n ey_change = 1\n if x >= 800:\n x_change = -speed\n elif x <= 0:\n x_change = speed\n elif y >= 600:\n y_change = -speed\n elif y <= 0 :\n y_change = speed\n elif bx >= 800:\n bx_change = -1\n elif bx <= 0:\n bx_change = 1\n elif by >= 600:\n by_change = -1\n elif by <= 0:\n by_change = 1\n elif ex >= 800:\n ex_change = -1\n elif ex <= 0:\n ex_change = 1\n elif ey >= 600:\n ey_change = -1\n elif ey <= 0:\n ey_change = 1\n elif x + radius > sx - radius and y + radius >= sy - radius and x - radius <= sx + radius and sy - radius <= sy + radius and y - radius <= sy + radius:\n speed = 1\n COLOR = (255, 106, 0)\n elif x + radius >= ix - 15 and y + radius >= iy - 15 and x - radius <= ix + 15 and y - radius <= iy + 15:\n x = itx + 26\n y = ity + 26\n elif x + radius >= itx - 15 and y + radius >= ity - 15 and x - radius <= itx + 15 and y - radius <= ity + 15:\n x = ix + 26\n y = iy + 26\n elif (x + radius >= bx - radius and y + radius >= by - radius and x - radius <= bx + radius and y - radius <= by + radius) or (x + radius>= bix - 15 and y + radius >= biy - 15 and x - radius <= bix + 15 and y - radius <= biy + 15):\n lives-=1\n print(lives)\n x = random.randint(0,800)\n y = random.randint(0,600)\n bx = random.randint(0,800)\n by = random.randint(0,600)\n ex = random.randint(0,800)\n ey = random.randint(0,600)\n fx = random.randint(0,600)\n fy = random.randint(0,800)\n ix = random.randint(0,800)\n iy = random.randint(0,600)\n bix = random.randint(0,800)\n biy = random.randint(0,600)\n itx = random.randint(0,800)\n ity = random.randint(0,600)\n COLOR = (0, 255, 0)\n speed = 2\n do3 = True\n do4 = True\n time.sleep(0.1)\n elif x + radius > fx - radius and y + radius >= fy - radius and x - radius <= fx + radius and fy - radius <= fy + radius and y - radius <= fy + radius:\n speed = 3 \n COLOR = (255, 255, 0)\n elif x + radius >= ex - radius and y + radius >= ey - radius and x - radius <= ex + radius and y - radius <= ey + radius:\n lives+=1\n print(lives)\n x = random.randint(0,800)\n y = 
random.randint(0,600)\n bx = random.randint(0,800)\n by = random.randint(0,600)\n ex = random.randint(0,800)\n ey = random.randint(0,600)\n fx = random.randint(0,600)\n fy = random.randint(0,800)\n ix = random.randint(0,800)\n iy = random.randint(0,600)\n bix = random.randint(0,800)\n biy = random.randint(0,600)\n itx = random.randint(0,800)\n ity = random.randint(0,600)\n COLOR = (0, 255, 0)\n speed = 2\n do3 = True\n do4 = True\n time.sleep(0.1)\n if lives <= -1:\n print(\"\\033[31mYOU LOSE \\n\\033[0m\")\n level = 1\n x_change = 0\n y_change = 0\n x = random.randint(0,800)\n y = random.randint(0,600)\n bx = random.randint(0,800)\n by = random.randint(0,600)\n ix = random.randint(0,800)\n iy = random.randint(0,600)\n bix = random.randint(0,800)\n biy = random.randint(0,600)\n itx = random.randint(0,800)\n ity = random.randint(0,600)\n run = True\n BLACK = (0,0,0)\n lives = 5\n do3 = True\n do4 = True\n print('level:',level)\n print(lives)\n fx = random.randint(0,600)\n fy = random.randint(0,800)\n COLOR = (0, 255, 0)\n speed = 2\n time.sleep(0.1)\n elif lives >= wNum:\n print(\"\\033[32mYOU WIN \\n\\033[0m\")\n level+=1\n x_change = 0\n y_change = 0\n x = random.randint(0,800)\n y = random.randint(0,600)\n bx = random.randint(0,800)\n by = random.randint(0,600)\n ix = random.randint(0,800)\n iy = random.randint(0,600)\n bix = random.randint(0,800)\n biy = random.randint(0,600)\n run = True\n itx = random.randint(0,800)\n ity = random.randint(0,600)\n BLACK = (0,0,0)\n lives = 5\n print('level:',level)\n ex = random.randint(0,800)\n ey = random.randint(0,600)\n wNum += 1\n print(lives)\n fx = random.randint(0,600)\n fy = random.randint(0,800)\n do3 = True\n do4 = True\n COLOR = (0, 255, 0)\n speed = 2\n time.sleep(0.1)\n if b == True:\n bx = bx + bx_change\n by = by + by_change\n x += x_change\n y += y_change\n def lifeCircles(repLife):\n for i in range (repLife):\n pygame.draw.circle(screen, LIFE_COLOR,((i*20+10), 50), radius)\n def levelCircles(repLevel):\n for a in range (repLevel):\n pygame.draw.circle(screen, (255,255,255),((a*20+10), 100), radius)\n def highScoreCircles(repHighScore):\n for h in range (repHighScore):\n pygame.draw.circle(screen, (0, 255, 238), ((h*20+10), 150), radius)\n screen.fill(BLACK)\n screen.blit(text, textRect)\n lifeCircles(lives)\n levelCircles(level)\n highScoreCircles(highScore)\n pygame.draw.circle(screen,SLOW_COLOR,(sx,sy),radius)\n pygame.draw.circle(screen,ISLAND_COLOR,(itx,ity), 15)\n pygame.draw.circle(screen, COLOR,(x,y), radius)\n pygame.draw.circle(screen, END_COLOR, (ex,ey),radius)\n pygame.draw.circle(screen, BAD_COLOR,(bx,by), radius)\n pygame.draw.circle(screen, (50,50,50),(fx, fy), radius)\n pygame.draw.circle(screen, ISLAND_COLOR,(ix,iy), 15)\n pygame.draw.circle(screen, BAD_ISLAND_COLOR,(bix, biy), 15)\n if level % 2 == 0:\n rectangle = pygame.Rect((x - 10, y - 10), (20, 20))\n pygame.draw.rect(screen, COLOR, rectangle)\n pygame.display.update()\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 23.5, "blob_id": "9b56fb3e549e9adc47bc2441e0be7849eb294474", "content_id": "f8a91a7b6604c268b2652b3d00c90705546875aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "Zachary-Summers/Special-Circles-2", "src_encoding": "UTF-8", "text": "# Special-Circles-2\nCreated by Zachary, Dane, and Ethan\n" } ]
2
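The game loop embedded in the record above steps its chasing circle toward the player one pixel per axis per frame (`if bx < x: bx_change = 1`, and so on). A minimal sketch of that follower logic pulled out into a standalone function; the names `step_toward`, `follower_*`, and `target_*` are illustrative and not from the original source:

```python
def step_toward(follower_x, follower_y, target_x, target_y, step=1):
    """Per-frame velocity that moves a follower one step toward the
    target on each axis, mirroring the chase logic in the game loop."""
    if follower_x < target_x:
        dx = step
    elif follower_x > target_x:
        dx = -step
    else:
        dx = 0
    if follower_y < target_y:
        dy = step
    elif follower_y > target_y:
        dy = -step
    else:
        dy = 0
    return dx, dy


# An enemy at (100, 50) chasing a player at (40, 80) drifts
# one pixel left and one pixel down on this frame.
print(step_toward(100, 50, 40, 80))  # (-1, 1)
```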
JinSeal/GA-SEI34-Project3-FreeSpirits
https://github.com/JinSeal/GA-SEI34-Project3-FreeSpirits
5264bbcde677f915eb1d56743f6d20e3a13626bc
febf2ddd6b6c4d4bdd6372a9436ab0513a3ffbcd
384308f9009c7eedea85409e4122ed445c9c8e0b
refs/heads/master
2020-11-25T07:23:07.896717
2020-01-24T07:08:57
2020-01-24T11:20:42
228,549,012
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41969752311706543, "alphanum_fraction": 0.42978033423423767, "avg_line_length": 27.336734771728516, "blob_id": "eb1a3a7c25903fdfd69916eb24528aa8d1d4045d", "content_id": "03dc5a57155027f7396e1b5fa299633fde825700", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5554, "license_type": "no_license", "max_line_length": 80, "num_lines": 196, "path": "/components/DonationForm.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import React, { Component } from \"react\";\nimport Router from \"next/router\";\nimport gql from \"graphql-tag\";\nimport {\n FormField,\n TextInputField,\n Autocomplete,\n TextInput,\n Checkbox,\n Button,\n Heading,\n Combobox\n} from \"evergreen-ui\";\nimport styled from \"styled-components\";\nimport StripeCheckout from \"react-stripe-checkout\";\nimport { Query, Mutation } from \"react-apollo\";\nimport _ from \"underscore\";\n\nconst Styles = styled.div`\n background-color: white;\n padding: 2rem 5rem;\n`;\n\nconst ALL_CATS_QUERY = gql`\n query ALL_CATS_QUERY {\n allCats {\n id\n name\n image\n iucnStatus\n }\n }\n`;\n\nconst CREATE_DONATION_MUTATION = gql`\n mutation CREATE_DONATION_MUTATION(\n $amount: Int!\n $email: String!\n $stripetoken: String!\n $cat: String!\n ) {\n createDonation(\n input: {\n amount: $amount\n email: $email\n stripetoken: $stripetoken\n cat: $cat\n }\n ) {\n ok\n donation {\n id\n }\n }\n }\n`;\n\nclass DonationForm extends Component {\n state = {\n email: \"\",\n updates: false,\n gift: this.props.amount || 20,\n allocation: \"\"\n };\n\n saveToState = e => {\n this.setState({ [e.target.name]: e.target.value });\n };\n\n onToken = async (res, createDonation) => {\n const donation = await createDonation({\n variables: {\n amount: Number(this.state.gift),\n stripetoken: res.id,\n email: res.email,\n cat: this.state.allocation\n }\n })\n .then(res => {\n Router.push({\n pathname: \"/thankyou\"\n });\n })\n .catch(err => {\n alert(err.message);\n });\n };\n\n render() {\n return (\n <Query query={ALL_CATS_QUERY}>\n {({ data }) => {\n const catsName = _.pluck(data.allCats, \"name\");\n let cat = _.where(data.allCats, { name: this.state.allocation })[0];\n let image = cat ? 
cat.image : \"/image/stripe.jpg\";\n return (\n <Styles>\n <FormField label=\"\">\n <Heading size={500} marginTop=\"default\">\n Email\n </Heading>\n <TextInput\n placeholder=\"Enter your email\"\n width={400}\n value={this.state.email}\n label=\"email\"\n onChange={e => this.setState({ email: e.target.value })}\n />\n <Heading size={500} marginTop=\"default\">\n Gift Amount *\n </Heading>\n\n <Autocomplete\n height={50}\n items={[25.0, 50.0, 100.0, 200.0, 500.0]}\n onChange={changedItem => this.setState({ gift: changedItem })}\n initialInputValue={this.state.gift.toString()}\n >\n {props => {\n const {\n getInputProps,\n getRef,\n inputValue,\n openMenu\n } = props;\n return (\n <TextInput\n placeholder=\"Choose or Enter an Amount\"\n width={400}\n value={inputValue}\n innerRef={getRef}\n label=\"Gift Amount\"\n {...getInputProps({\n onFocus: () => {\n openMenu();\n }\n })}\n />\n );\n }}\n </Autocomplete>\n <Heading size={500} marginTop=\"default\">\n Allocation\n </Heading>\n <Combobox\n name=\"allocation\"\n openOnFocus\n width={400}\n items={[...catsName]}\n onChange={selected => this.setState({ allocation: selected })}\n placeholder=\"Select a Cat\"\n />\n <div style={{ display: \"flex\", alignItems: \"center\" }}>\n <Checkbox\n name=\"updates\"\n checked={this.state.update}\n onChange={e => this.setState({ update: e.target.checked })}\n />\n <Heading size={500} marginTop={0} marginLeft={10}>\n Keep me updated on Free Spirits news\n </Heading>\n </div>\n <Mutation mutation={CREATE_DONATION_MUTATION}>\n {createDonation => (\n <StripeCheckout\n amount={this.state.gift * 100}\n email={this.state.email}\n name=\"Free Spirits\"\n description={`Donate to save wild cats`}\n image={image}\n stripeKey=\"pk_test_KiZyYKiQtlmrqhtoGEbkdtuR00es4lCEgx\"\n currency=\"AUD\"\n token={res => this.onToken(res, createDonation)}\n >\n <Button\n height={50}\n marginTop={30}\n appearance=\"primary\"\n intent=\"success\"\n >\n Complete this Transaction\n </Button>\n </StripeCheckout>\n )}\n </Mutation>\n </FormField>\n </Styles>\n );\n }}\n </Query>\n );\n }\n}\n\nexport default DonationForm;\nexport { ALL_CATS_QUERY };\n" }, { "alpha_fraction": 0.7601712942123413, "alphanum_fraction": 0.7601712942123413, "avg_line_length": 26.52941131591797, "blob_id": "96bab38635304f1d54f8868bf4bcc68e899a98f3", "content_id": "e23c6a79f58b9de06bf953bedf36c847f578713d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 70, "num_lines": 17, "path": "/freespirits/schema.py", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import graphene\nimport freespirits.back.schema\n\n\nclass Query(freespirits.back.schema.Query, graphene.ObjectType):\n # This class will inherit from multiple Queries\n # as we begin to add more apps to our project\n pass\n\n\nclass Mutation(freespirits.back.schema.Mutation, graphene.ObjectType):\n # This class will inherit from multiple Queries\n # as we begin to add more apps to our project\n pass\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)" }, { "alpha_fraction": 0.5871056318283081, "alphanum_fraction": 0.6117969751358032, "avg_line_length": 29.375, "blob_id": "b70c29d9d8b9d952406743362fe281fa57ca0bd4", "content_id": "9a88e58f239e16c8c141d192118c87bff168c383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 139, "num_lines": 24, 
"path": "/freespirits/back/migrations/0002_auto_20200120_0746.py", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0 on 2020-01-20 07:46\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('back', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='donation',\n name='cat',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='back.Cat', verbose_name='Cat'),\n ),\n migrations.AlterField(\n model_name='photo',\n name='cat',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='back.Cat', verbose_name='Cat'),\n ),\n ]\n" }, { "alpha_fraction": 0.43230241537094116, "alphanum_fraction": 0.4481099545955658, "avg_line_length": 21.045454025268555, "blob_id": "1a959c406de152862db3d32ce4995ab30598a2f0", "content_id": "8f5e2af364bcae1363858551353fdb6a6059358c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 114, "num_lines": 66, "path": "/components/NavCat.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport styled from 'styled-components';\nimport Link from 'next/link'\nimport PropTypes from 'prop-types';\n\nconst iucnDic = {\n \"A_1\": \"Extinct\",\n \"A_2\": \"Extinct in the wild\",\n \"A_3\": \"Creitically endangered\",\n \"A_4\": \"Endangered\",\n \"A_5\": \"Vulnerable\",\n \"A_6\": \"Near threatened\",\n \"A_7\": \"Least concern\"\n}\n\nconst Style = styled.div`\n display: grid;\n grid-column: 1;\n\n &:hover img {\n border: 2px solid white;\n transform: scale(1.2);\n\n }\n\n div {\n height: 100px;\n margin: 2rem 3rem;\n }\n\n p {\n padding: 0;\n margin: 0;\n text-align: center;\n }\n\n\n`;\n\n\nexport default class NavCat extends Component {\n static propTypes = {\n cat: PropTypes.object.isRequired\n }\n\n render() {\n const { cat } = this.props\n return (\n <div onClick={() => this.props.openDrawer()}>\n <Link href={{\n pathname: '/cat',\n query: { name: cat.name }\n }}>\n\n <Style >\n <div>\n <img style={{ height: \"100%\", borderRadius: \"50%\" }} src={cat.image} alt={cat.name} />\n </div>\n <p>{cat.name}</p>\n <p>{iucnDic[cat.iucnStatus]}</p>\n </Style>\n </Link>\n </div>\n )\n }\n}\n" }, { "alpha_fraction": 0.4810126721858978, "alphanum_fraction": 0.6877636909484863, "avg_line_length": 15.928571701049805, "blob_id": "ad5a0b507dabe0da7fde3abd185933b094ba54b6", "content_id": "d33ce2417a96f8724c3668c9b8ec56157fb00776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 711, "license_type": "no_license", "max_line_length": 26, "num_lines": 42, "path": "/requirements.txt", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": 
"aniso8601==7.0.0\nasgiref==3.2.3\nastroid==2.3.3\nautopep8==1.4.4\ncertifi==2019.11.28\nchardet==3.0.4\nDateTime==4.3\ndj-database-url==0.5.0\ndj-stripe==2.1.1\nDjango==3.0\ndjango-cors-headers==3.2.0\ndjango-graphql-jwt==0.3.0\ndjango-heroku==0.3.1\ngraphene==2.1.8\ngraphene-django==2.7.1\ngraphql-core==2.2.1\ngraphql-relay==2.0.1\ngunicorn==20.0.4\nidna==2.8\nisort==4.3.21\njsonfield==2.0.2\nlazy-object-proxy==1.4.3\nmccabe==0.6.1\npep8==1.7.1\npromise==2.2.1\npsycopg2==2.8.4\npycodestyle==2.5.0\nPyJWT==1.7.1\npylint==2.4.4\npytz==2019.3\nrequests==2.22.0\nRx==1.6.1\nsingledispatch==3.4.0.3\nsix==1.13.0\nsqlparse==0.3.0\nstripe==2.41.0\ntyped-ast==1.4.1\nurllib3==1.25.7\nwhitenoise==5.0.1\nwrapt==1.11.2\nyapf==0.29.0\nzope.interface==4.7.1\n" }, { "alpha_fraction": 0.6350092887878418, "alphanum_fraction": 0.6384676694869995, "avg_line_length": 32.84684753417969, "blob_id": "0eaff7006d246e40d0e2e49f063680946353f406", "content_id": "c1bc809859d43c1252152af3e35c4172c32b8cdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3759, "license_type": "no_license", "max_line_length": 163, "num_lines": 111, "path": "/freespirits/back/schema.py", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "from django.core.mail import EmailMultiAlternatives\nfrom django.contrib.auth import get_user_model, login, logout, authenticate\nfrom datetime import datetime\nimport graphene\nfrom graphene_django.types import DjangoObjectType\nfrom graphql.error import GraphQLError\nimport stripe\nfrom freespirits.back.models import Cat, Photo, Donation\n\nclass CatType(DjangoObjectType):\n class Meta:\n model = Cat\nclass PhotoType(DjangoObjectType):\n class Meta:\n model = Photo\nclass DonationType(DjangoObjectType):\n class Meta:\n model = Donation\n\nclass Query(graphene.ObjectType):\n all_cats = graphene.List(CatType)\n all_photos = graphene.List(PhotoType)\n\n cat = graphene.Field(CatType, id=graphene.Int(), name=graphene.String())\n\n def resolve_all_cats(self, info, **kwargs):\n return Cat.objects.all()\n\n def resolve_cat(self, info, **kwargs):\n name = kwargs.get('name')\n\n if name is not None:\n return Cat.objects.get(name=name)\n\n return None\n\n\nclass DonationInput(graphene.InputObjectType):\n amount = graphene.Int()\n email = graphene.String()\n stripetoken = graphene.String()\n cat = graphene.String()\n\nclass CreateDonation(graphene.Mutation):\n class Arguments:\n input = DonationInput(required=True)\n\n ok = graphene.Boolean()\n donation = graphene.Field(DonationType)\n\n @staticmethod\n def mutate(root, info, input=None):\n stripe.api_key = \"sk_test_NyaCo4VX3cDyY5TBu9WA2ZhW00SAssAE9N\"\n try:\n charge = stripe.Charge.create(\n amount=input.amount*100, \n currency=\"AUD\",\n source=input.stripetoken\n )\n\n ok = True\n date = datetime.now()\n cat = Cat.objects.get(name=input.cat)\n donation = Donation(amount=input.amount, email=input.email, stripetoken=input.stripetoken, cat=cat, date=date)\n donation.save()\n\n\n subject = 'Thank your for your gift to Free Spirits'\n from_email = \"[email protected]\"\n to = input.email\n text_content = \"Your donation of \" + str(input.amount) + \" dollors may be tax deductible.\"\n html_content = '<h1><strong>Thank You</strong> for this donation</h1><p>Your donation of ' + str(input.amount) + ' dollors may be tax deductible.</p>'\n\n message = EmailMultiAlternatives(subject, text_content, from_email, [to])\n message.attach_alternative(html_content, \"text/html\")\n 
message.send()\n\n return CreateDonation(ok=ok, donation=donation)\n\n except stripe.error.CardError as e:\n body = e.json_body\n err = body.get(\"error\", {})\n raise GraphQLError(err.get('message'))\n \n\n except stripe.error.RateLimitError as e:\n raise GraphQLError(\"Rate limit error\")\n \n\n except stripe.error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n raise GraphQLError(\"Invalid parameters\")\n \n\n except stripe.error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n raise GraphQLError(\"Not authenticated\")\n\n except stripe.error.APIConnectionError as e:\n # Network communication with Stripe failed\n raise GraphQLError(\"Network error\")\n\n except stripe.error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n raise GraphQLError(\"Something went wrong. You were not charged. Please try again.\")\n\n\nclass Mutation(graphene.ObjectType):\n create_donation = CreateDonation.Field()\n\n\n" }, { "alpha_fraction": 0.6264591217041016, "alphanum_fraction": 0.6361867785453796, "avg_line_length": 17.35714340209961, "blob_id": "e180a1a89313af863eac5748eac0b4fe4e7539df", "content_id": "22036dcfaa3ab2327656f881201e155151ad8234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 514, "license_type": "no_license", "max_line_length": 76, "num_lines": 28, "path": "/pages/thankyou.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport styled from \"styled-components\";\nimport Link from \"next/link\";\n\nconst Message = styled.div`\n margin: 10em auto;\n text-align: center;\n`;\n\nconst Text = styled.h1`\n color: white;\n\n a {\n color: white;\n text-decoration: underline;\n }\n`;\n\nconst ThankYou = props => (\n <Message>\n <h1>Thank you for your gift to Free Spirits.</h1>\n <Text>\n You will receive an email soon. 
<a href=\"/\"> Back to Homepage</a>{\" \"}\n </Text>\n </Message>\n);\n\nexport default ThankYou;\n" }, { "alpha_fraction": 0.6216640472412109, "alphanum_fraction": 0.6305599212646484, "avg_line_length": 25.150684356689453, "blob_id": "0d23f360be84ac0de0cec75be2b45787d0f5371a", "content_id": "4afa47956a1edb9b9da7d513ed22244fa23d2ee3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1911, "license_type": "no_license", "max_line_length": 91, "num_lines": 73, "path": "/freespirits/back/models.py", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom datetime import date, datetime\nfrom django.conf import settings\n\n\n\nclass Cat(models.Model):\n EXTINCT = 1\n EXTINCT_IN_THE_WILD = 2\n CRITICALLY_ENDANGERED = 3\n ENDANGERED = 4\n VULNERABLE = 5\n NEAR_THREATENED = 6\n LEAST_CONCERN = 7\n IUCN_RED_LIST_STATUS = [\n (EXTINCT, 'Extinct'),\n (EXTINCT_IN_THE_WILD, 'Extinct in the wild'),\n (CRITICALLY_ENDANGERED, 'Critically endangered'),\n (ENDANGERED, 'Endangered'),\n (VULNERABLE, 'Vulnerable'),\n (NEAR_THREATENED, 'Near threatened'),\n (LEAST_CONCERN, 'Least concern'),\n ]\n\n\n name = models.CharField(max_length=100)\n description = models.TextField()\n habitat = models.TextField()\n iucn_status = models.IntegerField(\n choices=IUCN_RED_LIST_STATUS,\n )\n number = models.TextField()\n subspecies = models.IntegerField()\n life_span = models.TextField()\n size = models.TextField()\n weight = models.TextField()\n diet = models.TextField()\n image = models.URLField()\n bg = models.URLField()\n bg2 = models.URLField()\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ('iucn_status',)\n \n\nclass Photo(models.Model):\n title = models.CharField(max_length=100)\n url = models.CharField(max_length=100)\n cat = models.ForeignKey(Cat, on_delete=models.CASCADE, verbose_name = \"Cat\", null=True)\n \n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = ('title',)\n\n\nclass Donation(models.Model):\n amount = models.FloatField()\n date = models.DateField(default=date.today, blank=True)\n email = models.EmailField()\n stripetoken = models.TextField()\n cat = models.ForeignKey(Cat, on_delete=models.CASCADE, verbose_name = \"Cat\", null=True)\n\n def __str__(self):\n return self.email\n\n class Meta:\n ordering = ['-date','email']\n\n\n" }, { "alpha_fraction": 0.7862993478775024, "alphanum_fraction": 0.7862993478775024, "avg_line_length": 42.32258224487305, "blob_id": "3a7720d1e621264eaab169ac94eb52703085d9f5", "content_id": "40d61f702e3804faee1ec15b3cff7177ec6b7b50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1343, "license_type": "no_license", "max_line_length": 225, "num_lines": 31, "path": "/README.md", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "# Free Spirits\n\nThis is a website for a fictional charity Free Spirits. It is built with Django, GraphQL, Apollo, and Next.js, styled with EvergreenUI and CSS in JS, and backed with Stripe payments and Mailtrap.\n\nDuring the original one-week programming period, I spent most time studing Django, GraphQL, Apollo and Next.js, given I have no experience in any of them. I found things can get really complicated when implete them together.\nFor example:\n\n- Django's built-in administration application is backed by its own authentication token. But GraphGL only provides authentication with JWT. 
When both tokens are present, neither the Django admin app nor GraphQL functions properly.\n- Next.js often throws unexpected behaviors.\n- I came across so many issues while deploying it to Heroku.\n\n_Lessons Learned:_\n\n- Study only one new framework each time.\n- Use a framework only if you have a great understanding of the underlying framework.\n- And again, implement the deployment process as early as possible!\n\nDEMO:\n![Demo](FS.gif)\n\n## Directory Layout\n\n- `backend`: Django application\n- `frontend`: Next application\n- `public`: Static resources\n\n## Potential Improvements\n\n- Use microservices to have one server dealing with GraphQL requests and another server for authentication and authorisation.\n- Responsive design.\n- Create a CI/CD pipeline.\n" }, { "alpha_fraction": 0.7176165580749512, "alphanum_fraction": 0.7176165580749512, "avg_line_length": 26.35714340209961, "blob_id": "e7cb3ae8094c5f345290f0bffed784e21c94839e", "content_id": "37c7bacf894973936c3d7d860b5b5106836e07c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 66, "num_lines": 14, "path": "/freespirits/back/admin.py", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom freespirits.back.models import Cat, Photo, Donation\n\nclass CatAdmin(admin.ModelAdmin):\n\n    list_display = ('name', 'iucn_status', 'number', 'subspecies')\n    list_filter = ['iucn_status']\n    search_fields = ['name']\n    \n# Register your models here.\n\nadmin.site.register(Cat, CatAdmin)\nadmin.site.register(Photo)\nadmin.site.register(Donation)\n\n\n\n" }, { "alpha_fraction": 0.5174024701118469, "alphanum_fraction": 0.5330721735954285, "avg_line_length": 25.54800033569336, "blob_id": "c6e4254643f867edc1a9b0d79df56d96e64fafd5", "content_id": "06cd5a81e699f735ad4b25779297f5f097243c47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6641, "license_type": "no_license", "max_line_length": 80, "num_lines": 250, "path": "/components/Home.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import React, { Component, useState } from \"react\";\nimport PropTypes from \"prop-types\";\nimport styled from \"styled-components\";\nimport DonationForm from \"./DonationForm\";\nimport {\n  Button,\n  TextInput,\n  Autocomplete,\n  SideSheet,\n  Pane,\n  Heading\n} from \"evergreen-ui\";\n\nconst Center = styled.div`\n  text-align: center;\n`;\n\nclass Home extends Component {\n  static propTypes = {\n    getInputProps: PropTypes.func,\n    getRef: PropTypes.string,\n    inputValue: PropTypes.number,\n    openMenu: PropTypes.func\n  };\n  render() {\n    return (\n      <Center>\n        <div\n          style={{\n            width: \"100%\",\n            height: \"70vh\",\n            background: 'url(\"/image/1.jpg\")',\n            backgroundRepeat: \"no-repeat\",\n            backgroundAttachment: \"fixed\",\n            backgroundSize: \"cover\"\n          }}\n        ></div>\n        <Paragraph />\n        <Donation />\n        <NewsCenter />\n      </Center>\n    );\n  }\n}\n\nconst ParagraphStyles = styled.div`\n  display: grid;\n  grid-template-columns: 30% auto;\n  grid-column-gap: 5rem;\n  padding: 10rem 20rem;\n  background: ${props => props.theme.lightgrey};\n  color: ${props => props.theme.black};\n\n  @media (max-width: 1300px) {\n    padding: 3rem 10rem;\n  }\n\n  div {\n    text-align: left;\n    padding: 2rem;\n  }\n`;\n\nconst Paragraph = props => (\n  <ParagraphStyles>\n    <div>\n      <h1>Why Protect Big Cats?</h1>\n    </div>\n    <div>\n      
<p>\n Around the world, big cats are among the most recognized and admired\n animals, at the top of the food chain. Yet all seven species are listed\n as Threatened or Near Threatened on the IUCN Red List, with the tiger\n categorized as Endangered. WCS is in a unique position to help—we work\n to conserve all seven.\n </p>\n <p>\n In addition to habitat degradation and loss of prey, many of these\n iconic predators are hunted directly for their fur, bones, or other body\n parts. They are also threatened by conflicts with people—their need for\n space leads them to range outside protected areas and to become a real\n or perceived threat to local people and their livestock.\n </p>\n <Button appearance=\"primary\" marginRight={16} intent=\"warning\">\n Read More\n </Button>\n </div>\n </ParagraphStyles>\n);\n\nconst DonationStyles = styled.div`\n padding: 5rem 20rem;\n background: url(\"/image/donationbg.jpg\");\n background-repeat: no-repeat;\n background-attachment: fixed;\n background-size: cover;\n min-height: 50vh;\n color: white;\n`;\n\nconst Donation = () => {\n const [isShown, setIsShown] = useState(false);\n const [amount, setAmount] = useState(0);\n\n return (\n <>\n <DonationStyles>\n <form style={{ textAlign: \"left\", paddingLeft: \"2rem\" }}>\n <Autocomplete\n height={50}\n onChange={changedItem => setAmount(changedItem)}\n items={[25, 50, 100, 200, 500]}\n >\n {props => {\n const { getInputProps, getRef, inputValue, openMenu } = props;\n return (\n <div>\n <h1>Donate</h1>\n <TextInput\n placeholder=\"Choose or Enter an Amount\"\n value={inputValue}\n innerRef={getRef}\n {...getInputProps({\n onFocus: () => {\n openMenu();\n }\n })}\n />\n <h1>to save the world's wild cats.</h1>\n </div>\n );\n }}\n </Autocomplete>\n <Button\n height={50}\n marginRight={16}\n appearance=\"primary\"\n intent=\"success\"\n iconBefore=\"heart\"\n onClick={e => {\n e.preventDefault();\n setIsShown(true);\n }}\n >\n Donate!\n </Button>\n </form>\n </DonationStyles>\n <SideSheet\n isShown={isShown}\n onCloseComplete={() => setIsShown(false)}\n containerProps={{\n display: \"flex\",\n flex: \"1\",\n flexDirection: \"column\"\n }}\n >\n <Pane zIndex={1} flexShrink={0} elevation={0} backgroundColor=\"white\">\n <Pane padding={16}>\n <Heading size={600}>Donation</Heading>\n </Pane>\n </Pane>\n\n <Pane flex=\"1\" overflowY=\"scroll\" background=\"tint1\" padding={16}>\n <img\n style={{ width: \"100%\", margin: \"0 auto\" }}\n src=\"/image/cubs.jpeg\"\n />\n <DonationForm amount={amount} />\n </Pane>\n </SideSheet>\n </>\n );\n};\n\nconst NewsCenterStyles = styled.div`\n padding: 10rem 20rem;\n text-align: left;\n background: ${props => props.theme.lightgrey};\n color: ${props => props.theme.black};\n min-height: 50vh;\n\n h1 {\n color: ${props => props.theme.darkBlue};\n font-size: 3.5rem;\n padding-bottom: 2rem;\n }\n\n .columns {\n display: grid;\n grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));\n grid-gap: 5rem;\n }\n\n img {\n width: 100%;\n }\n`;\n\nconst NewsCenter = () => (\n <NewsCenterStyles>\n <h1>News Center</h1>\n <div className=\"columns\">\n <div>\n <div>\n <img src=\"/image/news.jpg\" alt=\"news-image\" />\n </div>\n <div>\n <h4>Caracal</h4>\n <p>\n Caracals (Caracal caracal) are found across much of southern and\n central Africa. 
They have long powerful legs that enable them to\n              leap as high as 10 feet, and hunt birds on the wing.\n            </p>\n            <p>Source: BBC Wildlife</p>\n          </div>\n        </div>\n        <div>\n          <div>\n            <img src=\"/image/news.jpg\" alt=\"news-image\" />\n          </div>\n          <div>\n            <h4>Caracal</h4>\n            <p>\n              Caracals (Caracal caracal) are found across much of southern and\n              central Africa. They have long powerful legs that enable them to\n              leap as high as 10 feet, and hunt birds on the wing.\n            </p>\n            <p>Source: BBC Wildlife</p>\n          </div>\n        </div>\n        <div>\n          <div>\n            <img src=\"/image/news.jpg\" alt=\"news-image\" />\n          </div>\n          <div>\n            <h4>Caracal</h4>\n            <p>\n              Caracals (Caracal caracal) are found across much of southern and\n              central Africa. They have long powerful legs that enable them to\n              leap as high as 10 feet, and hunt birds on the wing.\n            </p>\n            <p>Source: BBC Wildlife</p>\n          </div>\n        </div>\n      </div>\n  </NewsCenterStyles>\n);\n\nexport default Home;\n" }, { "alpha_fraction": 0.49885058403015137, "alphanum_fraction": 0.5118774175643921, "avg_line_length": 39.78125, "blob_id": "227b503a3fd6a18ee4bfa93bc7a0b0e3fdbc5878", "content_id": "3d35fcba1ab43a8c682ac2107a54432c6f45bb84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2610, "license_type": "no_license", "max_line_length": 221, "num_lines": 64, "path": "/freespirits/back/migrations/0001_initial.py", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0 on 2020-01-20 07:45\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Cat',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('name', models.CharField(max_length=100)),\n                ('description', models.TextField()),\n                ('habitat', models.TextField()),\n                ('iucn_status', models.IntegerField(choices=[(1, 'Extinct'), (2, 'Extinct in the wild'), (3, 'Critically endangered'), (4, 'Endangered'), (5, 'Vulnerable'), (6, 'Near threatened'), (7, 'Least concern')])),\n                ('number', models.TextField()),\n                ('subspecies', models.IntegerField()),\n                ('life_span', models.TextField()),\n                ('size', models.TextField()),\n                ('weight', models.TextField()),\n                ('diet', models.TextField()),\n                ('image', models.URLField()),\n                ('bg', models.URLField()),\n                ('bg2', models.URLField()),\n            ],\n            options={\n                'ordering': ('iucn_status',),\n            },\n        ),\n        migrations.CreateModel(\n            name='Photo',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('title', models.CharField(max_length=100)),\n                ('url', models.CharField(max_length=100)),\n                ('cat', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='back.Cat', verbose_name='Cat')),\n            ],\n            options={\n                'ordering': ('title',),\n            },\n        ),\n        migrations.CreateModel(\n            name='Donation',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('amount', models.FloatField()),\n                ('date', models.DateField(blank=True, default=datetime.date.today)),\n                ('email', models.EmailField(max_length=254)),\n                ('stripetoken', models.TextField()),\n                ('cat', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='back.Cat', verbose_name='Cat')),\n            ],\n            options={\n                'ordering': ['-date', 'email'],\n            },\n        ),\n    ]\n" }, {
"alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 19.66666603088379, "blob_id": "4c7e216cfc2b385cd8028ac3e8daf32c9fd0e6ab", "content_id": "dc342c2b6d7a65866606eeb9912ca60c56d0c63c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 62, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/pages/cat.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import Cat from '../components/SingleCat'\n\nexport default Cat\n" }, { "alpha_fraction": 0.6235741376876831, "alphanum_fraction": 0.6463878154754639, "avg_line_length": 18.481481552124023, "blob_id": "ad2493bb2cad0af1123bc325e939a97cf187c277", "content_id": "72b1902635f3e0fada91bb4e66e313f4dd8b7a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 526, "license_type": "no_license", "max_line_length": 38, "num_lines": 27, "path": "/components/Logo.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import styled from 'styled-components'\n\nconst LogoStyles = styled.h1`\n font-size: 4rem;\n margin: 1rem 2rem;\n position: relative;\n z-index: 2;\n background-image: url(\"/logo.png\");\n background-size: contain;\n background-repeat: no-repeat;\n a {\n padding-left: 9ex;\n color: white;\n text-transform: uppercase;\n text-decoration: none;\n }\n @media (max-width: 1300px) {\n margin: 0;\n text-align: center;\n background-position: center;\n a {\n padding-left: 0;\n }\n }\n`\n\nexport default LogoStyles\n" }, { "alpha_fraction": 0.3678891062736511, "alphanum_fraction": 0.3840600550174713, "avg_line_length": 22.086666107177734, "blob_id": "2ec7030fccf90e440b25d8204835019fd55f4971", "content_id": "aa1fe3c00eb5be1e89c1a1e4e23ad83b6cfb6763", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3463, "license_type": "no_license", "max_line_length": 62, "num_lines": 150, "path": "/components/SingleCat.js", "repo_name": "JinSeal/GA-SEI34-Project3-FreeSpirits", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { Query } from 'react-apollo'\nimport gql from 'graphql-tag'\nimport styled from 'styled-components'\n\nconst SINGLE_CAT_QUERY = gql`\n query SINGLE_CATS_QUERY($name: String!) 
{\n    cat(name: $name) {\n      id\n      name\n      description\n      habitat\n      iucnStatus\n      number\n      subspecies\n      lifeSpan\n      size\n      weight\n      diet\n      bg\n      bg2\n      photoSet {\n        id\n        url\n      }\n    }\n  }\n`\n\nconst Style = styled.div`\n  .gallery {\n    width: 100%;\n    img {\n      width: 100%;\n      height: auto;\n    }\n  }\n  .fact {\n    padding: 5rem;\n    display: flex;\n    flex-wrap: wrap;\n    justify-content: space-around;\n    \n    h1 {\n      font-size: 8rem;\n    }\n\n    table {\n      min-width: 600px;\n      max-width: 900px;\n      border-top: 2px solid white;\n      border-bottom: 2px solid white;\n    }\n\n    table td {\n      padding: 0.8em 3em;\n      border-bottom: 1px solid rgba(255, 255, 255, 0.5);\n      font-family: Arial, Helvetica, sans-serif;\n      color: rgba(255, 255, 255, 0.5);\n    }\n\n    table tr td:last-child {\n      color: white; \n      min-width: 500px;\n    }\n  }\n\n  .links {\n    background-color: ${props => props.theme.lightgrey}\n  }\n  \n  }\n`\n\nclass Cat extends Component {\n  render () {\n    return (\n      <Query\n        query={SINGLE_CAT_QUERY}\n        variables={{ name: this.props.query.name }}\n      >\n        {({ data, error, loading }) => {\n          if (loading) return <p>Loading...</p>\n          if (error) return <p>Error: {error.message}</p>\n          const cat = data.cat\n\n          return (\n            <Style>\n              <div className=\"gallery\">\n                <img src={cat.bg} />\n              </div>\n\n              <div className=\"fact\">\n                <h1>{cat.name}</h1>\n                <table>\n                  <tbody>\n                    <tr>\n                      <td>IUCN RED LIST STATUS</td>\n                      <td>{iucnDic[cat.iucnStatus]}</td>\n                    </tr>\n                    <tr>\n                      <td>LEFT IN THE WILD</td>\n                      <td>{cat.number}</td>\n                    </tr>\n                    <tr>\n                      <td>RANGE</td>\n                      <td>{cat.habitat}</td>\n                    </tr>\n                    <tr>\n                      <td>SUB-SPECIES</td>\n                      <td>{cat.subspecies}</td>\n                    </tr>\n                    <tr>\n                      <td>LIFE SPAN</td>\n                      <td>{cat.lifeSpan}</td>\n                    </tr>\n                    <tr>\n                      <td>SIZE</td>\n                      <td>{cat.size}</td>\n                    </tr>\n                    <tr>\n                      <td>WEIGHT</td>\n                      <td>{cat.weight}</td>\n                    </tr>\n                    <tr>\n                      <td>DIET</td>\n                      <td>{cat.diet}</td>\n                    </tr>\n                  </tbody>\n                </table>\n              </div>\n            </Style>\n          )\n        }}\n      </Query>\n    )\n  }\n}\n\nexport default Cat\n\nconst iucnDic = {\n  A_1: 'Extinct',\n  A_2: 'Extinct in the wild',\n  A_3: 'Critically endangered',\n  A_4: 'Endangered',\n  A_5: 'Vulnerable',\n  A_6: 'Near threatened',\n  A_7: 'Least concern'\n}\n" } ]
15
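A minimal sketch of querying the graphene schema defined in `freespirits/back/schema.py` above. The endpoint URL is an assumption (a local Django dev server exposing `/graphql`); the camelCased field name `allCats` follows graphene-django's convention and matches the `ALL_CATS_QUERY` in `DonationForm.js`:

```python
import requests  # third-party HTTP client, assumed available

# Hypothetical endpoint; adjust to wherever the Django server runs.
GRAPHQL_URL = "http://localhost:8000/graphql"

query = """
query {
  allCats {
    id
    name
    iucnStatus
  }
}
"""

resp = requests.post(GRAPHQL_URL, json={"query": query})
resp.raise_for_status()
for cat in resp.json()["data"]["allCats"]:
    print(cat["name"], cat["iucnStatus"])
```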
datosh/PyEngine
https://github.com/datosh/PyEngine
9450a05f2770bb2505adef9898994cab7338d21f
0e70588ce6e99ec0d13f3b292292cf99335099b3
ef5fb9bc9b1ab972cb0ab257596a13e424c8e0ac
refs/heads/master
2020-06-04T20:34:56.007756
2015-05-12T17:56:15
2015-05-12T17:56:15
35,503,939
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47589877247810364, "alphanum_fraction": 0.5086248517036438, "avg_line_length": 28.398103713989258, "blob_id": "dba4e178eaedd579d9cf18c7ae49a15b378a0de0", "content_id": "cd36d4912ff84c029dffd92e0898938530a7962c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6203, "license_type": "no_license", "max_line_length": 77, "num_lines": 211, "path": "/mace_runner.py", "repo_name": "datosh/PyEngine", "src_encoding": "UTF-8", "text": "import math\nimport pygame\nimport game\n\nfrom pygame.locals import *\n\n\nclass Player(pygame.sprite.Sprite):\n\n \"\"\"A player that is going to run around in the maze.\"\"\"\n\n def __init__(self, x, y, width, height):\n super(Player, self).__init__()\n\n # Set the visuals and the position\n self.width = width\n self.height = height\n self.color = pygame.Color('red')\n self.image = pygame.Surface([self.width, self.height])\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n\n # Movement related varables\n self.old_x = 0\n self.old_y = 0\n self.x_dir = 0 # Should either be 1, 0 or -1\n self.y_dir = 0 # Should either be 1, 0 or -1\n self.speed = .2\n\n # Collision detections\n self.collider_list = []\n\n def update(self, delta):\n # Make sure delta x and y are in range [-1, 1]\n if self.x_dir:\n self.x_dir = math.copysign(1, self.x_dir)\n if self.y_dir:\n self.y_dir = math.copysign(1, self.y_dir)\n\n # Move the player\n self.old_x = self.rect.x\n self.old_y = self.rect.y\n\n # Move and check for collision in x direction\n self.rect.x = self.rect.x + self.x_dir * int(self.speed * delta)\n coll = pygame.sprite.spritecollide(self, self.collider_list, False)\n if coll:\n coll = coll[0]\n if self.x_dir > 0:\n self.rect.right = coll.rect.left\n else:\n self.rect.left = coll.rect.right\n\n # Move and check for collision in y direction\n self.rect.y = self.rect.y + self.y_dir * int(self.speed * delta)\n coll = pygame.sprite.spritecollide(self, self.collider_list, False)\n if coll:\n coll = coll[0]\n if self.y_dir > 0:\n self.rect.bottom = coll.rect.top\n else:\n self.rect.top = coll.rect.bottom\n\n\nclass Wall(pygame.sprite.Sprite):\n\n \"\"\"Boundrys for the levels\"\"\"\n\n def __init__(self, x, y, width, height, color='blue'):\n super(Wall, self).__init__()\n\n # Set the visuals and the position\n self.width = width\n self.height = height\n self.color = pygame.Color(color)\n self.image = pygame.Surface([self.width, self.height])\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n\n\nclass Level(object):\n\n \"\"\"Base class for all levels in this mace runner game.\n This class should not be use directly but only be extended\"\"\"\n\n def __init__(self):\n super(Level, self).__init__()\n self.wall_list = pygame.sprite.Group()\n self.enemy_list = pygame.sprite.Group()\n\n\nclass Level_01(Level):\n\n \"\"\"First level\"\"\"\n\n def __init__(self, width, height):\n super(Level_01, self).__init__()\n\n walls = [\n # Outer lines\n (0, 0, width, 10),\n (0, 470, width, 10),\n (0, 0, 10, height),\n (630, 0, 10, height / 2 - 15),\n (630, height / 2 + 15, 10, height),\n\n (100, 40, 10, height - 80),\n (200, 40, 10, height - 80),\n (300, 40, 10, height - 80),\n (400, 40, 10, height - 80),\n (500, 40, 10, height - 80),\n ]\n\n for wall in walls:\n self.wall_list.add(Wall(\n wall[0],\n wall[1],\n wall[2],\n wall[3],\n color='red'))\n\n\nclass Level_02(Level):\n\n \"\"\"Second level\"\"\"\n\n def __init__(self, 
width, height):\n        super(Level_02, self).__init__()\n\n        walls = [\n            # Outer lines\n            (0, 0, width, 10),\n            (0, 470, width, 10),\n            (0, 0, 10, height),\n            (630, 0, 10, height / 2 - 15),\n            (630, height / 2 + 15, 10, height),\n\n            (40, 100, width - 80, 10),\n            (40, 200, width - 80, 10),\n            (40, height/2, width - 80, 10),\n            (40, 400, width - 80, 10),\n        ]\n\n        for wall in walls:\n            self.wall_list.add(Wall(wall[0], wall[1], wall[2], wall[3]))\n\n\nclass MaceRunner(game.Game):\n\n    \"\"\"A simple implementation of maze runner.\n    Main takeaway should be changing levels/screens.\"\"\"\n\n    def __init__(self):\n        super(MaceRunner, self).__init__()\n\n        # List of all the levels in the game\n        self.levels = []\n        self.current_level = 0\n        self.levels.append(Level_01(self.WINDOWWIDTH, self.WINDOWHEIGHT))\n        self.levels.append(Level_02(self.WINDOWWIDTH, self.WINDOWHEIGHT))\n\n        # List for all the sprites in the game\n        self.all_sprites = pygame.sprite.Group()\n\n        # Add the player to the list\n        self.player = Player(40, 40, 15, 15)\n        self.all_sprites.add(self.player)\n        self.player.collider_list = self.levels[self.current_level].wall_list\n\n    def update(self, delta):\n        self.player.update(delta)\n\n        # MAKE LEVEL TRANSITION\n        if self.player.rect.x > self.WINDOWWIDTH:\n            self.current_level = 1\n            self.player.collider_list = self.levels[1].wall_list\n            self.player.rect.topleft = (30, 30)\n\n    def evnt_hndlr(self, event):\n        if event.type == KEYDOWN:\n            if event.key == K_d:\n                self.player.x_dir += 1\n            if event.key == K_a:\n                self.player.x_dir += -1\n            if event.key == K_s:\n                self.player.y_dir += 1\n            if event.key == K_w:\n                self.player.y_dir += -1\n        if event.type == KEYUP:\n            if event.key == K_d:\n                self.player.x_dir += -1\n            if event.key == K_a:\n                self.player.x_dir += 1\n            if event.key == K_s:\n                self.player.y_dir += -1\n            if event.key == K_w:\n                self.player.y_dir += 1\n\n    def draw(self, surf):\n        self.levels[self.current_level].wall_list.draw(surf)\n        self.all_sprites.draw(surf)\n\n\ndef main():\n    mr = MaceRunner()\n    mr.run()\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.5437702536582947, "alphanum_fraction": 0.5474756956100464, "avg_line_length": 27.03896141052246, "blob_id": "961863d93aa30a4253ebc09b5e7a054f8fd99883", "content_id": "cebb19f9727f3f49d4250e441e1255f5446c619d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2159, "license_type": "no_license", "max_line_length": 74, "num_lines": 77, "path": "/game.py", "repo_name": "datosh/PyEngine", "src_encoding": "UTF-8", "text": "import pygame\nimport sys\n\nfrom pygame.locals import *\n\n\ndef terminate():\n    pygame.quit()\n    sys.exit()\n\n\nclass Game(object):\n\n    \"\"\"This represents the abstract base class for any new game. Every new\n    game should extend this class\"\"\"\n\n    def __init__(self, width=640, height=480):\n        \"\"\"Initializes the Game with a standard window size, fps and\n        background color.\"\"\"\n\n        # Window\n        self.WINDOWWIDTH = width\n        self.WINDOWHEIGHT = height\n        self.WINDOWDIMENSIONS = (self.WINDOWWIDTH, self.WINDOWHEIGHT)\n        self.FPS = 60\n        self.background_color = pygame.Color('black')\n\n        self.quit_on_esc = True\n        self.done = False\n\n        pygame.init()\n        self.surf = pygame.display.set_mode(self.WINDOWDIMENSIONS)\n        self.clock = pygame.time.Clock()\n\n    def __str__(self):\n        return \"game.Game: WIDTH = {}, HEIGHT = {}, FPS = {}\".format(\n            self.WINDOWWIDTH,\n            self.WINDOWHEIGHT,\n            self.FPS)\n\n    def run(self):\n        \"\"\"The run function implements the main loop of the game. 
The\n        functions self.update and self.draw are called, and shall be\n        overridden by the subclass to do something useful.\"\"\"\n\n        while not self.done:\n            # wait for frame to pass\n            delta = self.clock.tick(self.FPS)\n\n            for event in pygame.event.get():\n                # Terminate on X button\n                if event.type == QUIT:\n                    terminate()\n                # Terminate on ESC\n                if self.quit_on_esc:\n                    if event.type == KEYDOWN and event.key == K_ESCAPE:\n                        terminate()\n                # pass event to event handler\n                self.evnt_hndlr(event)\n                # ---- EVENT HANDLING DONE ----\n\n            # ---- UPDATE GAME OBJECTS ----\n            self.update(delta)\n\n            # ---- DRAW GAME OBJECTS ----\n            self.surf.fill(self.background_color)\n            self.draw(self.surf)\n            pygame.display.update()\n\n    def evnt_hndlr(self, event):\n        pass\n\n    def update(self, delta):\n        pass\n\n    def draw(self, surf):\n        pass\n" }, { "alpha_fraction": 0.6021144986152649, "alphanum_fraction": 0.618617832660675, "avg_line_length": 29.535432815551758, "blob_id": "3795b79a2cb094add30630c175eb1df832df9ddd", "content_id": "9715b12a4a8d44c771481144f3718c8f2bff7e92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3878, "license_type": "no_license", "max_line_length": 68, "num_lines": 127, "path": "/gestrandet.py", "repo_name": "datosh/PyEngine", "src_encoding": "UTF-8", "text": "import game\nimport pygame\n\nfrom pygame.locals import *\n\n\nclass SpriteSheet(object):\n\n    \"\"\"Helper class to load single images from a sprite sheet\"\"\"\n\n    # This points to the sprite sheet image\n    sprite_sheet = None\n\n    def __init__(self, file_name):\n        super(SpriteSheet, self).__init__()\n\n        self.file_name = file_name\n\n        # Load the sprite sheet\n        self.sprite_sheet = pygame.image.load(file_name).convert()\n\n    def get_image(self, x, y, width, height):\n        \"\"\"Grab a single image out of the larger spritesheet.\"\"\"\n\n        # Create a blank image\n        image = pygame.Surface([width, height]).convert()\n\n        # Copy the sprite from the large sheet onto the smaller one\n        image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))\n\n        # Assuming black works as the transparent color\n        image.set_colorkey(pygame.Color('black'))\n\n        return image\n\n\nclass Player(pygame.sprite.Sprite):\n\n    \"\"\"The player that is going to run around in the world\"\"\"\n\n    # Image, Animation and Movement Variables\n    walking_frames_l = []\n    walking_frames_r = []\n    direction = 'R'\n\n    def __init__(self):\n        super(Player, self).__init__()\n\n        # Set the visuals and the position\n        sprite_sheet = SpriteSheet('p1_walk.png')\n\n        # TODO: Transform into list comprehension\n        # Load all the right facing images into a list\n        # Then flip the image and load it into left facing list\n        image = sprite_sheet.get_image(0, 0, 66, 90)\n        self.walking_frames_r.append(image)\n        image = pygame.transform.flip(image, True, False)\n        self.walking_frames_l.append(image)\n        image = sprite_sheet.get_image(66, 0, 66, 90)\n        self.walking_frames_r.append(image)\n        image = pygame.transform.flip(image, True, False)\n        self.walking_frames_l.append(image)\n        image = sprite_sheet.get_image(132, 0, 67, 90)\n        self.walking_frames_r.append(image)\n        image = pygame.transform.flip(image, True, False)\n        self.walking_frames_l.append(image)\n        image = sprite_sheet.get_image(0, 93, 66, 90)\n        self.walking_frames_r.append(image)\n        image = pygame.transform.flip(image, True, False)\n        self.walking_frames_l.append(image)\n        image = sprite_sheet.get_image(66, 93, 66, 90)\n        self.walking_frames_r.append(image)\n        image = pygame.transform.flip(image, True, False)\n
self.walking_frames_l.append(image)\n image = sprite_sheet.get_image(132, 93, 72, 90)\n self.walking_frames_r.append(image)\n image = pygame.transform.flip(image, True, False)\n self.walking_frames_l.append(image)\n image = sprite_sheet.get_image(0, 186, 70, 90)\n self.walking_frames_r.append(image)\n image = pygame.transform.flip(image, True, False)\n self.walking_frames_l.append(image)\n\n # Load the first image\n self.image = self.walking_frames_r[0]\n\n # Set the collider to match the image\n self.rect = self.image.get_rect()\n\n\nclass Gestrandet(game.Game):\n\n \"\"\"A copy of the game GESTRANDET.\"\"\"\n\n def __init__(self):\n super(Gestrandet, self).__init__(width=1280, height=786)\n\n # TODO: shall we add caves and stuff?\n # List of the levels in the game\n # self.levels = []\n\n # List of all the sprites in the game\n self.all_sprites = pygame.sprite.Group()\n\n # Add the player to the list\n self.player = Player()\n self.all_sprites.add(self.player)\n\n # List of all the colliders in the current level\n self.all_collider = pygame.sprite.Group()\n\n def update(self, delta):\n self.player.update(delta)\n\n def evnt_hndlr(self, event):\n pass\n\n def draw(self, surf):\n self.all_sprites.draw(surf)\n\n\ndef main():\n gs = Gestrandet()\n gs.run()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5346173048019409, "alphanum_fraction": 0.5451913475990295, "avg_line_length": 33.24137878417969, "blob_id": "6f40ff38cee6c3316a384ee053cd8fc13ff645b8", "content_id": "c1da4bedbb7d48859e8dd72e98d649c06ef9fe78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7944, "license_type": "no_license", "max_line_length": 78, "num_lines": 232, "path": "/brick_buster.py", "repo_name": "datosh/PyEngine", "src_encoding": "UTF-8", "text": "import math\nimport game\nimport random\nimport pygame\n\nfrom pygame.locals import *\n\n\nclass Block(pygame.sprite.Sprite):\n\n \"\"\"Implements a basic block in the game using pygames Sprite class\"\"\"\n\n def __init__(self, x=0, y=0):\n super(Block, self).__init__()\n\n self.image = pygame.Surface([32, 16])\n self.image.fill(pygame.Color('green'))\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n\nclass Ball(pygame.sprite.Sprite):\n\n \"\"\"Implements the ball that is going to bust the bricks\"\"\"\n\n def __init__(self, x, y, screen_width, screen_height):\n super(Ball, self).__init__()\n self.width = 6\n self.height = self.width\n self.image = pygame.Surface([self.width, self.height])\n self.image.fill(pygame.Color('black'))\n pygame.draw.circle(self.image,\n pygame.Color('yellow'),\n (int(self.width / 2), int(self.height / 2)),\n int(self.width / 2))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n self.angle = -(math.pi / 2)\n self.speed = 0\n self.alive = True\n self.SPIN = 0.04\n self.DEFAULT_SPEED = 4\n\n self.screen_width = screen_width\n self.screen_height = screen_height\n\n def update(self):\n area = pygame.display.get_surface().get_rect()\n assert area, \"Couldn't retrieve display surface\"\n\n dx = self.speed * math.cos(self.angle)\n dy = self.speed * math.sin(self.angle)\n self.rect.move_ip((dx, dy))\n\n # Collision with the window, i.e. 
keep ball in window\n if not area.contains(self.rect):\n tl = not area.collidepoint(self.rect.topleft)\n tr = not area.collidepoint(self.rect.topright)\n bl = not area.collidepoint(self.rect.bottomleft)\n br = not area.collidepoint(self.rect.bottomright)\n if (tr and tl) or (br and bl):\n self.angle = -self.angle\n if (tl and bl) or (tr and br):\n self.angle = math.pi - self.angle\n\n # If ball is at bottom of screen, ball is dead\n if self.screen_height - self.rect.y < self.height:\n self.alive = False\n\n def collide_with(self, colls):\n for coll in colls:\n if self.screen_height - self.rect.y < 100:\n dist = self.rect.centerx - coll.rect.centerx\n self.angle = (-math.pi / 2) + (dist * self.SPIN)\n elif not coll.rect.contains(self.rect):\n tl = not coll.rect.collidepoint(self.rect.topleft)\n tr = not coll.rect.collidepoint(self.rect.topright)\n bl = not coll.rect.collidepoint(self.rect.bottomleft)\n br = not coll.rect.collidepoint(self.rect.bottomright)\n if (tr and tl) or (br and bl):\n self.angle = -self.angle\n if (tl and bl) or (tr and br):\n self.angle = math.pi - self.angle\n\n\nclass Player(pygame.sprite.Sprite):\n\n \"\"\"Implements the player of the BrickBuster game.\"\"\"\n\n def __init__(self, x, y, screen_width):\n super(Player, self).__init__()\n self.width = 64\n self.height = 8\n self.image = pygame.Surface([self.width, self.height])\n self.image.fill(pygame.Color('blue'))\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n self.screen_width = screen_width\n\n def update(self):\n \"\"\"Update the player location.\"\"\"\n\n # Only update the x position of the player. Not y, since he is only\n # allowed to stay at the bottom of the screen\n new_x = pygame.mouse.get_pos()[0]\n if new_x < 0 + (self.width / 2):\n new_x = 0 + (self.width / 2)\n elif new_x > self.screen_width - self.width + self.width / 2:\n new_x = self.screen_width - self.width + self.width / 2\n self.rect.centerx = new_x\n\n\nclass BrickBuster(game.Game):\n\n \"\"\"Implementing the old arcade game BrickBuster\"\"\"\n\n def __init__(self):\n super(BrickBuster, self).__init__()\n\n self.FPS = 120\n\n # Create a container for all sprites\n self.all_sprites = pygame.sprite.Group()\n\n # Create a container only for the blocks\n self.collide_sprites = pygame.sprite.Group()\n\n # Create some blocks and put them in their containers\n for x in range(130, 500, 50):\n for y in range(100, 300, 50):\n block = Block(x, y)\n self.all_sprites.add(block)\n self.collide_sprites.add(block)\n\n # Create the player\n self.player = Player(self.WINDOWWIDTH / 2,\n self.WINDOWHEIGHT - 30,\n self.WINDOWWIDTH)\n self.all_sprites.add(self.player)\n self.collide_sprites.add(self.player)\n\n # Create the ball\n self.ball = Ball(300, 300, self.WINDOWWIDTH, self.WINDOWHEIGHT)\n self.all_sprites.add(self.ball)\n\n # Some statistics\n self.score = 0\n\n # Font\n self.text_font = pygame.font.SysFont(None, 48)\n self.text_color = pygame.Color('white')\n\n # States\n self.intro = 'intro'\n self.playing = 'playing'\n self.gameover = 'gameover'\n self.state = self.intro\n\n def evnt_hndlr(self, event):\n # INTRO\n if self.state == self.intro:\n # If any key is pressed make the transition to playing state\n if event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:\n self.state = self.playing\n # PLAYING\n elif self.state == self.playing:\n if event.type == MOUSEBUTTONDOWN:\n self.ball.speed = self.ball.DEFAULT_SPEED\n # GAMEOVER\n elif self.state == self.gameover:\n # If any key is pressed make a new game\n if event.type == 
KEYDOWN or event.type == MOUSEBUTTONDOWN:\n self.__init__()\n\n def update(self, delta):\n # ---- INTRO ----\n if self.state == self.intro:\n pass\n # ---- PLAYING ----\n elif self.state == self.playing:\n # Get all sprites that collide with the ball. If the player is one\n # of them remove it from the list, and delete the remaining blocks\n colls = pygame.sprite.spritecollide(self.ball,\n self.collide_sprites,\n False)\n self.ball.collide_with(colls)\n if self.player in colls:\n colls.remove(self.player)\n self.score += len(colls)\n self.collide_sprites.remove(colls)\n self.all_sprites.remove(colls)\n\n # Update the player and the ball\n self.player.update()\n self.ball.update()\n\n # Test if the game is over\n if not self.ball.alive:\n self.state = self.gameover\n # ---- GAMEOVER ----\n elif self.state == self.gameover:\n pass\n\n def draw(self, surf):\n if self.state == self.intro:\n self.drawText(surf, 'Press a button to start', 140, 200)\n elif self.state == self.playing:\n self.all_sprites.draw(surf)\n self.drawText(surf, str(self.score), 10, 20)\n elif self.state == self.gameover:\n self.drawText(surf, 'Wanna play again?', 140, 200)\n self.drawText(surf,\n 'Your score was {}'.format(self.score),\n 140,\n 240)\n\n def drawText(self, surface, text, x, y):\n textobj = self.text_font.render(text, 1, self.text_color)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n\n\ndef main():\n bb = BrickBuster()\n bb.run()\n\nif __name__ == '__main__':\n main()\n" } ]
4
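`game.py` above states that every new game should extend `game.Game`, with `evnt_hndlr`, `update`, and `draw` as the hooks the main loop calls. A minimal sketch of such a subclass; the drifting square is invented purely for illustration:

```python
import pygame
import game


class DemoGame(game.Game):
    """Smallest useful game.Game subclass: a square drifting right."""

    def __init__(self):
        super(DemoGame, self).__init__()
        self.x = 0.0

    def update(self, delta):
        # delta is the milliseconds since the last frame, as returned
        # by clock.tick() inside Game.run().
        self.x = (self.x + 0.1 * delta) % self.WINDOWWIDTH

    def draw(self, surf):
        # Game.run() has already filled surf with the background color.
        rect = (int(self.x), self.WINDOWHEIGHT // 2, 20, 20)
        pygame.draw.rect(surf, pygame.Color('green'), rect)


if __name__ == '__main__':
    DemoGame().run()
```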
lsiudut/pymoveseeker
https://github.com/lsiudut/pymoveseeker
99469d3e8b7150e16992497212973c66c36760ed
a36ae0b225669202830e82c04709215605b44f3e
5d085e5aaf95311ce8ee58a2f8d07d1a1fbf168a
refs/heads/master
2021-01-19T12:26:34.022002
2017-08-19T11:23:01
2017-08-19T11:23:01
100,788,282
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5644204616546631, "alphanum_fraction": 0.7902964949607849, "avg_line_length": 40.22222137451172, "blob_id": "d0335d10b47452f242e7a16438a8607a6acd8b0e", "content_id": "0eabe76b55c35b8ae1c96d8bc45bfc70c1371f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1856, "license_type": "no_license", "max_line_length": 311, "num_lines": 45, "path": "/README.md", "repo_name": "lsiudut/pymoveseeker", "src_encoding": "UTF-8", "text": "# Python Move Seeker\n\n### Hi, my name is Łukasz\nPurely for the needs of the open source community I'm giving you this code. It was written only for my needs, I only made sure that it actually works (for me) and flake8 is not overly complaining about it.\n\nThe alghoritm is shamelessly stolen from https://github.com/cedricve/motion-detection, simplified and rewritten to Python.\n\n### What's the story?\n\nSo I have two cats. Lovely beasts. Unfortunately one of them got very sick when he was just a kitten. Because of that I set bunch of cameras that were looking at them when I was absent and this way I was making sure that he's fine when I was away. Obviously I was recording everything. Even after he got better.\n\nThis way I ended up with 600GB of videos, 90% of each was static image of sleeping cats. I could either delete them all or find a way to find the timespans where there is actual movement. Obviosuly I went with the latter. Computer watched all videos for me.\n\nThis is not exact code that I used but it can be easily adapted to read a file and print out timestamps when the move is detected.\n\n### Coool, how to use it?\n\nAs you wish :).\n\nThis one reads camera 0 from the system. It's very simple to read a file, just replace `cv2.VideoCapture()` argument to point to a file:\n\nThe output is time in milliseconds and number of measured changes.\n```\n> ./pymoveseeker.py \n15397.74325908558 1481\n15497.728604923797 1725\n15597.713950762016 1277\n15697.699296600234 1160\n15797.684642438453 1558\n15997.655334114888 2459\n16097.640679953107 1069\n16197.626025791324 1255\n16297.611371629542 945\n16397.59671746776 1219\n17097.494138335285 1776\n17397.450175849943 1696\n20596.98124267292 1786\n21596.8347010551 2051\n21696.820046893317 1032\n21796.805392731534 1691\n21996.77608440797 3193\n24496.409730363423 3191\n24596.39507620164 1704\n47293.06858147714 3378\n```\n" }, { "alpha_fraction": 0.542879045009613, "alphanum_fraction": 0.5895864963531494, "avg_line_length": 23.641510009765625, "blob_id": "d85474928883fa0692bad02f04ea2c86e2f4db58", "content_id": "e76f06151da32af8734e44ba75f27a1018499134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1306, "license_type": "no_license", "max_line_length": 67, "num_lines": 53, "path": "/pymoveseeker.py", "repo_name": "lsiudut/pymoveseeker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# shamelessly stolen, simplified and adapted to Python\n# from https://github.com/cedricve/motion-detection\n\nimport cv2\n\nkernel_ero = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n\nvcap = cv2.VideoCapture(0)\n\n\ndef readframe(vcap):\n ret, frame = vcap.read()\n if not ret:\n return ret, None, None\n ts = vcap.get(cv2.CAP_PROP_POS_MSEC)\n frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return ret, ts, frame\n\n\nbuff = []\nwhile len(buff) < 3:\n ret, ts, frame = readframe(vcap)\n if ret:\n buff.append(frame)\n\nwhile True:\n 
ret, ts, frame = readframe(vcap)\n if ret:\n buff.append(frame)\n buff.pop(0)\n\n d1 = cv2.absdiff(buff[0], buff[2])\n d2 = cv2.absdiff(buff[2], buff[1])\n motion = cv2.bitwise_and(d1, d2)\n ret, motion = cv2.threshold(motion, 25, 255, cv2.THRESH_BINARY)\n cv2.erode(motion, motion, kernel_ero)\n\n mean, stddev = cv2.meanStdDev(motion)\n if stddev[0][0] > 20:\n motionum = 0\n h, w = motion.shape\n for x in range(0, w-1, 2):\n for y in range(0, h-1, 2):\n if motion[y][x] == 255:\n motionum += 1\n\n print(f\"{ts} {motionum}\")\n\n cv2.imshow('VIDEO', motion)\n cv2.waitKey(1)\n" } ]
2
Oriphiel/Blog-API
https://github.com/Oriphiel/Blog-API
51c3dc5b097fd6fe73003b8f797835bafb87eeeb
87e1c7666b945335c339cccf855cf48b08be40de
576335ba9ac3ea3c05b4efb846fca42ac1663aba
refs/heads/master
2021-05-14T02:06:01.344341
2018-07-18T16:22:13
2018-07-18T16:22:13
116,584,742
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.707317054271698, "avg_line_length": 31.799999237060547, "blob_id": "437775ba2dbce64b1075c804dcca609a458d6ae5", "content_id": "49e801f67571c816894b7a910368d9beccd5bed8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 57, "num_lines": 5, "path": "/venv/Scripts/django-admin.py", "repo_name": "Oriphiel/Blog-API", "src_encoding": "UTF-8", "text": "#!C:\\Users\\tony_\\Desktop\\Blog API\\venv\\Scripts\\python.exe\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n" } ]
1
zakeerm1212/program
https://github.com/zakeerm1212/program
7268c205b21d2ca7472864ea479b20b5c5d426cc
64405c0e1841ee663597558bd58346ad4b3f5064
58c06f37898ca6fce3294613017c937ad3a189e4
refs/heads/master
2020-06-26T16:15:49.301528
2019-09-11T23:47:15
2019-09-11T23:47:15
199,682,968
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 25, "blob_id": "dc4a6ed786682c87062c2cbe184fe7c1e0767791", "content_id": "616f8b21337dc84adff26810b9c97b1cfd784b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/food143.py", "repo_name": "zakeerm1212/program", "src_encoding": "UTF-8", "text": "print(\"Glad to meet you\")" } ]
1
hdoupe/Matchups
https://github.com/hdoupe/Matchups
53de6e4d86e143a582a475106465def1c070d849
a2c6ba64c4cf3553dd07ebc32d170fa349d8c3b1
bc1a999601c38cb274e7eaa1c522c553a911a655
refs/heads/master
2021-11-03T07:16:12.688496
2021-10-11T14:59:36
2021-10-11T14:59:36
160,841,696
1
3
MIT
2018-12-07T15:32:06
2019-10-09T17:43:54
2020-01-16T21:00:35
Python
[ { "alpha_fraction": 0.6238757371902466, "alphanum_fraction": 0.6451349258422852, "avg_line_length": 26.177778244018555, "blob_id": "0beecbd3afbc83e940eea16f0a88621e974afc41", "content_id": "e9c90ff391de44ba0f3004286660bd1119af460d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "permissive", "max_line_length": 95, "num_lines": 45, "path": "/data/build_data.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n# people from: https://raw.githubusercontent.com/chadwickbureau/register/master/data/people.csv\n# sc from:\nfrom pybaseball import statcast\n\nsc = statcast(start_dt=\"2012-01-01\", end_dt=\"2021-01-01\")\nsc.to_parquet(\"statcast_dump.parquet\", engine=\"fastparquet\")\n\npeople = pd.read_csv(\"github://chadwickbureau:register@master/data/people.csv\")\n# sc = pd.read_parquet(\"statcast_dump.parquet\", engine=\"fastparquet\")\npeople[\"batter_name\"] = people.name_first + \" \" + people.name_last\nmerged = pd.merge(\n sc,\n people.loc[:, [\"key_mlbam\", \"batter_name\"]],\n how=\"left\",\n left_on=\"batter\",\n right_on=\"key_mlbam\",\n)\ncols2keep = [\n \"player_name\",\n \"batter_name\",\n \"pitch_type\",\n \"game_date\",\n \"release_speed\",\n \"events\",\n \"launch_speed\",\n \"woba_value\",\n \"bb_type\",\n \"balls\",\n \"strikes\",\n \"outs_when_up\",\n \"at_bat_number\",\n \"type\",\n \"plate_x\",\n \"plate_z\",\n \"stand\",\n]\nsc = merged.loc[:, cols2keep]\nsc.to_parquet(\"statcast.parquet\", engine=\"pyarrow\")\n\nsc[\"date\"] = pd.to_datetime(merged[\"game_date\"])\nrecent = sc.loc[sc.date > \"2020-01-01\", :]\nrecent.drop(columns=[\"date\"], inplace=True)\nrecent.to_parquet(\"statcast_recent.parquet\", engine=\"pyarrow\")\n" }, { "alpha_fraction": 0.5631579160690308, "alphanum_fraction": 0.5947368144989014, "avg_line_length": 30.66666603088379, "blob_id": "36670c1074a94eb67e79c4dd0d432f950f581577", "content_id": "e3341428fb310cc2550bf2137c6f0e2a280a8fe5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "permissive", "max_line_length": 75, "num_lines": 18, "path": "/cs-config/cs_config/tests/test_functions.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "from cs_kit import CoreTestFunctions\n\nfrom cs_config import functions\n\n\nclass TestFunctions1(CoreTestFunctions):\n get_version = functions.get_version\n get_inputs = functions.get_inputs\n validate_inputs = functions.validate_inputs\n run_model = functions.run_model\n ok_adjustment = {\n \"matchup\": {\n \"pitcher\": [{\"value\": \"Max Scherzer\"}],\n \"batter\": [{\"value\": [\"Freddie Freeman\"]}],\n \"start_date\": [{\"value\": \"2020-10-19T04:00:00.000Z\"}],\n }\n }\n bad_adjustment = {\"matchup\": {\"pitcher\": [{\"value\": \"Not a pitcher\"}]}}\n" }, { "alpha_fraction": 0.6865671873092651, "alphanum_fraction": 0.6865671873092651, "avg_line_length": 13.357142448425293, "blob_id": "3fdc927fcf59e5d219ebbcc0f80e730cd19b8841", "content_id": "dd04e17f410fd15b4a575b73bb5d1bb768ac19ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 402, "license_type": "permissive", "max_line_length": 61, "num_lines": 28, "path": "/README.md", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "# Matchups\n\nProvides pitch data on pitcher and batter matchups.\n\nSetup\n-------------\n\n# Set conda 
environment.\n```\nconda create -n matchups-dev pip pandas numpy pyarrow\nconda activate matchups-dev\n```\n\n# Install from source\n```\npip install -e .\n```\n\n# Install using pip\n```\npip install git+https://github.com/hdoupe/Matchups.git@master\n```\n\n# Run tests\n```\npip install -r requirements.txt\npy.test\n```\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 16, "blob_id": "6f67330a175e2fd37fae3688f15c268751dae635", "content_id": "1d6011b6d567ed1b9d471fe1b809f929f786debe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "permissive", "max_line_length": 31, "num_lines": 6, "path": "/matchups/__init__.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "name = \"matchups\"\n\n__version__ = \"2021\"\n\nfrom matchups.matchups import *\nfrom matchups.utils import *\n" }, { "alpha_fraction": 0.5807291865348816, "alphanum_fraction": 0.5815972089767456, "avg_line_length": 23.510639190673828, "blob_id": "1bc16b0a2c7d83763f193073f16f7cb4cf025bbf", "content_id": "f25d768bca1a8b755e489e3f62778f003cdebb56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1152, "license_type": "permissive", "max_line_length": 75, "num_lines": 47, "path": "/matchups/utils.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import json\nimport os\n\n\nCURRENT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_choices():\n with open(os.path.join(CURRENT_PATH, \"playerchoices.json\")) as f:\n return json.loads(f.read())\n\n\ndef pdf_to_clean_html(pdf):\n \"\"\"Takes a PDF and returns an HTML table without any deprecated tags or\n irrelevant styling\"\"\"\n return (pdf.to_html()\n .replace(' border=\"1\"', '')\n .replace(' style=\"text-align: right;\"', ''))\n\n\ndef renamedf(df, normalized=True):\n index_map = {\n \"balls\": \"Balls\",\n \"strikes\": \"Strikes\",\n \"type\": \"Pitch Outcome\",\n \"pitch_type\": \"Pitch Type\",\n }\n\n template_col_map = {\n \"type\": \"{op} of pitch outcome by count\",\n \"pitch_type\": \"{op} of pitch type by count\",\n }\n\n if normalized:\n op = \"Proportion\"\n else:\n op = \"Count\"\n\n # rename index\n df.index.names = [index_map[oldname] for oldname in df.index.names]\n\n col_map = {}\n for oldname, newname in template_col_map.items():\n if oldname in df.columns:\n col_map[oldname] = newname.format(op=op)\n\n return df.rename(columns=col_map)\n" }, { "alpha_fraction": 0.7058106660842896, "alphanum_fraction": 0.7265339493751526, "avg_line_length": 29.774999618530273, "blob_id": "01fd420fcf93223a42ad78e1d25678e3f68bb72c", "content_id": "1be59313eebb8921ae019ce66b1247c577058f52", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2461, "license_type": "permissive", "max_line_length": 552, "num_lines": 80, "path": "/comptemplate.md", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "# COMP Publishing Information\n\nApp Name\n-----------------\n*What's the name of the app?*\n\nCompBaseball\n\nApp Overview\n----------------------------------------\n*What does this app do? Must be less than 1000 characters.*\n\n[CompBaseball](https://github.com/hdoupe/compbaseball) is an entertaining way to document COMP and demonstrate its abilities. Select a date range using the format YYYY-MM-DD. 
Keep in mind that CompBaseball only provides data on matchups going back to 2008. Two datasets are offered to run this model: one that only has the most recent season, 2018, and one that contains data on every single pitch going back to 2008. Next, select your favorite pitcher and some batters who he's faced in the past. Click submit to start analyzing the selected matchups!\n\n\nPython Functions\n-------------------------\n*Insert code snippets satisfying the requirements detailed in the [functions documentation.](ENDPOINTS.md)*\n\n\n**Package Defaults:** Get the default Model Parameters and their meta data\n\n```python\n# code snippet here\nfrom compbaseball import baseball\n\n\ndef package_defaults(**meta_parameters):\n return baseball.get_inputs(use_2018=meta_parameters[\"use_2018\"])\n```\n\n\n**Parse user adjustments:** Do model-specific formatting and validation on the user adjustments\n\n```python\n# code snippet here\nfrom compbaseball import baseball\n\n\ndef parse_user_inputs(params, jsonparams, errors_warnings,\n **meta_parameters):\n # parse the params, jsonparams, and errors_warnings further\n use_2018 = meta_parameters[\"use_2018\"]\n params, jsonparams, errors_warnings = baseball.parse_inputs(\n params, jsonparams, errors_warnings, use_2018=use_2018)\n return params, jsonparams, errors_warnings\n```\n\n\n**Run simulation:** Submit the user adjustments (or none) to the model to run the simulations\n\n```python\n# code snippet here\nfrom compbaseball import baseball\n\n\ndef run_simulation(use_2018, user_mods):\n result = baseball.get_matchup(use_2018, user_mods)\n return result\n```\n\nEnvironment\n---------------\n*Describe how to install this project and its resource requirements as detailed in [the environment documentation](ENVIRONMENT.md).*\n\n\n**Installation:** How is this project installed?\n\n```\nconda install pandas pyarrow bokeh\npip install pybaseball compbaseball\n```\n\n**Memory:** How much memory in GB will this project require to run?\n\n2 GB\n\n**Run time:** About how long will it take to run this project?\n\n20 seconds" }, { "alpha_fraction": 0.5639415979385376, "alphanum_fraction": 0.5747445225715637, "avg_line_length": 32.08695602416992, "blob_id": "b5cd3370ffd355bce8b016ebc263f045e220cb91", "content_id": "6245ed62898f1765857b4ac22d495dcc0a6b33d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6850, "license_type": "permissive", "max_line_length": 115, "num_lines": 207, "path": "/matchups/matchups.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import json\nimport os\nfrom datetime import datetime, date\n\n\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.embed import json_item\nfrom bokeh.palettes import d3\nfrom bokeh.models.widgets import Tabs, Panel\nimport pandas as pd\nimport numpy as np\n\nimport paramtools\nimport marshmallow as ma\nfrom marshmallow import ValidationError\n\nfrom matchups.utils import CURRENT_PATH, renamedf, pdf_to_clean_html\nfrom matchups import __version__\n\nCURRENT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\ndef count_plot(df, title):\n p = figure(title=title, match_aspect=True)\n p.grid.visible = False\n strike_zone_cds = ColumnDataSource(\n {\n \"x\": [-8.5 / 12, 8.5 / 12],\n \"x_side1\": [-8.5 / 12, -8.5 / 12],\n \"x_side2\": [8.5 / 12, 8.5 / 12],\n \"top\": [3.0, 3.0],\n \"bottom\": [1.2, 1.2],\n \"side1\": [3.0, 1.2],\n \"side2\": [1.2, 3.0],\n }\n )\n p.line(x=\"x\", 
y=\"top\", line_width=3, color=\"red\", source=strike_zone_cds)\n p.line(x=\"x\", y=\"bottom\", line_width=3, color=\"red\", source=strike_zone_cds)\n p.line(x=\"x_side1\", y=\"side1\", line_width=3, color=\"red\", source=strike_zone_cds)\n p.line(x=\"x_side2\", y=\"side2\", line_width=3, color=\"red\", source=strike_zone_cds)\n pitch_types = df.pitch_type.unique()\n palette = d3[\"Category20\"][max(3, pitch_types.shape[0])]\n for ix, (pitch_type, df) in enumerate(df.groupby(\"pitch_type\")):\n p.circle(\n -df.plate_x,\n df.plate_z,\n legend_label=pitch_type,\n color=palette[ix],\n size=10,\n alpha=1,\n muted_color=palette[ix],\n muted_alpha=0.2,\n )\n p.legend.click_policy = \"hide\"\n return p\n\n\ndef count_panels(df, main_title):\n p = count_plot(df, main_title)\n panels = [Panel(child=p, title=\"All counts\")]\n\n for (balls, strikes), df in df.groupby([\"balls\", \"strikes\"]):\n panels.append(\n Panel(\n child=count_plot(\n df, f\"{main_title} (balls={balls}, strikes={strikes})\"\n ),\n title=f\"{balls}-{strikes}\",\n )\n )\n\n tabs = Tabs(tabs=panels)\n return tabs\n\n\ndef append_output(df, title, renderable, downloadable):\n if len(df) == 0:\n renderable.append(\n {\n \"media_type\": \"table\",\n \"title\": title,\n \"data\": \"<p><b>No matchups found.</b></p>\",\n }\n )\n else:\n data = json_item(count_panels(df, title))\n renderable.append({\"media_type\": \"bokeh\", \"title\": title, \"data\": data})\n downloadable.append({\"media_type\": \"CSV\", \"title\": title, \"data\": df.to_csv()})\n\n\nclass MetaParams(paramtools.Parameters):\n array_first = True\n defaults = {\n \"use_full_sample\": {\n \"title\": \"Use Full Data\",\n \"description\": \"Flag that determines whether Matchups uses the 10 year data set or the 2020 data set.\",\n \"type\": \"bool\",\n \"value\": True,\n \"validators\": {\"choice\": {\"choices\": [True, False]}},\n }\n }\n\n\nclass MatchupsParams(paramtools.Parameters):\n defaults_template = os.path.join(CURRENT_PATH, \"defaults.json\")\n\n def __init__(self, *args, **kwargs):\n players = pd.read_parquet(\n os.path.join(CURRENT_PATH, \"players.parquet\"), engine=\"fastparquet\"\n )\n\n with open(self.defaults_template, \"r\") as f:\n self.defaults = json.loads(f.read())\n\n self.defaults[\"pitcher\"][\"validators\"][\"choice\"][\"choices\"] = [\n \"Max Scherzer\"\n ] + players.players.tolist()\n self.defaults[\"batter\"][\"validators\"][\"choice\"][\"choices\"] = [\n \"Freddie Freeman\"\n ] + players.players.tolist()\n\n super().__init__(*args, **kwargs)\n\n\ndef get_inputs(meta_params_dict):\n meta_params = MetaParams()\n meta_params.adjust(meta_params_dict)\n params = MatchupsParams()\n params.set_state(use_full_sample=meta_params.use_full_sample.tolist())\n\n # Drop choice lists to reduce JSON size.\n matchup_params = params.dump()\n matchup_params[\"pitcher\"][\"validators\"] = {}\n matchup_params[\"batter\"][\"validators\"] = {}\n return {\n \"meta_parameters\": meta_params.dump(),\n \"model_parameters\": {\"matchup\": matchup_params},\n }\n\n\ndef fixup_dates(adjustment):\n adj = adjustment[\"matchup\"]\n for var in [\"start_date\", \"end_date\"]:\n if var in adj:\n if not isinstance(adj[var], list):\n adj[var] = [{\"value\": adj[var]}]\n for value in adj[var]:\n try:\n value[\"value\"] = (\n ma.fields.DateTime()\n ._deserialize(value[\"value\"], None, None)\n .date()\n )\n except Exception as e:\n print(\"exception parsing:\", value)\n print(e)\n pass\n\n\ndef validate_inputs(meta_param_dict, adjustment, errors_warnings):\n # matchups doesn't look 
at meta_param_dict for validating inputs.\n params = MatchupsParams()\n fixup_dates(adjustment)\n params.adjust(adjustment[\"matchup\"], raise_errors=False)\n errors_warnings[\"matchup\"][\"errors\"].update(params.errors)\n return {\"errors_warnings\": errors_warnings}\n\n\ndef get_matchup(meta_param_dict, adjustment):\n meta_params = MetaParams()\n meta_params.adjust(meta_param_dict)\n params = MatchupsParams()\n params.set_state(use_full_sample=meta_params.use_full_sample.tolist())\n fixup_dates(adjustment)\n params.adjust(adjustment[\"matchup\"])\n print(\n \"getting data according to: \",\n meta_params.specification(),\n params.specification(),\n )\n if meta_params.use_full_sample:\n path = os.path.join(CURRENT_PATH, \"statcast.parquet\")\n else:\n path = os.path.join(CURRENT_PATH, \"statcast_recent.parquet\")\n scall = pd.read_parquet(path, engine=\"fastparquet\")\n print(\"data read\")\n scall[\"date\"] = pd.to_datetime(scall[\"game_date\"])\n sc = scall.loc[\n (scall.date >= pd.Timestamp(params.start_date[0][\"value\"]))\n & (scall.date < pd.Timestamp(params.end_date[0][\"value\"]))\n ]\n print(\"filtered by date\")\n\n pitcher, batters = params.pitcher[0][\"value\"], params.batter[0][\"value\"]\n renderable = []\n downloadable = []\n pitcher_df = sc.loc[(scall[\"player_name\"] == pitcher), :]\n append_output(pitcher_df, f\"{pitcher} v. All batters\", renderable, downloadable)\n\n for batter in batters:\n batter_df = pitcher_df.loc[\n (scall[\"player_name\"] == pitcher) & (scall[\"batter_name\"] == batter), :\n ]\n append_output(batter_df, f\"{pitcher} v. {batter}\", renderable, downloadable)\n\n return {\"renderable\": renderable, \"downloadable\": downloadable}\n\n" }, { "alpha_fraction": 0.7027778029441833, "alphanum_fraction": 0.7138888835906982, "avg_line_length": 39, "blob_id": "42e78073c0ee3b9049ee8def70a2b9cab1b6a1d0", "content_id": "bd13a6366520456217b4a1d645a49914c7514224", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "permissive", "max_line_length": 87, "num_lines": 9, "path": "/data/write_players.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import pandas as pd\n\npeople = pd.read_csv(\"github://chadwickbureau:register@master/data/people.csv\")\npeople = people.loc[people.mlb_played_last > 2009, :]\nall_players = pd.DataFrame.from_dict(\n {\"players\": sorted((people.name_first + \" \" + people.name_last).dropna().unique())}\n)\n\nall_players.to_parquet(\"../matchups/players.parquet\", engine=\"fastparquet\")\n" }, { "alpha_fraction": 0.5437352061271667, "alphanum_fraction": 0.5437352061271667, "avg_line_length": 23.882352828979492, "blob_id": "a3b60b863e69f2d7ed564b7f68634dd26862bf4f", "content_id": "fdc662d2c2bb66bf0c55dfffd297df193b593019", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "permissive", "max_line_length": 79, "num_lines": 17, "path": "/matchups/tests/test_outputs.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import matchups\n\n\ndef test_get_matchup():\n adj = {\n \"matchup\": {\n \"batter\": [{\"value\": [\"Freddie Freeman\"], \"use_full_sample\": False}]\n }\n }\n assert matchups.get_matchup({\"use_full_sample\": False}, adj)\n\n\ndef test_get_matchup_empty():\n adj = {\n \"matchup\": {\"pitcher\": [{\"value\": \"Freddie Freeman\", \"use_full_sample\": False}]}\n }\n assert matchups.get_matchup({\"use_full_sample\": False}, 
adj)\n" }, { "alpha_fraction": 0.5947812795639038, "alphanum_fraction": 0.5986185669898987, "avg_line_length": 33.28947448730469, "blob_id": "c3f16166d891ad20f34a24c2c37156e72380a39a", "content_id": "15556ac21d41b1639b93e1941a1b11b5aa5312aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1303, "license_type": "permissive", "max_line_length": 72, "num_lines": 38, "path": "/matchups/tests/test_inputs.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import matchups\n\n\ndef test_MatchupsParams():\n params = matchups.MatchupsParams()\n assert params\n\n\ndef test_update_params():\n params = matchups.MatchupsParams()\n adj = {\"batter\": [{\"use_full_sample\": False, \"value\": [\"Alex Rodriguez\"]}]}\n params.adjust(adj)\n params.set_state(use_full_sample=False)\n assert params.batter == adj[\"batter\"]\n\n\ndef test_parse_inputs():\n meta_params = {\"use_full_sample\": True}\n adj = {\"matchup\": {\"batter\": [\"Alex Rodriguez\"]}}\n ew = {\"matchup\": {\"errors\": {}, \"warnings\": {}}}\n assert matchups.validate_inputs(meta_params, adj, ew)\n\n\ndef test_parse_bad_inputs():\n meta_params = {\"use_full_sample\": True}\n adj = {\"matchup\": {\"batter\": [1], \"pitcher\": 1234,}}\n ew = {\"matchup\": {\"errors\": {}, \"warnings\": {}}}\n ew = matchups.validate_inputs(meta_params, adj, ew)\n ew = ew[\"errors_warnings\"]\n assert ew[\"matchup\"][\"errors\"][\"batter\"] == [\"Not a valid string.\"]\n assert ew[\"matchup\"][\"errors\"][\"pitcher\"] == [\"Not a valid string.\"]\n\n adj = {\"matchup\": {\"batter\": [\"fake batter\"],}}\n ew = {\"matchup\": {\"errors\": {}, \"warnings\": {}}}\n ew = matchups.validate_inputs(meta_params, adj, ew)\n ew = ew[\"errors_warnings\"]\n exp = ['batter \"fake batter\" must be in list of choices.']\n assert ew[\"matchup\"][\"errors\"][\"batter\"] == exp\n" }, { "alpha_fraction": 0.7536585330963135, "alphanum_fraction": 0.7536585330963135, "avg_line_length": 23.117647171020508, "blob_id": "f00413aac96f02f6149c8b5bcb45b03a8887b0b4", "content_id": "a252d5fb3482cc8bc3650b08043cc0eee802ec01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "permissive", "max_line_length": 81, "num_lines": 17, "path": "/cs-config/cs_config/functions.py", "repo_name": "hdoupe/Matchups", "src_encoding": "UTF-8", "text": "import matchups\n\n\ndef get_version():\n return matchups.__version__\n\n\ndef get_inputs(meta_param_dict):\n return matchups.get_inputs(meta_param_dict)\n\n\ndef validate_inputs(meta_param_dict, adjustment, errors_warnings):\n return matchups.validate_inputs(meta_param_dict, adjustment, errors_warnings)\n\n\ndef run_model(meta_param_dict, adjustment):\n return matchups.get_matchup(meta_param_dict, adjustment)\n" } ]
11
Dhruvil1304/https-github.com-avish28-untitled
https://github.com/Dhruvil1304/https-github.com-avish28-untitled
967c722b1ad217a0430510d35cf761700556dd50
46451607e3c6e3aada22e7de12731237c3cfaafe
8e49fa23f7975daf8f008e7a7201e080ae0f72f7
refs/heads/master
2022-12-01T19:25:00.368041
2020-08-13T16:19:25
2020-08-13T16:19:25
287,326,417
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5484460592269897, "alphanum_fraction": 0.5502741932868958, "avg_line_length": 16.54838752746582, "blob_id": "9d59f76aae5b8fac0ec4dc06b4fceeab0baeb005", "content_id": "135ee1afa7a2fc2e199e9b42436250b4a7a79f90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 30, "num_lines": 31, "path": "/Airport.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "class Airport():\n def __init__(self):\n self.id=0\n self.name=''\n self.city=''\n self.type='dom'\n\n def setairid(self,id):\n self.id = id\n\n def setairname(self,name):\n self.name=name\n\n def setaircity(self,city):\n self.city=city\n\n def setairtype(self,type):\n self.type=type\n\n\n def getairid(self):\n return self.id\n\n def getairname(self):\n return self.name\n\n def getaircity(self):\n return self.city\n\n def getairtype(self):\n return self.type\n\n\n\n" }, { "alpha_fraction": 0.6780821681022644, "alphanum_fraction": 0.6900684833526611, "avg_line_length": 25.590909957885742, "blob_id": "fd239ea94624d8c574db8cba889ef1e8fe030529", "content_id": "1b5aa40cb0f53468d3abb40a6f97606311dc548f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 118, "num_lines": 22, "path": "/Custdb.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom Customer import Customer\n\nconnection=psycopg2.connect(host=\"localhost\", dbname=\"Booking\", user=\"postgres\", password=\"avish283\")\ncur = connection.cursor()\n\ndef insertcust(c1):\n cur.execute(\"insert into customers values(%s,%s,%s,%s)\",(c1.getcname(),c1.getcid(),c1.getcmob(),c1.getbookings()))\n connection.commit()\n return True\n\ndef viewcust():\n cur.execute(\"select * from customers\")\n return cur\n\ndef findcust(id):\n cur.execute(\"select * from customers where cust_id=%s\",(id,))\n return cur\n\ndef close():\n cur.close()\n connection.close()" }, { "alpha_fraction": 0.6868686676025391, "alphanum_fraction": 0.6902356743812561, "avg_line_length": 22.799999237060547, "blob_id": "9cd2e4b1514ce6454be97cbcfb3f2873b1fe347e", "content_id": "8211c04aa3dfff88bf2f7d2089a6487299fb3cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "no_license", "max_line_length": 126, "num_lines": 25, "path": "/Airdb.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom Airport import Airport\n\nconnection=psycopg2.connect(host=\"localhost\", dbname=\"Booking\", user=\"postgres\", password=\"avish283\")\ncur = connection.cursor()\n\ndef insertair(air):\n cur.execute(\"insert into airport values(%s,%s,%s,%s)\",(air.getaircity(),air.getairid(),air.getairname(),air.getairtype()))\n connection.commit()\n return True\n\ndef viewair():\n cur.execute(\"select * from airport\")\n return cur\n\ndef findair(search):\n cur.execute(\"select * from airport where air_id=%s\",(search,))\n return cur\n\n\n\n\ndef close():\n cur.close()\n connection.close()" }, { "alpha_fraction": 0.3432769775390625, "alphanum_fraction": 0.3610650897026062, "avg_line_length": 42.248802185058594, "blob_id": "0bf31b66aa1618a8e143b418535d585bfdbc7024", "content_id": "5f3201485198a9f6fdb467cc5e4f8ebbb5916fdd", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 9051, "license_type": "no_license", "max_line_length": 175, "num_lines": 209, "path": "/Booking.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "\nfrom Airdb import *\nfrom Flidb import *\nfrom Custdb import *\n\n\nop=1\nwhile op==1:\n print('Please select ur choice:')\n print('\\t\\t 1.Airport')\n print('\\t\\t 2.Flights')\n print('\\t\\t 3.Customer')\n print('\\t\\t 4.exit')\n choice=int(input('Enter ur choice:'))\n\n if choice==1:\n op1=1\n while op1 !=5:\n print('\\t1 for entry')\n print('\\t2 for view')\n print('\\t3 for search')\n print('\\t4 for view flights')\n print('\\t5 for main menu')\n op1=int(input('Enter ur choice:'))\n\n if op1==1:\n add='y'\n while add=='y':\n print('-------Entry--------')\n a1=Airport()\n a1.setairid(int(input('Enter Airport id:')))\n a1.setaircity(input('Enter Airport city:'))\n a1.setairname(input('Enter Airport name:'))\n a1.setairtype(input('Enter Airport type:'))\n\n if insertair(a1):\n print(\"You have added airport properly.\")\n else:\n print(\"Error in adding data of airport.. \")\n add=input('Do u want to add another airport?(y/n)')\n\n elif op1==2:\n print('---------View----------')\n print('CITY \\t ID \\t NAME \\t TYPE ')\n for row in viewair():\n print(row[0],'\\t',row[1],'\\t',row[2],'\\t',row[3])\n print('------------------------------------------')\n\n\n elif op1==3:\n print('--------Search--------')\n search=int(input('Enter id for get details:'))\n print('CITY \\t ID \\t NAME \\t TYPE ')\n for row in findair(search):\n print(row[0], '\\t', row[1], '\\t', row[2], '\\t', row[3])\n print('------------------------------------------')\n\n\n\n elif op1==4:\n print('--------Available FLights---------')\n source=int(input('Enter source id of airport where u want to check flights:'))\n print('\\tID \\t SOURCE \\t DESTINATION \\t DATE \\t TAKEOFF \\t CLASS \\t AIRLINE \\t VIA \\t LANDING ')\n\n for row in available(source):\n print('\\t',row[0], '\\t', row[1], '\\t', row[2], '\\t', row[3], '\\t', row[4], '\\t', row[5], '\\t', row[6],'\\t', row[7],' \\t',row[8])\n print('--------------------------------------------------------------------------------------------------------------------------')\n\n\n\n\n\n elif choice==2:\n op5=1\n while op5 != 4:\n print('\\t1 for entry')\n print('\\t2 for view')\n print('\\t3 for search')\n print('\\t4 for main menu')\n op5 = int(input('Enter ur choice:'))\n\n\n if op5 == 1:\n print('-------Entry--------')\n f1=Flight()\n print('CITY \\t ID \\t NAME \\t TYPE ')\n for row in viewair():\n print(row[0], '\\t', row[1], '\\t', row[2], '\\t', row[3])\n f1.setsource(input('Enter source:'))\n f1.setdesti(input('Enter destination:'))\n f1.setid(int(input('Enter flight id:')))\n f1.setairline(input('Enter flight airline:'))\n f1.setclasss(input('Enter flight class:'))\n f1.setdate(input('Enter flying date:'))\n f1.settakeoff(input('Enter takeoff timing:'))\n f1.setlanding(input('Enter landing timing:'))\n f1.setvia(input('Enter via airport:'))\n if insertfli(f1):\n print(\"You have added flight properly.\")\n else:\n print(\"Error in adding data of flight.. 
\")\n\n elif op5 == 2:\n\n print('-------View--------')\n print('\\tID \\t SOURCE \\t DESTINATION \\t DATE \\t TAKEOFF \\t CLASS \\t AIRLINE \\t VIA \\t LANDING ')\n for row in viewfli():\n print('\\t',row[0],'\\t',row[1],'\\t \\t',row[2],'\\t \\t',row[3],' \\t',row[4],' \\t\\t',row[5],' \\t',row[6],' \\t',row[7],' \\t',row[8])\n print('--------------------------------------------------------------------------------------------------------------------------')\n\n\n\n\n\n elif op5==3:\n searchf=int(input('Enter flight id :'))\n print('\\tID \\t SOURCE \\t DESTINATION \\t DATE \\t TAKEOFF \\t CLASS \\t AIRLINE \\t VIA \\t LANDING ')\n for row in findfli(searchf):\n print('\\t',row[0], '\\t', row[1], '\\t \\t', row[2], '\\t \\t', row[3], ' \\t', row[4], ' \\t\\t', row[5], ' \\t',row[6], ' \\t', row[7],' \\t',row[8])\n print('--------------------------------------------------------------------------------------------------------------------------')\n\n\n\n\n elif choice==3:\n op8 = 1\n while op8 != 4:\n print('\\t1 for entry')\n print('\\t2 for view')\n print('\\t3 for search')\n print('\\t4 for main menu')\n op8 = int(input('Enter ur choice:'))\n\n if op8==1:\n print('-------Entry------')\n c1=Customer()\n c1.setcname(input('Enter customer name: '))\n c1.setcid(int(input('Enter customer id:')))\n c1.setcmob(int(input('Enter customer mobile no:')))\n\n i='yes'\n while i=='yes':\n print('Select source id and destination id')\n print('CITY \\t ID \\t NAME \\t TYPE ')\n for row in viewair():\n print('\\t',row[0], '\\t', row[1], '\\t', row[2], '\\t', row[3])\n print('-----------------------------------------------------')\n\n sid = input(\"Enter Scource Airport city from list :\")\n did = input(\"Enter Destination Ariport city from list :\")\n\n print('======Available flights======')\n print('\\tID \\t SOURCE \\t DESTINATION \\t DATE \\t TAKEOFF \\t CLASS \\t AIRLINE \\t VIA \\t LANDING ')\n for row in avail(sid,did):\n print('\\t',row[0], '\\t', row[1], ' \\t\\t', row[2], '\\t \\t', row[3], ' \\t', row[4], ' \\t\\t', row[5], ' \\t',row[6], ' \\t', row[7],' \\t',row[8])\n print('--------------------------------------------------------------------------------------------------------------------------')\n\n\n select=int(input('Please select ur flight'))\n c1.setbookings(select)\n i=input('Do u want to do another booking?..yes/no')\n if insertcust(c1):\n print(\"Booking confirmed.\")\n else:\n print(\"Error in flight booking.. 
\")\n\n\n\n\n\n elif op8==2:\n print('--------Customers--------')\n\n\n for row in viewcust():\n print('\\tC_NAME \\t C_ID \\t C_MOB \\t C_BOOKINGS')\n print('\\t',row[0],'\\t',row[1],'\\t',row[2],'\\t',row[3])\n print('-----------------------------------------------')\n x=input('Flight details?y/n')\n if x.lower()=='y':\n for r in row[3]:\n print('\\tID \\t SOURCE \\t DESTINATION \\t DATE \\t TAKEOFF \\t CLASS \\t AIRLINE \\t VIA \\t LANDING ')\n for row in findfli(r):\n print('\\t', row[0], '\\t', row[1], '\\t \\t', row[2], '\\t \\t', row[3], ' \\t', row[4],' \\t\\t', row[5], ' \\t', row[6], ' \\t', row[7], ' \\t', row[8])\n print('--------------------------------------------------------------------------------------------------------------------------')\n\n\n\n\n\n elif op8==3:\n\n find=int(input('Enter customer id: '))\n print('\\tC_NAME \\t C_ID \\t C_MOB \\t C_BOOKINGS')\n for row in findcust(find):\n print('\\t',row[0], '\\t', row[1], '\\t', row[2], '\\t', row[3])\n print('-----------------------------------------------------')\n x = input('Flight details?y/n')\n if x.lower() == 'y':\n for r in row[3]:\n print('\\tID \\t SOURCE \\t DESTINATION \\t DATE \\t TAKEOFF \\t CLASS \\t AIRLINE \\t VIA \\t LANDING ')\n for row in findfli(r):\n print('\\t', row[0], '\\t', row[1], '\\t \\t', row[2], '\\t \\t', row[3], ' \\t', row[4],' \\t\\t', row[5], ' \\t', row[6], ' \\t', row[7], ' \\t', row[8])\n print('--------------------------------------------------------------------------------------------------------------------------')\n\n\n\n\n elif choice==4:\n print('You are exited')\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.561188817024231, "alphanum_fraction": 0.5629370808601379, "avg_line_length": 16.363636016845703, "blob_id": "fd8791f49c2c38056c284be7edd41d50fe54ddf8", "content_id": "c04c5c28782642f9b1ea7ba7f81f392190027b2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 33, "num_lines": 33, "path": "/Customer.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "class Customer():\n\n def __init__(self):\n self.cust_name=''\n self.cust_id=0\n self.mob=''\n self.bookings=[]\n\n\n\n def setcname(self,name):\n self.cust_name=name\n\n def setcid(self,id):\n self.cust_id=id\n\n def setcmob(self,mob):\n self.mob=mob\n\n def setbookings(self,fly):\n self.bookings.append(fly)\n\n def getcname(self):\n return self.cust_name\n\n def getcid(self):\n return self.cust_id\n\n def getcmob(self):\n return self.mob\n\n def getbookings(self):\n return self.bookings" }, { "alpha_fraction": 0.5790309906005859, "alphanum_fraction": 0.5845909714698792, "avg_line_length": 17.217391967773438, "blob_id": "7e8bef57ba22cde2e26dc841efde53e9721dff30", "content_id": "9c2d257b330348bd9174bab57fdf4c218ebc37b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1259, "license_type": "no_license", "max_line_length": 33, "num_lines": 69, "path": "/Flight.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "from Airport import Airport\n\nclass Flight:\n\n def __init__(self):\n self.id=0\n self.source=''\n self.destination=''\n self.date=1-1-20\n self.landing=0\n self.takeoff=0\n self.classs=''\n self.via=''\n self.airline=''\n\n def setvia(self,via):\n self.via=via\n\n def setairline(self,airline):\n self.airline=airline\n\n def setsource(self,source):\n 
self.source=source\n\n def setdesti(self,desti):\n self.destination=desti\n\n def setid(self,id):\n self.id=id\n\n def setdate(self,date):\n self.date=date\n\n def setclasss(self,classs):\n self.classs=classs\n\n def settakeoff(self,takeoff):\n self.takeoff=takeoff\n\n def setlanding(self,land):\n self.landing=land\n\n\n def getid(self):\n return self.id\n\n def getdate(self):\n return self.date\n\n def getclasss(self):\n return self.classs\n\n def gettakeoff(self):\n return self.takeoff\n\n def getlanding(self):\n return self.landing\n\n def getsource(self):\n return self.source\n\n def getdesti(self):\n return self.destination\n\n def getvia(self):\n return self.via\n\n def getairline(self):\n return self.airline\n\n\n" }, { "alpha_fraction": 0.6640712022781372, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 27.125, "blob_id": "fd4f5ecfd9b71001d8da23f2c223432fce87b6a2", "content_id": "218eb9518dfa63d61909e96c199470f6fc80e0e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 899, "license_type": "no_license", "max_line_length": 202, "num_lines": 32, "path": "/Flidb.py", "repo_name": "Dhruvil1304/https-github.com-avish28-untitled", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom Flight import Flight\n\nconnection=psycopg2.connect(host=\"localhost\", dbname=\"Booking\", user=\"postgres\", password=\"avish283\")\ncur = connection.cursor()\n\ndef insertfli(f1):\n cur.execute(\"insert into flight values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\",(f1.getid(),f1.getsource(),f1.getdesti(),f1.getdate(),f1.gettakeoff(),f1.getclasss(),f1.getairline(),f1.getvia(),f1.getlanding()))\n connection.commit()\n return True\n\ndef viewfli():\n cur.execute(\"select * from flight\")\n return cur\n\ndef findfli(id):\n cur.execute(\"select * from flight where fli_id=%s\",(id,))\n return cur\n\ndef available(source):\n cur.execute(\"select * from flight where fli_source=%s\",(source,))\n return cur\n\ndef avail(source,desti):\n cur.execute(\"select * from flight where fli_source=%s and fli_desti=%s\", (source,desti))\n return cur\n\n\n\ndef close():\n cur.close()\n connection.close()" } ]
7
professorlust/AI-for-game-2048
https://github.com/professorlust/AI-for-game-2048
c580484f0fa7a3c67da68775226fd15bf14a9815
b0bcbc25dd1237dff1edbfe4f13e350fc93c00fb
153970f62be2d7d05e3d408424f1d2b95cbc722a
refs/heads/master
2020-04-04T22:05:49.254275
2017-04-23T22:01:46
2017-04-23T22:01:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7432299256324768, "alphanum_fraction": 0.7685868740081787, "avg_line_length": 81.89795684814453, "blob_id": "40d66e03b9fa7fa7c499212afa9a2470c850d52c", "content_id": "5288f171c36409b29eff0e39c41307f26aded732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4062, "license_type": "no_license", "max_line_length": 506, "num_lines": 49, "path": "/README.md", "repo_name": "professorlust/AI-for-game-2048", "src_encoding": "UTF-8", "text": "# RL-AI for game 2048\nIt uses reinforcement learning to play game 2048\n\n### update\nNow, user can literally play game 2048 with AI, but it is not powered by reinforcement learning though. Still figure out how RL can apply to this.\n\n### Disclaimer\nFirst all, this project is still in progress. And due to the limit of states, which I am getting trouble on, RL-powered AI can only play 2*2 game following the rule of game 2048. And in authentic game 2048, I use real-time state-value searching algorithm, kind of analogous to what we know as state-value in RL, but it is in more approximating terms. So I may have lied in the title, saying it's the RL-AI for game 2048. Well, it is true in some way, that it is a RL for simplified game 2048 after all - -.\n\n### customize your own AI game engine\nwell, fancy as the title may sound, the process is easy as hell.\n* use LearningProc.py to generate a new array.data file, that is:\n\n`python LearningProc.py`\n\nthat's it, that's the only step. \nRight now it has its own data.array file, a [pickle](https://docs.python.org/2/library/pickle.html)-dumped file that descibes the action-values of all the possible states in this game, which I may have run under the condition of:\n(I can't remember for sure)\n* alpha:0.7\n* Learning times:500000\n* greedy level:0.97\n* Reward Mode:False, the mode in which only winning state gives positive reward, which is the game score, with other state-reward set to be 0\n\nYou are encouraged to use other settings. you may have to run `python LearningProc.py --help` to see further details about all the possible parameters.\n**warning**: you may need a little patience becuase running this file might take a while, especilly when you set the Learning Times to large, say, a million.\n\n### About 2048_automatic.py and 2048_hint.py\nyou can simply run both files without any further arguments: `python 2048_automatic.py` or `python 2048_hint.py`. And it plays 2*2 game powered by RL-AI for you. And if you play authentic 2048 game, you can type `python 2048_automatic.py --real` or `python 2048_hint.py --real`\nyou may not need any other user manual when entering into game, since as far as I see it the game per se is quite self-explanatory. And as name of both files implies, the differences between them are:\n* in 2048_automatic.py, once upon the game, AI engine takes control and make a move automatically *every second*. Basically you lose control of the game and there is nothing you can do but wait until one round of game ends. And then you can restart or quit the game.\n* in 2048_hint.py, users play the game themselves and see the hint provided by AI engine as they stuck on the game.\n\nthe 2*2 AI engines is based on array.data file. The user-interaction part of the game is powered by python package [curses](https://docs.python.org/3.3/howto/curses.html#user-input). 
And most of the code of the game is referenced [here](https://www.shiyanlou.com/courses/368) (it's an online course website, you may have to sign up to check out more).\n\n### test on current 2048 AI in RL-ish but definitely not RL algorithm\nAfter running realGameAI.py for hours, I have collected some data to evaluate the performance of different exploring levels ranging from 2 to 8. And here they are.\n\n![win_ratio](/record/win_rate.png)\n![avg_score](/record/avg_score.png)\n![avg_highscore](/record/avg_highscore.png)\n\nDue to the unacceptably low speed of the algorithm with exploring levels of more than 9, I can only test the AI with the exploring levels shown above. But all the exploring levels with satisfying running speed yield really unsatisfying performance. Not one of them wins more than 50% of the time. And this is nothing but bad.\n\nBut one interesting phenomenon is that the performance seems to peak at level 6 with respect to the range of 2 to 8. And this is counterintuitive to me because the AI should have been smarter when exploring the game states more deeply. Well, it looks like there is some myth over here.\n\n#### All in all, it is still in progress\n\n### Reference\nhttps://www.shiyanlou.com/courses/368\n" }, { "alpha_fraction": 0.6536338329315186, "alphanum_fraction": 0.6692732572555542, "avg_line_length": 25.487804412841797, "blob_id": "7641e3a18f9ba5e7dbb25d6d8912172193a4c16f", "content_id": "445cb3f6f64921fe91d978a09818b25dec430777", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2174, "license_type": "no_license", "max_line_length": 102, "num_lines": 82, "path": "/realGameAI.py", "repo_name": "professorlust/AI-for-game-2048", "src_encoding": "UTF-8", "text": "import GameAgent2\nimport GameAgent\nimport json\nfrom random import choice\nactionTranslate = ['Up', 'Left', 'Down', 'Right']\n\nagent = GameAgent2.GameField()\ndiscount = 0.95\t\t\ndef explore(field, level):\n\tif level == 1:\n\t\trecord = []\n\t\t#for all the possible moves \n\t\tfor move in actionTranslate:\n\t\t\t#set the field\n\t\t\tagent.setField(field)\n\t\t\tif agent.move(move):\n\t\t\t\treward = agent.givereward()\n\t\t\telse:\n\t\t\t\treward = 0\n\n\t\t\trecord.append(reward)\n\n\t\treturn max(enumerate(record),key=lambda x:x[1])\n\telse:\n\t\trecord = []\n\t\t#for all the possible moves \n\t\tfor move in actionTranslate:\n\t\t\t#set the field\n\t\t\tagent.setField(field)\n\t\t\tif agent.move(move):\n\t\t\t\treward = agent.givereward()\n\t\t\t\tvalue_next = explore(agent.field, level-1)[1]\n\t\t\t\tvalue = reward + discount * value_next\n\t\t\telse:\n\t\t\t\treward = 0\n\t\t\t\tvalue = reward\n\n\t\t\trecord.append(value)\n\t\treturn max(enumerate(record),key=lambda x:x[1])\n\nclass AIagent(object):\n\tdef makeMove(self, field):\n\t\tbestSolution = explore(field,6)\n\t\tmove = actionTranslate[bestSolution[0]]\n\t\treturn move\n\n\tdef makeMoveOnLevel(self, field, level):\n\t\tbestSolution = explore(field,level)\n\t\tmove = actionTranslate[bestSolution[0]]\n\t\treturn move\n\nif __name__ == '__main__':\n\tgame = GameAgent.GameField(height=4, width=4, win=2048)\n\tai = AIagent()\n\n\tfor level in range(7,9):\n\n\t\trecord = []\n\t\tfor times in range(50):\n\t\t\tgame.reset()\n\t\t\twhile (not game.is_win()) and (not game.is_gameover()):\n\t\t\t\taction = ai.makeMoveOnLevel(game.field, level)\n\t\t\t\twhile not game.move(action):\n\t\t\t\t\t#it may pick an unfeasible action, especially when the level is small\n\t\t\t\t\taction = choice(actionTranslate)\n\t\t\t\n\t\t\t#take the record\n\t\t\trecord.append((game.is_win(),game.score,game.highscore))\n\t\t\tprint \"result:\",game.is_win(),\"game score:\",game.score,\"highest score:\",game.highscore\n\n\t\t#do some stats\n\t\twin_lose = [i[0] for i in record]\n\t\twin_rate = win_lose.count(True)\n\t\tavg_score = sum([i[1] for i in record]) / 50.0\n\t\tavg_highscore = sum([i[2] for i in record]) / 50.0\n\n\t\tsumup = {\"record\":record, \"win_rate\":win_rate, \"avg_score\":avg_score, \"avg_highscore\":avg_highscore}\n\t\tprint \"sum up:\",sumup\n\n\t\t#write to file\n\t\twith open(\"level|\"+str(level),\"w\") as f:\n\t\t\tjson.dump(sumup,f)\n\n\n" }, { "alpha_fraction": 0.6628056764602661, "alphanum_fraction": 0.6795367002487183, "avg_line_length": 22.57575798034668, "blob_id": "cee01bcd286f824439d199d155ada71def71105e", "content_id": "e0389f351badbb569fa8b78ddaa3e66966818607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 777, "license_type": "no_license", "max_line_length": 56, "num_lines": 33, "path": "/lib/RandomPolicy.py", "repo_name": "professorlust/AI-for-game-2048", "src_encoding": "UTF-8", "text": "import GameAgent as g\nfrom random import randrange, choice\n#initialize the game\ngame = g.GameField(height=2, width=2, win=32)\n\nclass RandomPolicy(object):\n\tdef __init__(self):\n\t\tself.pool = ['Up', 'Left', 'Down', 'Right']\n\n\tdef reset(self):\n\t\tdel self.pool\n\t\tself.pool = ['Up', 'Left', 'Down', 'Right']\n\t\n\tdef generate(self, action=None):\n\t\tif action:\n\t\t\tself.pool.remove(action)\n\t\treturn choice(self.pool)\n\nrpolicy = RandomPolicy()\nrecords =[]\n\n#repeat the game 10000 times\nfor i in range(10000):\n\t#reset the game\n\tgame.reset()\n\twhile (not game.is_win()) and (not game.is_gameover()):\n\t\trpolicy.reset()\n\t\taction = rpolicy.generate()\n\t\twhile not game.move(action):\n\t\t\taction = rpolicy.generate(action)\n\n\tprint \"time \",i,\":\",game.score\n\trecords.append((game.is_win(),game.score))" }, { "alpha_fraction": 0.6310380101203918, "alphanum_fraction": 0.6485097408294678, "avg_line_length": 20.15217399597168, "blob_id": "694230d735216c096e366e776a6dc0754301b051", "content_id": "a647376b4f5de4f158992a1f637c41d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 49, "num_lines": 46, "path": "/gameAI.py", "repo_name": "professorlust/AI-for-game-2048", "src_encoding": "UTF-8", "text": "\nfrom random import randrange, choice, random\nimport numpy as np\nimport math\nimport pickle\n\nwith open(\"array.data\",\"r\") as f:\n\tarray = pickle.load(f)\n\nactionTranslate = ['Up', 'Left', 'Down', 'Right']\nclass QLearning(object):\n\tdef __init__(self):\n\t\tself.pool = [0,1,2,3]\n\t\tself.greedy = 1\n\n\tdef reset(self):\n\t\tdel self.pool\n\t\tself.pool = [0,1,2,3]\n\t\n\tdef generateAction(self, state, action=None):\n\t\tif action:\n\t\t\tself.pool.remove(action)\n\n\t\tif random()<self.greedy:\n\t\t\t#go greedy way\n\t\t\ta = array[state].argmax()\n\t\t\tif a in self.pool:\n\t\t\t\treturn a\n\t\t\telse:\n\t\t\t\treturn choice(self.pool)\n\t\telse:\n\t\t\treturn choice(self.pool)\n\n\tdef fieldToState(self, field):\n\t\toutput = 0\n\t\tfor i in range(2):\n\t\t\tfor j in range(2):\n\t\t\t\tnum = 0\n\t\t\t\tif field[i][j]:\n\t\t\t\t\tnum = int(math.log(field[i][j],2))\n\t\t\t\toutput += num*(10**(2*i+j))\n\t\treturn output\n\n\tdef makeMove(self, 
field):\n\t\tstate = self.fieldToState(field)\n\t\taction_num = self.generateAction(state)\n\t\treturn actionTranslate[action_num]" }, { "alpha_fraction": 0.6477169394493103, "alphanum_fraction": 0.6664651036262512, "avg_line_length": 25.661291122436523, "blob_id": "57c4027f3c4b434fe4038ed7fe2409b9a0b9a0b9", "content_id": "ef2409fbb3b6df7a5e9b77be3984ba947f4d5f94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 161, "num_lines": 124, "path": "/LearningProc.py", "repo_name": "professorlust/AI-for-game-2048", "src_encoding": "UTF-8", "text": "import GameAgent as g\nfrom random import randrange, choice, random\nimport numpy as np\nimport math\nimport pickle\nimport argparse\n\n#initialize the array\narray = np.zeros((7000,4))\n\n# #array from the file\n# with open(\"array.data\",\"r\") as f:\n# \tarray = pickle.load(f)\n# #model\n# model = {}\n\nactionTranslate = ['Up', 'Left', 'Down', 'Right']\n\nclass QLearning(object):\n\tdef __init__(self, greedy=None):\n\t\tself.pool = [0,1,2,3]\n\t\tif greedy is None:\n\t\t\t#default\n\t\t\tgreedy = 0.97\n\t\tself.greedy = greedy\n\n\tdef reset(self):\n\t\tdel self.pool\n\t\tself.pool = [0,1,2,3]\n\t\n\tdef generateAction(self, state, action=None):\n\t\tif action:\n\t\t\tself.pool.remove(action)\n\n\t\tif random()<self.greedy:\n\t\t\t#go greedy way\n\t\t\ta = array[state].argmax()\n\t\t\tif a in self.pool:\n\t\t\t\treturn a\n\t\t\telse:\n\t\t\t\treturn choice(self.pool)\n\t\telse:\n\t\t\treturn choice(self.pool)\n\n\tdef fieldToState(self, field):\n\t\toutput = 0\n\t\tfor i in range(2):\n\t\t\tfor j in range(2):\n\t\t\t\tnum = 0\n\t\t\t\tif field[i][j]:\n\t\t\t\t\tnum = int(math.log(field[i][j],2))\n\t\t\t\toutput += num*(10**(2*i+j))\n\t\treturn output\n\n\n\n\ndef train(mode=False,alpha = None,countingTimes = None, greedy=None):\n\t#set the configuration\n\tif alpha is None:\n\t\talpha = 0.7\n\tif countingTimes is None:\n\t\tcountingTimes = 500000\n\n\tglobal array\n\t#initialize the game\n\tgame = g.GameField(height=2, width=2, win=32)\n\t#initialize the Learning engine\n\tq = QLearning(greedy)\n\t#set the list to store all the results\n\trecords =[]\n\tfor i in range(countingTimes):\n\t\t#reset the game\n\t\tgame.reset()\n\t\twhile (not game.is_win()) and (not game.is_gameover()):\n\t\t\t\n\t\t\tstate = q.fieldToState(game.field)\n\t\t\tq.reset()\n\t\t\taction = q.generateAction(state)\n\n\t\t\twhile not game.move(actionTranslate[action]):\n\t\t\t\taction = q.generateAction(state, action)\n\n\t\t\t#the mode\n\t\t\tif mode == False:\n\t\t\t\tif game.is_win():\n\t\t\t\t\treward = game.score\n\t\t\t\telse:\n\t\t\t\t\treward = 0\n\t\t\telse:\n\t\t\t\treward = game.reward\n\n\t\t\tarray[state][action] += alpha * (reward+array[q.fieldToState(game.field)].max()-array[state][action])\n\t\t\t#save into the model\n\t\t\t# model[(state,action)] = (reward, q.fieldToState(game.field))\n\t\t\t# for repeat in range(50):\n\t\t\t# \ts,a = choice(model.keys())\n\t\t\t# \tr,s1 = model[(s,a)]\n\t\t\t# \tarray[s][a] += alpha * (r+array[s1].max()-array[s][a])\n\t\tprint \"time \",i,\"game score:\",game.score\n\t\trecords.append((game.is_win(),game.score))\n\n\treturn records\n\ndef recordStat(records):\n\twin = [i[0] for i in records]\n\twin_distribute = [win[sli:(sli+1000)].count(True) for sli in range(0,len(win),1000)]\n\t#return the per-1000-game win counts (the original forgot this and always returned None)\n\treturn win_distribute\n\nif __name__ == '__main__':\n\t#set the argparse\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-a\",\"--alpha\", help=\"learning step alpha for QLearning, default:0.7\",type=float)\n\tparser.add_argument(\"-t\",\"--times\", help=\"learning times for QLearning, default:500000\",type=int)\n\tparser.add_argument(\"--reward\", help=\"enable the reward to be the score of every move, otherwise the reward is only the final score of the game\",action=\"store_true\")\n\tparser.add_argument(\"-g\",\"--greedy\", help=\"greedy level of QLearning, default:0.97\",type=float)\n\targs = parser.parse_args()\n\n\t#it still has a bug: users might input unexpected parameters\n\trecords = train(mode=args.reward, alpha=args.alpha, countingTimes=args.times, greedy=args.greedy)\n\twin_distribute = recordStat(records)\n\n\t#dump the array.data file\n\twith open(\"array.data\",\"w\") as f:\n\t \tpickle.dump(array, f)\n\t" }, { "alpha_fraction": 0.6250764727592468, "alphanum_fraction": 0.6593272089958191, "avg_line_length": 22.042253494262695, "blob_id": "49629e1f9730e2482807f8900536651c2f4934a9", "content_id": "cfb0ef39755a04c075ec86ab9c535b278b7475f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1635, "license_type": "no_license", "max_line_length": 110, "num_lines": 71, "path": "/lib/QLearning2_2.py", "repo_name": "professorlust/AI-for-game-2048", "src_encoding": "UTF-8", "text": "import GameAgent as g\nfrom random import randrange, choice, random\nimport numpy as np\nimport math\nimport pickle\n#initialize the game\ngame = g.GameField(height=2, width=2, win=32)\n\n\n# array = np.zeros((7000,4))\nwith open(\"array.data\",\"r\") as f:\n\tarray = pickle.load(f)\n\nactionTranslate = ['Up', 'Left', 'Down', 'Right']\nclass QLearning(object):\n\tdef __init__(self):\n\t\tself.pool = [0,1,2,3]\n\t\tself.greedy = 0.99\n\n\tdef reset(self):\n\t\tdel self.pool\n\t\tself.pool = [0,1,2,3]\n\t\n\tdef generateAction(self, state, action=None):\n\t\tif action:\n\t\t\tself.pool.remove(action)\n\n\t\tif random()<self.greedy:\n\t\t\t#go greedy way\n\t\t\ta = array[state].argmax()\n\t\t\tif a in self.pool:\n\t\t\t\treturn a\n\t\t\telse:\n\t\t\t\treturn choice(self.pool)\n\t\telse:\n\t\t\treturn choice(self.pool)\n\n\tdef fieldToState(self, field):\n\t\toutput = 0\n\t\tfor i in range(2):\n\t\t\tfor j in range(2):\n\t\t\t\tnum = 0\n\t\t\t\tif field[i][j]:\n\t\t\t\t\tnum = int(math.log(field[i][j],2))\n\t\t\t\toutput += num*(10**(2*i+j))\n\t\treturn output\n\n\nq = QLearning()\nrecords =[]\nalpha = 0.8\n\n#repeat the game 400000 times\nfor i in range(400000):\n\t#reset the game\n\tgame.reset()\n\twhile (not game.is_win()) and (not game.is_gameover()):\n\t\tstate = q.fieldToState(game.field)\n\t\tq.reset()\n\t\taction = q.generateAction(state)\n\t\twhile not game.move(actionTranslate[action]):\n\t\t\taction = q.generateAction(state, action)\n\n\t\t#action being taken\n\t\tarray[state][action] += alpha * (game.reward()+array[q.fieldToState(game.field)].max()-array[state][action])\n\n\tprint \"time \",i,\":\",game.score\n\trecords.append((game.is_win(),game.score))\n\nwin = [i[0] for i in records]\nwin_distribute = [win[sli:(sli+1000)].count(True) for sli in range(0,400000,1000)]" } ]
6
Lee6604/kunsan
https://github.com/Lee6604/kunsan
8952b58d99948467cee88a8d3f1f7e906482428e
355b00c3790cf37cd5b3f9a6fa8c436f4cff2564
b0b1a1ad714241cbbde3a34803da1a4e3c3b5746
refs/heads/master
2022-11-17T23:55:11.504189
2020-07-10T01:48:40
2020-07-10T01:48:40
278,509,050
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 13, "blob_id": "4dd053e46e7dd9796c96a2175983f294eec5fd6b", "content_id": "9c2eb6913ad067084d254fade8cbf8a831b70c61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/wecar.py", "repo_name": "Lee6604/kunsan", "src_encoding": "UTF-8", "text": "print('wecar')\nprint('abc')\n" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 13.5, "blob_id": "55b87c545732b84509c05856584e86f5663fee94", "content_id": "841a2c0ec17b4821065dc2fcdfac1fd03b34580e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/master.py", "repo_name": "Lee6604/kunsan", "src_encoding": "UTF-8", "text": "print('master')\nprint('456')\n" } ]
2
mnirsb/FlowerDetection
https://github.com/mnirsb/FlowerDetection
70d6f74f842c90c8bc1260fad6d54bde1ecb5c1b
a6f025a282f10b46e387ab7eea43d59ed1472156
c397367a9e81cc1d5fec6c3beb230ae2e253dd2d
refs/heads/master
2022-12-10T10:22:47.459324
2020-08-29T11:45:03
2020-08-29T11:45:03
291,259,599
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.646576464176178, "alphanum_fraction": 0.662566602230072, "avg_line_length": 23.867347717285156, "blob_id": "cd6074d96030988e2b5b4c57a14eb78711fe0dd0", "content_id": "5c8e2c9c416a091f108b8108eebbe0c56f2453c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4878, "license_type": "no_license", "max_line_length": 137, "num_lines": 196, "path": "/Tensorflow_classification.py", "repo_name": "mnirsb/FlowerDetection", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nfrom __future__ import absolute_import,division,print_function,unicode_literals\nimport tensorflow as tf\n\nimport pandas as pd\n\n\n# In[5]:\n\n\nCSV_COLUMN.NAMES = ['SepalLength','SepalWidth','PetalLength','PetalWidth','Species']\nSPECIES =['Setosa','Versicolor','Virginica']\n\n\n# In[7]:\n\n\ntrain_path = tf.keras.utils.get_file(\"iris_training.csv\",\"https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv\")\ntest_path = tf.keras.utils.get_file(\"iris_test.csv\",\"https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv\")\n\ntrain = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\ntest = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\ntrain.head()\n\n\n# In[9]:\n\n\nfrom __future__ import absolute_import,division,print_function,unicode_literals\nimport tensorflow as tf\n\nimport pandas as pd\nCSV_COLUMN_NAMES = ['SepalLength','SepalWidth','PetalLength','PetalWidth','Species']\nSPECIES =['Setosa','Versicolor','Virginica']\ntrain_path = tf.keras.utils.get_file(\"iris_training.csv\",\"https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv\")\ntest_path = tf.keras.utils.get_file(\"iris_test.csv\",\"https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv\")\n\ntrain = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\ntest = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\ntrain.head()\n\n\n# In[10]:\n\n\ntrain_y = train.pop('Species')\ntest_y = test.pop('Species')\n\n\n# In[11]:\n\n\ntrain.head()\n\n\n# In[12]:\n\n\ntrain.shape()\n\n\n# In[13]:\n\n\ntrain.shape\n\n\n# In[14]:\n\n\ntrain.describe()\n\n\n# In[22]:\n\n\ndef input_fn(features, labels, training = True, batch_size=256):\n dataset = tf.data.Dataset.from_tensor_slices((dict(features),labels))\n if training: \n dataset = dataset.shuffle(1000).repeat()\n return dataset.batch(batch_size)\nmy_feature_columns = []\nfor key in train.keys():\n my_feature_columns.append(tf.feature_column.numeric_column(key=key))\n \nclassifier = tf.estimator.DNNClassifier(feature_columns = my_feature_columns,\n #two hidden layers of 30 and 10 nodes\n hidden_units=[30,10],\n # the model must choose between 3 classes\n n_classes=3)\n\nclassifier.train(input_fn=lambda: input_fn(train, train_y, training=True),\n steps=5000)\n\n\n# In[20]:\n\n\n\n\n\n# In[23]:\n\n\neval_result=classifier.evaluate(input_fn=lambda: input_fn(test, test_y, training=False))\nprint('\\nTest set Accuracy: {accuracy:0.3f}\\n'.format(**eval_result))\n\n\n# In[25]:\n\n\ndef input_fn(features, batch_size=256):\n return tf.data.Dataset.from_tensor_slices(dict(features)).batch(batch_sizes)\n\nfeatures = ['SepalLength','SepalWidth','PetalLength','PetalWidth']\npredict = {}\n\nprint('Please Type numeric values as prompted.')\nfor feature in features:\n valid = True\n while valid:\n val = input(feature +\": \")\n if not val.isdigit(): valid = False\n \n predict[feature] = [float(val)]\npredictions 
= classifier.predict(input_fn = lambda: input_fn(predict))\nfor prod_dict in predictions:\n class_id = pred_dict['class_ids'][0]\n possibility = pred_dict['probabilities'][class_id]\n \n print('Prediction is \"{}\"({:.1f}%'.format(SPECIES[class_id],100*probability))\n\n\n# In[ ]:\n\n\n\n\n\n# In[29]:\n\n\ndef input_fn(features, batch_size=256):\n return tf.data.Dataset.from_tensor_slices(dict(features)).batch(batch_sizes)\n\nfeatures = ['SepalLength','SepalWidth','PetalLength','PetalWidth']\npredict = {}\n\nprint('Please Type numeric values as prompted.')\nfor feature in features:\n valid = True\n while valid:\n val = input(feature +\": \")\n if not val.isdigit(): valid = False\n \n predict[feature] = [float(val)]\npredictions = classifier.predict(input_fn = lambda: input_fn(predict))\nfor pred_dict in predictions:\n class_id = pred_dict['class_ids'][0]\n probability = pred_dict['probabilities'][class_id]\n \n print('Prediction is \"{}\"({:.1f}%'.format(SPECIES[class_id],100*probability))\n\n\n# In[36]:\n\n\ndef input_fn(features, batch_sizes=256):\n return tf.data.Dataset.from_tensor_slices(dict(features)).batch(batch_sizes)\n\nfeatures = ['SepalLength','SepalWidth','PetalLength','PetalWidth']\npredict = {}\n\nprint('Please Type numeric values as prompted.')\nfor feature in features:\n valid = True\n while valid:\n val = input(feature +\": \")\n if not val.isdigit(): valid = False\n \n predict[feature] = [float(val)]\n \npredictions = classifier.predict(input_fn = lambda: input_fn(predict))\nfor pred_dict in predictions:\n class_id = pred_dict['class_ids'][0]\n probability = pred_dict['probabilities'][class_id]\n \n print('Prediction is \"{}\"({:.1f}%)'.format(SPECIES[class_id],100*probability))\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 38, "blob_id": "f556a589a579f717f287c6ae6bd7031c42d6abca", "content_id": "7b8846b3cd40f83f67ecd06deae7c1c635ec722d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 78, "license_type": "no_license", "max_line_length": 59, "num_lines": 2, "path": "/README.md", "repo_name": "mnirsb/FlowerDetection", "src_encoding": "UTF-8", "text": "# FlowerDetection\nCreated Flower Detection Using TensorFlow with DNN library.\n" } ]
2
ashaychangwani/AILearnsSnake
https://github.com/ashaychangwani/AILearnsSnake
62d31c2cd2d99457b994e94c3de5461369e40dee
9ccc4cb1f8e36276ea74506afbb789684c9007c2
605a47f7cb9082476be0ef5dcb3cd8e53f6db6ce
refs/heads/master
2022-08-05T14:54:44.577716
2022-07-18T00:04:29
2022-07-18T00:04:29
219,677,058
7
0
null
null
null
null
null
[ { "alpha_fraction": 0.757383406162262, "alphanum_fraction": 0.7656638026237488, "avg_line_length": 56.507938385009766, "blob_id": "77f46551b8f8bcc4c2406ad7c2ffffe3dd32aff9", "content_id": "c5555dfd7057bc53316bdac02f3e3cbdba8bcd44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3623, "license_type": "no_license", "max_line_length": 333, "num_lines": 63, "path": "/README.md", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "# AILearnsSnake\n\nThis app was an experimental game used to create a game in Python using PyGame and then teaching an AI to play the game using an **Artificial Neural Network** and optimizing the gameplay using **Genetic Algorithm (GA)**.\n\n# Demo\n![Demo](images/demo.gif)\n___\nThe snakes look in 8 intercardinal directions, and in each direction, check for 3 things:\n 1. Whether the apple exists in the chosen direction\n 2. Whether a part of the snake's body lies in the chosen direction (and if yes, how far is it)\n 3. How far the closest boundary is, along the chosen direction.\n\nThese 3 inputs multiplied by the 8 directions give us the 24 inputs that will be fed to the neural network. \n\nThe neural network returns 3 kinds of output: \n * 1: Move forward in the previous direction\n * 2: Turn left\n * 3: Turn right\nPlease note that these directions are relative to the current direction of the snake.\n\nThere is fairly detailed documentation for each function in each of the classes. Please feel free to go through them and raise a new pull request to add features or edits that you want to edit.\n\nThere will also be Medium posts regarding the project, the process followed to build it and the intuition behind it will be explained in depth in the blogs. I'll make sure to update the links into the README.md file as soon as the blogs are published. \n\nThere are 4 files required to train the network.\n* snake.py\n* params.py\n* nn.py\n* ga.py\n\n1. *snake.py*\n\n This file contains two classes: snake and environment. \n\n The snake class is responsible for storage of all data relevant to the particular snake like the coordinates of it's head, the rest of it's body as well as results like score, time since it last ate an apple, etc.\n\n The Environment class contains information regarding the pygame frame, and is responsible for drawing the apple, snake and boundary onto the pygame frame. It also contains the game specific variables like the position of the apple at that instant, as well as generating a new apple position if the snake eats the previous apple. \n\n2. *nn.py*\n\n The nn.py file contains the NeuralNet class and contains the code for the neural network. The architecure of the neural network is as follows: \n\n 24 neurons in the input layer\n ReLU activation\n 16 neurons in the hidden layer\n Softmax activation\n 3 neurons in the output layer\n\n The 24 input and 3 outputs are explained above. The class is also responsible for generating the 24 inputs by taking information about the snake and it's surroundings and converting it into inputs that will be accepted by the neural network.\n\n3. *params.py*\n\n This file simply initializes some variables that are shared between different classes. \n\n4. *ga.py*\n\n This is the class that needs to be run in order to train the neural network and run the Genetic Algo. 
" }, { "alpha_fraction": 0.5246288180351257, "alphanum_fraction": 0.529813826084137, "avg_line_length": 42.306121826171875, "blob_id": "f95af14ba7665fbc0bd391ae1487ec7a74219a66", "content_id": "37d5b5cbed4d589cdaf529e2610f859b60ff20db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4243, "license_type": "no_license", "max_line_length": 126, "num_lines": 98, "path": "/PlayGameAI.py", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "import pygame\nimport pickle\nfrom snake import Environment, snake\nfrom nn import NeuralNet\nimport time\nimport copy\nfrom params import *\n\nfile = open('saved/model.pickle', \"rb\") \nsnake_generations = pickle.load(file)\nfile.close()\n\n\n\npygame.init()\npygame.font.init()\nmyfont = pygame.font.SysFont('Bitstream Vera Serif', 20)\nscreen = pygame.display.set_mode((display_width, display_height))\nenvironment = Environment(display_height, display_width, unit)\nfor i in range(len(snake_generations)):\n    snakes = snake_generations[i]\n    prev_score = -1\n    for j in range(len(snakes)):\n        saved_snake = snakes[j]\n        pygame.display.set_caption('Generation : '+str(i+1)+'\\t\\tSnake Num: '+str(j+1)+'\\t\\tPrevious Score: '+str(prev_score))\n        t_snake = snake(display_width, display_height, NN_shape, unit, False)\n        t_snake.neuralnet.theta = saved_snake.neuralnet.theta\n        t_snake.neuralnet.bias = saved_snake.neuralnet.bias\n        t_snake.neuralnet.setNextFood(\n            environment.create_new_apple(t_snake.snake_position))\n        screen = environment.create(screen, gray)\n        screen = environment.draw_apple(screen, pink)\n        screen = t_snake.draw_snake(screen, blue, cherry)\n        pygame.display.update()\n        checkloop = False\n        start_time = time.time()\n        while t_snake.isAlive():\n            for event in pygame.event.get():\n                if event.type == pygame.KEYDOWN and event.key == pygame.K_q:\n                    t_snake.collision_with_boundary = True\n                    t_snake.collision_with_self = True\n                if event.type == pygame.QUIT:\n                    pygame.quit()\n                    quit()\n            \n            if (t_snake.head_x, t_snake.head_y) == environment.apple_position:\n                t_snake.time_since_apple = 0\n                result = t_snake.neuralnet.decision(t_snake.head_x, t_snake.head_y, t_snake.snake_position, t_snake.direction)\n                t_snake.eatApple(result)\n                t_snake.neuralnet.setNextFood(environment.create_new_apple(t_snake.snake_position))\n                start_time = time.time()\n                checkloop = False\n            \n            \n\n            if t_snake.time_since_apple > 250:\n                if not checkloop:\n                    checkloop = True\n                    any_point = (t_snake.head_x, t_snake.head_y)\n                    times = 0\n                elif (t_snake.head_x, t_snake.head_y) == any_point:\n                    times += 1\n                    if times > 4:\n                        t_snake.collision_with_boundary = True\n                        t_snake.collision_with_self = True\n                        alive = False\n            if time.time() - start_time > 7:\n                t_snake.collision_with_boundary = True\n                
t_snake.collision_with_self = True\n \n \n \n result = t_snake.neuralnet.decision(\n t_snake.head_x, t_snake.head_y, t_snake.snake_position, t_snake.direction)\n\n \n if not t_snake.move(result):\n prev_score = len(t_snake.snake_position) - 1\n \n \n if t_snake.collision_with_boundary and t_snake.collision_with_self:\n print('Generation: ' + str(i+1) + '\\t\\t' + \\\n 'Snake Number: ' + str(j+1) + '\\t\\t' + \\\n 'Score: ' + str(prev_score)+'\\t\\tReason: Stuck in Loop\\t[Dead]')\n elif t_snake.collision_with_boundary:\n print('Generation: ' + str(i+1) + '\\t\\t' + \\\n 'Snake Number: ' + str(j+1) + '\\t\\t' + \\\n 'Score: ' + str(prev_score)+'\\t\\tReason: Collision With Boundary\\t[Dead]')\n else:\n print('Generation: ' + str(i+1) + '\\t\\t' + \\\n 'Snake Number: ' + str(j+1) + '\\t\\t' + \\\n 'Score: ' + str(prev_score)+'\\t\\tReason: Collision With Self\\t[Dead]')\n \n screen = environment.create(screen, gray)\n screen = environment.draw_apple(screen, pink)\n screen = t_snake.draw_snake(screen, blue, cherry)\n pygame.display.update()\n time.sleep(0.5)" }, { "alpha_fraction": 0.5147929191589355, "alphanum_fraction": 0.6745561957359314, "avg_line_length": 20.1875, "blob_id": "45694482445dc92a591e1ff372e44dc4735ae44f", "content_id": "72b87dfcdd162169ba0810bcb186624e637fad2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 35, "num_lines": 16, "path": "/params.py", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "display_width = 540\ndisplay_height = 440\nunit = 10\nNN_shape = [24, 16, 3]\ninit_NN = True\npopulation_size = 50\nno_of_generations = 100\npercentage_best_performers = 20.0 \npercentage_worst_performers = 2.0 \nmutation_percent = 7.0\nmutation_intensity = 0.1\n\ncherry = (150, 0, 0)\nblue = (106, 133, 164)\npink = (171, 54, 81)\ngray = (55, 55, 55)" }, { "alpha_fraction": 0.5758964419364929, "alphanum_fraction": 0.5819263458251953, "avg_line_length": 40.188743591308594, "blob_id": "0b5f8e57ca4e00841eee5b15e8f06990358e5f1b", "content_id": "a04971e45e55ce408fa94fe43a3fbabe44d21173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12438, "license_type": "no_license", "max_line_length": 127, "num_lines": 302, "path": "/ga.py", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "import random\nimport pickle\nimport time\nfrom snake import Environment, snake\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom params import *\n\nclass GeneticAlgo:\n\n def __init__(self, display_width, display_height, unit, NN_shape, init_NN, population_size, no_of_generations,\n percentage_best_performers, percentage_worst_performers, mutation_percent, mutation_intensity):\n \"\"\"\n Initializes an object of class GeneticAlgo with the parameters of the game. 
\n\n        Args:\n            display_width (int): The width of the frame in pixels\n            display_height (int): The height of the frame in pixels\n            unit (int): The size of each block of the frame in pixels\n            NN_shape (list): The shape of the NeuralNetwork responsible for converting the input to outputs\n            init_NN (bool): Boolean describing whether the neural network should be initialized with random weights\n            population_size (int): Number of objects in each generation\n            no_of_generations (int): Number of generations to run the neural net\n            percentage_best_performers (int): Percentage of top performers of the previous generation to be used for elitism\n            percentage_worst_performers (int): Percentage of worst performers of the previous generation to be used for elitism\n            mutation_percent (int): Percentage chance of mutation of each member in the weight matrix\n            mutation_intensity (int): Intensity of mutation (magnitude of change in the weights)\n        \"\"\"\n\n        self.display_width = display_width\n        self.display_height = display_height\n        self.unit = unit\n        self.NN_shape = NN_shape\n        self.init_NN = init_NN\n        self.population_size = population_size\n        self.no_of_generations = no_of_generations\n        self.percentage_best_performers = percentage_best_performers\n        self.percentage_worst_performers = percentage_worst_performers\n        self.mutation_percent = mutation_percent\n        self.mutation_intensity = mutation_intensity\n\n    def run(self, snakes, environment):\n        \"\"\"Runs the snakes for a single generation.\n\n        Args:\n            snakes (list of type snake): List of all the snakes of the current generation to be run.\n            environment (object): Object of type Environment\n\n        Returns:\n            average of all scores\n            90th percentile of scores\n        \"\"\"\n\n        i = 1\n        scores = []\n\n        generation_seed = random.random()\n        for snake in snakes:\n            start_time = time.time()\n            checkloop = False\n            self.progress(i/self.population_size, 30)\n            random.seed(generation_seed)\n            apple_position = environment.create_new_apple(snake.snake_position)\n            snake.neuralnet.setNextFood(apple_position)\n\n            while(snake.isAlive()):\n                if (snake.head_x, snake.head_y) == environment.apple_position:\n                    snake.time_since_apple = 0\n                    result = snake.neuralnet.decision(\n                        snake.head_x, snake.head_y, snake.snake_position, snake.direction)\n                    snake.eatApple(result)\n                    start_time = time.time()\n                    snake.neuralnet.setNextFood(\n                        environment.create_new_apple(snake.snake_position))\n                    checkloop = False\n\n                if snake.time_since_apple > 250:  # could be tuned\n                    if not checkloop:\n                        checkloop = True\n                        any_point = (snake.head_x, snake.head_y)\n                        times = 0\n                    elif (snake.head_x, snake.head_y) == any_point:\n                        times += 1\n                        if times > 2:\n                            snake.collision_with_boundary = True\n                            snake.collision_with_self = True\n\n                if time.time() - start_time > 0.5:\n                    snake.collision_with_boundary = True\n                    snake.collision_with_self = True\n\n                result = snake.neuralnet.decision(\n                    snake.head_x, snake.head_y, snake.snake_position, snake.direction)\n\n                if snake.move(result) == False:\n                    break\n            random.seed()\n            scores.append(len(snake.snake_position) - 1)\n            i += 1\n        print(\"\\nAverage: %.2f \\n90th percentile: %.2f\" %\n              (np.average(scores), np.percentile(scores, 90)))\n        return np.average(scores), np.percentile(scores, 90)\n\n    def print_top(self, snakes):\n        \"\"\"Prints information (number, score, and reason for death) about the top snakes in each generation\n\n        Args:\n            snakes (list): List of the top snake objects for the current generation\n        \"\"\"\n        i = 0\n        for snake in snakes:\n            i += 1\n            print('snake ', i, ', score : ', len(snake.snake_position)\n                  - 
1, end='\\t')\n if snake.collision_with_self and snake.collision_with_boundary:\n print('stuck in loop')\n elif snake.collision_with_boundary and not snake.collision_with_self:\n print('crashed wall')\n else:\n print('crashed body')\n\n def save(self, snakes, filename):\n \"\"\"Saves the top snakes from every generation into a pickle file to be loaded in the gui.py file\n\n Args:\n snakes (list): List of top snakes of every generation\n filename (str): String representing filename of the output file\n \"\"\"\n f = open(filename, \"wb\")\n pickle.dump(snakes, f)\n f.close()\n\n def cloneOfParents(self, parents):\n \"\"\"Creates clones of parents selected for elitism to be added to the next generation\n\n Args:\n parents (list): List of parents selected for elitism\n\n Returns:\n [list]: List of the clones of the input snakes\n \"\"\"\n snakes = []\n for parent in parents:\n babySnake = snake(self.display_width, self.display_height,\n self.NN_shape, self.unit,\n False)\n babySnake.neuralnet.theta = parent.neuralnet.theta\n babySnake.neuralnet.bias = parent.neuralnet.bias\n snakes.append(babySnake)\n return snakes\n\n def elitism(self, snakes):\n \"\"\"Selects top performing parents for elitism (along with a few bottom performers for variance)\n\n Args:\n snakes (list): List of all snakes in the generation sorted by their scores\n\n Returns:\n [list]: List of parents that have been selected for elitism and cloned for future generation\n \"\"\"\n parents = []\n num_top = int(self.population_size *\n self.percentage_best_performers / 100)\n num_bottom = int(self.population_size *\n self.percentage_worst_performers / 100)\n\n parents.extend(self.cloneOfParents(snakes[:num_top]))\n parents.extend(self.cloneOfParents(snakes[-num_bottom:]))\n return parents, num_top, num_bottom\n \n def create_new_pop(self, snakes):\n \"\"\"Function to create the new generation using the parents from the previous generation\n\n Args:\n snakes (list): List of all snakes from the previous generation\n\n Returns:\n [list]: List of snakes that represent the next generation\n \"\"\"\n parents, num_top, num_bottom = self.elitism(snakes)\n children = self.offspringGeneration(\n parents, self.population_size - num_top - num_bottom)\n\n children = self.mutate(children)\n parents.extend(children)\n return parents\n\n def crossOver(self, parent1, parent2):\n \"\"\"Performs crossover function of genetic algos\n\n Args:\n parent1 (snake): Input parent 1\n parent2 (snake): Input parent 2\n\n Returns:\n [snake]: Returns the child born from crossover of the two input parents\n \"\"\"\n child = snake(self.display_width, self.display_height,\n self.NN_shape, self.unit)\n for i in range(len(parent1.neuralnet.theta)):\n for j in range(parent1.neuralnet.theta[i].shape[0]):\n for k in range(parent1.neuralnet.theta[i].shape[1]):\n child.neuralnet.theta[i][j, k] = random.choice([\n parent1.neuralnet.theta[i][j, k],\n parent2.neuralnet.theta[i][j, k]])\n\n for j in range(parent1.neuralnet.bias[i].shape[1]):\n child.neuralnet.bias[i][0, j] = random.choice(\n [parent1.neuralnet.bias[i][0, j],\n parent2.neuralnet.bias[i][0, j]\n ]\n )\n return child\n\n def offspringGeneration(self, parents, no_of_children):\n \"\"\"Generates the rest of the population after elitism is done by perfoming crossover\n on the parents until the members of the next generation is equal to the specified \n population\n\n Args:\n parents (list): List of snakes that have been selected via elitism\n no_of_children (int): Number of snakes that are to be generated via 
crossover\n\n        Returns:\n            [list]: List of all the snakes of the next generation produced via crossover\n        \"\"\"\n        all_children = []\n        for _ in range(no_of_children):\n            parent1 = random.choice(parents)\n            parent2 = random.choice(parents)\n\n            all_children.append(self.crossOver(parent1, parent2))\n\n        return all_children\n\n    def mutate(self, children):\n        \"\"\"Performs the mutation operation of genetic algorithms on the snakes in order to increase variety\n\n        Args:\n            children (list): List of all snakes in the current generation (produced via elitism + crossover)\n\n        Returns:\n            [list]: List of all snakes in the current generation after mutation is complete\n        \"\"\"\n        for child in children:\n            for W in child.neuralnet.theta:\n                for _ in range(int(W.shape[0] * W.shape[1] * self.mutation_percent/100)):\n                    row = random.randint(0, W.shape[0]-1)\n                    col = random.randint(0, W.shape[1]-1)\n                    W[row][col] += random.uniform(-self.mutation_intensity,\n                                                  self.mutation_intensity)\n        return children\n\n    def runner(self):\n        \"\"\"\n        Main function of the GeneticAlgo class: it evaluates the results for each generation\n        and populates the next generation.\n        Plots the average and 90th percentile score of each generation to help identify an\n        ideal early stopping point\n        \"\"\"\n        snakes = [snake(self.display_width, self.display_height, self.NN_shape,\n                        self.unit) for _ in range(self.population_size)]\n        environment = Environment(self.display_height, self.display_width, self.unit)\n        top_snakes = []\n        averages = []\n        percentile = []\n        for i in range(self.no_of_generations):\n            print('GENERATION: ', i+1, end='\\n')\n            avg, ptile = self.run(snakes, environment)\n            averages.append(avg)\n            percentile.append(ptile)\n\n            snakes.sort(key=lambda x:\n                        len(x.snake_position), reverse=True)\n\n            self.print_top(snakes[0:5])\n\n            top_snakes.append(snakes[:3])\n\n            snakes = self.create_new_pop(snakes)\n        self.save(top_snakes, \"saved/test.pickle\")\n        plt.plot(averages)\n        plt.plot(percentile)\n        plt.show()\n\n    def progress(self, percent, length):\n        \"\"\"Creates a progress bar to check the progress of the current generation\n\n        Args:\n            percent (int): Percentage that is complete\n            length (int): Length of the progress bar\n        \"\"\"\n        hashes = round(percent*length)\n        print('\\r', '*'*hashes + '_'*(length - hashes),\n              '[{:.2%}]'.format(percent), end='')\n\n\nif __name__ == '__main__':\n    ga = GeneticAlgo(display_width, display_height, unit, NN_shape, init_NN, population_size, no_of_generations,\n                     percentage_best_performers, percentage_worst_performers, mutation_percent, mutation_intensity)\n\n    ga.runner()" }, { "alpha_fraction": 0.5439071655273438, "alphanum_fraction": 0.5510203838348389, "avg_line_length": 35.56346893310547, "blob_id": "851d9ea62b33df9b78d1bd0c145d8a5eac404f00", "content_id": "6f4b241eb70c3fb1d486eebfd1f458b3cb8e80ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11809, "license_type": "no_license", "max_line_length": 163, "num_lines": 323, "path": "/snake.py", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "from pygame import draw, image, transform\nfrom nn import NeuralNet\nimport random\n\n\nclass Environment:\n    def __init__(self, display_height, display_width, unit_size):\n        \"\"\"Creates an object of type Environment.\n\n        Args:\n            display_height (int): Height of display in pixels.\n            display_width (int): Width of display in pixels.\n            unit_size (int): Size of each block in pixels.\n        \"\"\"\n        self.display_height = display_height\n        self.display_width = 
display_width\n        self.unit = unit_size\n        self.apple_position = (0, 0)\n\n    def draw_apple(self, environment, color):\n        \"\"\"Draw the apple on the screen\n\n        Args:\n            environment (object): Instance of type Environment\n            color (tuple): RGB values of colour\n\n        Returns:\n            environment: Returns instance of type Environment after drawing the apple\n        \"\"\"\n        apple_image = image.load('apple.png')\n        apple_image = transform.scale(apple_image, (10, 10))\n        environment.blit(\n            apple_image, (self.apple_position[0], self.apple_position[1], self.unit, self.unit))\n        return environment\n\n    def draw_boundary(self, environment, color):\n        \"\"\"Draws the boundary on the screen\n\n        Args:\n            environment (object): Instance of type Environment\n            color (tuple): RGB values of colour\n        \"\"\"\n        unit = self.unit\n        for w in range(0, self.display_width, self.unit):\n            draw.rect(environment, color, (w, 0, unit, unit))\n            draw.rect(environment, color,\n                      (w, self.display_height - unit, unit, unit))\n        for h in range(0, self.display_height, self.unit):\n            draw.rect(environment, color, (0, h, unit, unit))\n            draw.rect(environment, color,\n                      (self.display_width - unit, h, unit, unit))\n\n    def create(self, environment, color):\n        \"\"\"Initialize the environment and draw the boundaries\n\n        Args:\n            environment (object): Instance of type Environment\n            color (tuple): RGB values of colour\n\n        Returns:\n            environment: Returns instance of type Environment after filling the background and drawing the boundary\n        \"\"\"\n        environment.fill((200, 200, 200))\n        self.draw_boundary(environment, color)\n        return environment\n\n    def create_new_apple(self, snake_position):\n        \"\"\"Creates a new apple, checking that the new apple does not appear on the body of the snake\n\n        Args:\n            snake_position (list): List of the snake body coordinates\n\n        Returns:\n            list: Coordinates of the new apple position\n        \"\"\"\n        unit = self.unit\n        apple_position = (unit*random.randint(2, self.display_width/unit - 2),\n                          unit*random.randint(2, self.display_height/unit - 2))\n        while any(body == apple_position for body in snake_position):\n            apple_position = (unit*random.randint(2, self.display_width/unit - 2),\n                              unit*random.randint(2, self.display_height/unit - 2))\n        self.apple_position = apple_position\n        return self.apple_position\n\nclass snake:\n    def __init__(self, display_width, display_height, NN_shape, unit, init_NN=True, random_start=True):\n        \"\"\"Initializes an object of type snake\n\n        Args:\n            display_height (int): Height of display in pixels.\n            display_width (int): Width of display in pixels.\n            NN_shape (list): Shape of the neural network architecture\n            unit (int): Size of each block in pixels.\n            init_NN (bool, optional): Initialize the neural network with random weights. Defaults to True.\n            random_start (bool, optional): Start the snake position randomly or at a predefined location. 
Defaults to True.\n        \"\"\"\n        self.snake_position = []\n        self.display_width = display_width\n        self.display_height = display_height\n        self.time_since_apple = 0\n        self.collision_with_boundary = False\n        self.collision_with_self = False\n        self.unit = unit\n\n        self.neuralnet = NeuralNet(\n            NN_shape, self.display_width, self.display_height, self.unit, init_NN)\n\n        self.snake_position.append(self.initSnake(random_start))\n\n    def initSnake(self, random_start):\n        \"\"\"Set the start position and direction of the snake\n\n        Args:\n            random_start (bool): Describes whether the snake should start randomly or at a predefined location\n\n        Returns:\n            tuple: X and Y coordinates of snake_head (starting position)\n        \"\"\"\n        if random_start:\n            self.direction = random.choice(['RIGHT', 'UP', 'DOWN', 'LEFT'])\n            self.head_x = random.randint(\n                3, self.display_width / self.unit - 3) * self.unit\n            self.head_y = random.randint(\n                3, self.display_height / self.unit - 3) * self.unit\n        else:\n            self.direction = 'RIGHT'\n            self.head_x, self.head_y = 40, 40\n        return (self.head_x, self.head_y)\n\n    def isAlive(self):\n        \"\"\"Check if the snake is alive\n        \n        Returns:\n            bool: True if alive, False otherwise\n        \"\"\"\n        if not self.collision_with_self and not self.collision_with_boundary:\n            return True\n        return False\n    \n    def eatApple(self, direction):\n        \"\"\"Add the location to the snake body and increase the snake size by 1\n\n        Args:\n            direction (str): Direction of movement after eating the apple\n        \"\"\"\n        self.snake_position.insert(0, (self.head_x, self.head_y))\n        self.move(direction)\n    \n    def eatAppleHuman(self, direction):\n        \"\"\"eatApple method, but for a human player playing the game instead of the AI\n\n        Args:\n            direction (str): Direction of movement after eating the apple\n        \"\"\"\n        self.snake_position.insert(0, (self.head_x, self.head_y))\n        self.moveHuman(direction)\n\n    def moveInDirection(self, direction):\n        \"\"\"Move the snake in a particular direction, if the chosen direction is valid. 
Otherwise keep moving in the current direction.\n\n        Args:\n            direction (str): Direction chosen by the user\n        \"\"\"\n        if direction == 'UP':\n            self.head_y = self.head_y - self.unit\n        elif direction == 'DOWN':\n            self.head_y = self.head_y + self.unit\n        elif direction == 'LEFT':\n            self.head_x = self.head_x - self.unit\n        else:\n            self.head_x = self.head_x + self.unit\n        self.direction = direction\n        self.snake_position.insert(0, (self.head_x, self.head_y))\n        self.snake_position.pop()\n        self.check_valid()\n\n    def check_valid(self):\n        \"\"\"Check if the snake is alive / has crashed into its own body or the boundary\n        \"\"\"\n        if self.head_x == self.unit or self.head_x == self.display_width - self.unit or self.head_y == self.unit or self.head_y == self.display_height - self.unit:\n            self.collision_with_boundary = True\n        for (body_x, body_y) in self.snake_position[1:]:\n            if body_x == self.head_x and body_y == self.head_y:\n                self.collision_with_self = True\n\n    def move(self, result):\n        \"\"\"Move the snake in a chosen direction\n\n        Args:\n            result (int): Direction chosen by the AI for the movement of the snake\n\n        Returns:\n            bool: Describes whether or not the snake is alive after the movement\n        \"\"\"\n        if self.direction == 'UP':\n            if result == 1:\n                self.moveInDirection('UP')\n            elif result == 2:\n                self.moveInDirection('LEFT')\n            else:\n                self.moveInDirection('RIGHT')\n        elif self.direction == 'RIGHT':\n            if result == 1:\n                self.moveInDirection('RIGHT')\n            elif result == 2:\n                self.moveInDirection('UP')\n            else:\n                self.moveInDirection('DOWN')\n        elif self.direction == 'DOWN':\n            if result == 1:\n                self.moveInDirection('DOWN')\n            elif result == 2:\n                self.moveInDirection('RIGHT')\n            else:\n                self.moveInDirection('LEFT')\n        else:\n            if result == 1:\n                self.moveInDirection('LEFT')\n            elif result == 2:\n                self.moveInDirection('DOWN')\n            else:\n                self.moveInDirection('UP')\n        self.time_since_apple += 1\n        return self.isAlive()\n\n    def moveHuman(self, result):\n        \"\"\"Move the snake in a chosen direction, for a human player instead of the AI\n\n        Args:\n            result (int): Direction chosen by the player for the movement of the snake\n\n        Returns:\n            bool: Describes whether or not the snake is alive after the movement\n        \"\"\"\n        if self.direction == 'UP':\n            if result == 1:\n                self.moveInDirection('UP')\n            elif result == 2:\n                self.moveInDirection('LEFT')\n            elif result == 3:\n                self.moveInDirection('RIGHT')\n        elif self.direction == 'RIGHT':\n            if result == 1:\n                self.moveInDirection('UP')\n            elif result == 3:\n                self.moveInDirection('RIGHT')\n            elif result == 4:\n                self.moveInDirection('DOWN')\n        elif self.direction == 'DOWN':\n            if result == 2:\n                self.moveInDirection('LEFT')\n            elif result == 3:\n                self.moveInDirection('RIGHT')\n            elif result == 4:\n                self.moveInDirection('DOWN')\n        elif self.direction == 'LEFT':\n            if result == 1:\n                self.moveInDirection('UP')\n            elif result == 2:\n                self.moveInDirection('LEFT')\n            elif result == 4:\n                self.moveInDirection('DOWN')\n        elif result!=0:\n            self.moveInDirection(self.direction)\n        return self.isAlive()\n    \n    def convAIToDirections(self, result):\n        \"\"\"Convert the relative integer output of the AI helper into an absolute direction for the snake\n\n        Args:\n            result ([int]): Direction as output by the AI helper \n\n        Returns:\n            str : Absolute direction \n        \"\"\"\n        if self.direction == 'UP':\n            if result == 1:\n                return 'UP'\n            elif result == 2:\n                return 'LEFT'\n            else:\n                return 'RIGHT'\n        elif self.direction == 'RIGHT':\n            if result == 1:\n                return 'RIGHT'\n            elif result == 2:\n                return 'UP'\n            else:\n                return 'DOWN'\n        elif self.direction == 'DOWN':\n            if result == 1:\n                return 'DOWN'\n            elif result == 2:\n                
return 'RIGHT'\n            else:\n                return 'LEFT'\n        else:\n            if result == 1:\n                return 'LEFT'\n            elif result == 2:\n                return 'DOWN'\n            else:\n                return 'UP'\n    \n    def draw_snake(self, environment, color, color_head):\n        \"\"\"Draws the snake on the environment\n\n        Args:\n            environment (object): Instance of class Environment\n            color (tuple): RGB values of the color of the snake body\n            color_head (tuple): RGB values of the color of the snake head\n\n        Returns:\n            environment: Returns the environment after the snake has been drawn\n        \"\"\"\n        l = self.unit\n        for (x, y) in self.snake_position[1:]:\n            draw.rect(environment, color, (x, y, l, l), 1)\n            draw.rect(environment, color, (x+2, y+2, l-4, l-4))\n        draw.rect(environment, color_head, (self.head_x, self.head_y, l, l), 1)\n        draw.rect(environment, color_head,\n                  (self.head_x+2, self.head_y+2, l-4, l-4))\n        return environment" }, { "alpha_fraction": 0.5454732775688171, "alphanum_fraction": 0.5561971664428711, "avg_line_length": 36.011451721191406, "blob_id": "afb407fa2c0603daa4cc0479bd288bab30ac8ed7", "content_id": "19a72db86dc235ec3a2ac6956cabf5fc60617621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9698, "license_type": "no_license", "max_line_length": 128, "num_lines": 262, "path": "/nn.py", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\n\n\nclass NeuralNet:\n    def __init__(self, NN_shape, display_width, display_height, unit, init_NN=True):\n        \"\"\"Initializes a class of type NeuralNet.\n\n        Args:\n            NN_shape (list): Shape of the neural network architecture\n            display_width (int): Width of display in pixels\n            display_height (int): Height of display in pixels\n            unit (int): Size of each unit \n            init_NN (bool, optional): Whether the neural network should be initialized with random weights. 
Defaults to True.\n        \"\"\"\n        self.display_width = display_width\n        self.display_height = display_height\n        self.unit = unit\n        self.apple_position = ()\n        self.theta = []\n        self.bias = []\n        if init_NN:\n            self.initialize_weights(NN_shape)\n    \n    def sigmoid(self, mat):\n        \"\"\"Performs the sigmoid operation\n\n        Args:\n            mat (matrix): Input matrix\n\n        Returns:\n            [matrix]: result which is sigmoid(matrix)\n        \"\"\"\n        return 1.0 / (1.0 + np.exp(-mat))\n\n    def relu(self, mat):\n        \"\"\"Performs the ReLU operation\n\n        Args:\n            mat (matrix): Input matrix\n\n        Returns:\n            [matrix]: result which is ReLU(matrix)\n        \"\"\"\n        return mat * (mat > 0)\n\n    def softmax(self, mat):\n        \"\"\"Performs the Softmax operation\n\n        Args:\n            mat (matrix): Input matrix\n\n        Returns:\n            [matrix]: result which is softmax(matrix)\n        \"\"\"\n        mat = mat - np.max(mat)\n        return np.exp(mat) / np.sum(np.exp(mat), axis=1)\n\n    def setNextFood(self, apple_position):\n        \"\"\"Sets the next location for the apple\n\n        Args:\n            apple_position ([list]): List of x and y coordinates of the apple\n        \"\"\"\n        self.apple_position = apple_position\n\n    def appleSense(self, x, y, dX, dY, foodX, foodY):\n        \"\"\"Check if the apple is present along the chosen direction\n\n        Args:\n            x ([int]): X coordinate of snake_head\n            y ([int]): Y coordinate of snake_head\n            dX ([int]): Direction of movement of snake in x-direction\n            dY ([int]): Direction of movement of snake in y-direction\n            foodX ([int]): X coordinate of food\n            foodY ([int]): Y coordinate of food\n\n        Returns:\n            [boolean]: Represents 1 if food is present along the path, else 0\n        \"\"\"\n        if dX == 0:\n            if foodX - x == 0 and (foodY - y)/dY > 0:\n                return 1\n        elif dY == 0:\n            if foodY - y == 0 and (foodX - x)/dX > 0:\n                return 1\n        else:\n            if (foodX - x)/dX == (foodY - y)/dY and (foodY - y)/dY > 0:\n                return 1\n        return 0\n\n    def bodyCalculation(self, x, y, dX, dY, x2, y2):\n        \"\"\"Checks if the specified part of the snake's body is present along the chosen direction\n\n        Args:\n            x ([int]): X coordinate of snake_head\n            y ([int]): Y coordinate of snake_head\n            dX ([int]): Direction of movement of snake in x-direction\n            dY ([int]): Direction of movement of snake in y-direction\n            x2 ([int]): X coordinate of the selected body part of the snake\n            y2 ([int]): Y coordinate of the selected body part of the snake\n\n        Returns:\n            [float]: Distance (in steps of dX/dY) from the head to the body part along the chosen direction, or 10000 if the body part does not lie on that path\n        \"\"\"\n        if dX == 0:\n            if x2 - x == 0 and (y2 - y)/dY > 0:\n                return (y2 - y)/dY\n        elif dY == 0:\n            if y2 - y == 0 and (x2 - x)/dX > 0:\n                return (x2 - x)/dX\n        else:\n            if (x2 - x)/dX == (y2 - y)/dY and (y2 - y)/dY > 0:\n                return (x2 - x)/dX\n        return 10000\n\n    def bodySense(self, x, y, dX, dY, snake_position):\n        \"\"\"Check if any part of the body of the snake exists along the chosen \n        direction\n\n        Args:\n            x ([int]): X coordinate of snake_head\n            y ([int]): Y coordinate of snake_head\n            dX ([int]): Direction of 
movement of snake in x-direction\n            dY ([int]): Direction of movement of snake in y-direction\n            snake_position ([list]): List of the body parts of the snake\n\n        Returns:\n            [float]: Normalized distance between snake_head and the closest part of the snake's body\n            along the chosen direction\n        \"\"\"\n        minDist = 10000\n        for (body) in snake_position[1:]:\n            minDist = min(minDist, self.bodyCalculation(x, y, dX, dY, body[0], body[1]))\n        if minDist == 10000:\n            return 0\n        return 1/minDist\n\n    def sense_in_direction(self, x, y, dX, dY, foodX, foodY, snake_position):\n        \"\"\"Sense for the apple and body parts in the selected direction\n\n        Args:\n            x ([int]): X coordinate of snake_head\n            y ([int]): Y coordinate of snake_head\n            dX ([int]): Direction of movement of snake in x-direction\n            dY ([int]): Direction of movement of snake in y-direction\n            foodX ([int]): X coordinate of apple\n            foodY ([int]): Y coordinate of apple\n            snake_position ([list]): list of the positions of the snake's body\n\n        Returns:\n            [list]: 2 values containing the results for the apple and body part respectively\n        \"\"\"\n        input = [0, 0]\n        input[0] = self.appleSense(x, y, dX, dY, foodX, foodY)\n        input[1] = self.bodySense(x, y, dX, dY, snake_position)\n        return input\n\n    def checkForZero(self, x):\n        \"\"\"Checks for 0 to avoid division by 0 errors\n\n        Args:\n            x ([int]): Input\n\n        Returns:\n            [int]: Output\n        \"\"\"\n        if x == 0:\n            return 1\n        return x\n    \n    def make_input(self, x, y, foodX, foodY, snake_position, direction):\n        \"\"\"Function to sense in all directions and produce the input for the neural network\n\n        Args:\n            x ([int]): x coordinate of snake head\n            y ([int]): y coordinate of snake head\n            foodX ([int]): x coordinate of food\n            foodY ([int]): y coordinate of food\n            snake_position ([list]): List of coordinates of snake's body\n            direction ([str]): previous direction\n\n        Returns:\n            [list]: List of length 24 representing the 3 inputs in each of 8 directions \n        \"\"\"\n        input = []\n\n        input.extend(self.sense_in_direction(x, y, 0, -self.unit, foodX, foodY, snake_position)) \n        input.extend([self.unit/self.checkForZero((y-self.unit))])\n        \n\n        input.extend(self.sense_in_direction(x, y, self.unit, -self.unit, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero(min(y - self.unit, self.display_width - self.unit - x))])\n\n        input.extend(self.sense_in_direction(x, y, self.unit, 0, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero((self.display_width - self.unit - x))])\n\n        input.extend(self.sense_in_direction(x, y, self.unit, self.unit, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero(min(self.display_height - self.unit - y, self.display_width - self.unit - x))])\n\n        input.extend(self.sense_in_direction(x, y, 0, self.unit, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero((self.display_height - self.unit - y))])\n\n        input.extend(self.sense_in_direction(x, y, -self.unit, self.unit, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero(min(x - self.unit, self.display_height - self.unit - y))])\n\n        input.extend(self.sense_in_direction(x, y, -self.unit, 0, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero((x - self.unit))])\n\n        input.extend(self.sense_in_direction(x, y, -self.unit, -self.unit, foodX, foodY, snake_position))\n        input.extend([self.unit/self.checkForZero(min((y-self.unit), (x - self.unit)))])\n        \n        if(direction == 'RIGHT'):\n            input = input[6:] + input[:6]\n        elif (direction == 'DOWN'):\n            input = input[12:] + input[:12]\n        elif (direction == 'LEFT'):\n            input = input[18:] + input[:18]\n        \n        return input \n\n    def initialize_weights(self, NN_shape):\n        \"\"\"Initialize the weights of the neural network\n\n        Args:\n            NN_shape ([list]): Shape of the neural network\n        \"\"\"\n        for i in range(len(NN_shape)-1):\n            theta = np.random.uniform(-0.5, 0.5,\n                                      (NN_shape[i], NN_shape[i+1]))\n            self.theta.append(theta)\n\n            bias = np.random.uniform(-0.1, 0.1, (1, NN_shape[i+1]))\n            self.bias.append(bias)\n\n    def decision(self, x, y, snake_position, direction):\n        \"\"\"Run the inputs through the neural network to get the output as the decision for the snake's next move\n\n        Args:\n            x (int): X coordinate of snake_head\n            y (int): Y coordinate of snake_head\n            snake_position (list): List of the coordinates 
of snake's body\n direction (str): String representing the previous direction \n\n Returns:\n int: Integer output of the neural network\n \"\"\"\n foodX, foodY = self.apple_position\n input = self.make_input(x, y, foodX, foodY, snake_position, direction)\n \n input = np.array(input)\n outputs = []\n output = input\n for i in range(len(self.theta) - 1):\n output = self.relu(np.dot(output, self.theta[i]) + self.bias[i])\n outputs.append(output)\n output = self.softmax(\n np.dot(output, self.theta[i+1]) + self.bias[i+1])\n outputs.append(output)\n result = np.argmax(outputs[-1]) + 1\n return result\n\n" }, { "alpha_fraction": 0.6758649349212646, "alphanum_fraction": 0.6828665733337402, "avg_line_length": 31.386667251586914, "blob_id": "f95523281e6749c64c59433baa1b9503e2798882", "content_id": "b34c04caf7ac9d2928d55cc4ea722b8da87a2987", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2428, "license_type": "no_license", "max_line_length": 91, "num_lines": 75, "path": "/PlayGameHuman.py", "repo_name": "ashaychangwani/AILearnsSnake", "src_encoding": "UTF-8", "text": "import pygame\nimport pickle\nfrom snake import Environment, snake\nfrom nn import NeuralNet\nimport time\nimport copy\nfrom params import *\n\n\n\nfile = open('saved/test.pickle', \"rb\") \nsnake_generations = pickle.load(file)\nfile.close()\n\nbest_snake = snake_generations[len(snake_generations)-1][0]\nclock = pygame.time.Clock()\npygame.init()\npygame.font.init()\nmyfont = pygame.font.SysFont('Bitstream Vera Serif', 20)\nscreen = pygame.display.set_mode((display_width, display_height))\nenvironment = Environment(display_height, display_width, unit)\nplayer = snake(display_width, display_height, NN_shape, unit, False)\nplayer.neuralnet.theta = []\nplayer.neuralnet.bias = []\nplayer.neuralnet.setNextFood(\n environment.create_new_apple(player.snake_position))\nscreen = environment.create(screen, gray)\nscreen = environment.draw_apple(screen, pink)\nscreen = player.draw_snake(screen, blue, cherry)\npygame.display.update()\n\nscore = 0\ndecision = 0\nwhile(player.isAlive()):\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n decision = 2\n elif event.key == pygame.K_RIGHT:\n decision = 3\n elif event.key == pygame.K_DOWN:\n decision = 4\n else:\n decision = 1\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n \n if (player.head_x, player.head_y) == environment.apple_position:\n player.eatAppleHuman(decision)\n player.neuralnet.setNextFood(environment.create_new_apple(player.snake_position))\n score+=1\n \n\n\n player.moveHuman(decision)\n screen = environment.create(screen, gray)\n screen = environment.draw_apple(screen, pink)\n screen = player.draw_snake(screen, blue, cherry)\n prediction = player.convAIToDirections(best_snake.neuralnet.decision(\n player.head_x, player.head_y, player.snake_position, player.direction))\n\n pygame.display.set_caption('Score: '+str(score)+'\\t\\tAI recommends moving '+prediction)\n pygame.display.update()\n clock.tick(6)\n \npygame.display.update() \nlargeText=pygame.font.Font('freesansbold.ttf',30)\nTextSurf=largeText.render(str(\"Your final score is \"+str(score)),True,pink)\nTextRect=TextSurf.get_rect()\nTextRect.center=((display_width/2),(display_height/2))\nscreen.blit(TextSurf,TextRect)\npygame.display.update()\ntime.sleep(2)\npygame.quit()" } ]
7
danielcorreia96/MOTSD
https://github.com/danielcorreia96/MOTSD
65527dd6c8f3815d8747d72a881717de48f94b6c
5e0690579440560928b3b76800f5faa0c8f41a7c
8bbda0c9d3ef3b8246b3518cd738fa4f03d07e61
refs/heads/master
2020-05-23T06:20:38.273944
2019-11-04T19:04:06
2019-11-04T19:07:52
186,664,489
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6899879574775696, "alphanum_fraction": 0.6924004554748535, "avg_line_length": 24.121212005615234, "blob_id": "147ede1aa9cff9e2076c29a2dec7a7aca47c7492", "content_id": "e6457d3f949574e0085523efeef5bb22bf7e6148", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "permissive", "max_line_length": 69, "num_lines": 33, "path": "/backend/integrations/svn_utils.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport datetime\nimport os.path\nfrom pathlib import Path\n\nimport svn.local\nfrom joblib import Memory\n\nmemory = Memory(Path(\"data\"), verbose=1)\n\n\[email protected]\ndef get_log(branch, from_dt, to_dt):\n repo_path = os.path.abspath(branch)\n client = svn.local.LocalClient(path_=repo_path)\n\n log = client.log_default(\n timestamp_from_dt=datetime.datetime.fromisoformat(from_dt),\n timestamp_to_dt=datetime.datetime.fromisoformat(to_dt),\n changelist=True,\n )\n return [log_e for log_e in log]\n\n\[email protected]()\ndef get_log_for_revision(branch, revision):\n repo_path = os.path.abspath(branch)\n client = svn.local.LocalClient(path_=repo_path)\n\n log = client.log_default(\n revision_from=revision, revision_to=revision, changelist=True\n )\n return [log_e for log_e in log]\n" }, { "alpha_fraction": 0.6333457827568054, "alphanum_fraction": 0.6348377466201782, "avg_line_length": 29.465909957885742, "blob_id": "821fa2555106e486b2e2c2e1b6005bd9bf5fcf81", "content_id": "faca34d29770f9c836031dd394a29932962a9f46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2681, "license_type": "permissive", "max_line_length": 95, "num_lines": 88, "path": "/cov_profiler.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport json\nimport time\nimport subprocess\nimport re\nimport click\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected](\"run\")\[email protected](\"config_file\", type=click.Path(exists=True, readable=True))\ndef run_profiler_for_config(config_file):\n \"\"\"\n Run OpenCover profiler for a given configuration file\n\n :param config_file: path to the configuration file\n \"\"\"\n # Load config file\n with open(config_file, mode=\"r\") as demo_file:\n config = json.load(demo_file)\n test_lists = config[\"runlists\"]\n\n with open(f\"{config['branch']}_log_profiler.txt\", mode=\"a\") as log_file:\n for testlist in test_lists:\n run_coverage_profiler(config, testlist, log_file)\n\n\ndef run_coverage_profiler(config, testlist, log_file):\n \"\"\"\n Run OpenCover profiler for a given list of tests and configuration file\n\n :param config: path to configuration file\n :param testlist: path to file with list of tests to run\n :param log_file: path to logging file\n \"\"\"\n def write_log(message):\n print(message)\n log_file.write(message + \"\\n\")\n log_file.flush()\n\n write_log(testlist)\n command, testlist_id = get_opencover_args(config, testlist)\n write_log(f\"Command: {command} --> Output: {testlist_id}\")\n\n # Run and profile tests with OpenCover\n start = time.perf_counter()\n subprocess.call(command)\n end = time.perf_counter()\n\n write_log(f\"Run for {testlist_id}: {(end - start) / 60} minutes\")\n\n\ndef get_opencover_args(config, testlist):\n \"\"\"\n Builds an OpenCover command according to the configuration file and list of tests provided.\n\n :param config: path to configuration file\n :param testlist: 
path to file with list of tests to run\n    :param log_file: path to logging file\n    \"\"\"\n    def write_log(message):\n        print(message)\n        log_file.write(message + \"\\n\")\n        log_file.flush()\n\n    write_log(testlist)\n    command, testlist_id = get_opencover_args(config, testlist)\n    write_log(f\"Command: {command} --> Output: {testlist_id}\")\n\n    # Run and profile tests with OpenCover\n    start = time.perf_counter()\n    subprocess.call(command)\n    end = time.perf_counter()\n\n    write_log(f\"Run for {testlist_id}: {(end - start) / 60} minutes\")\n\n\ndef get_opencover_args(config, testlist):\n    \"\"\"\n    Builds an OpenCover command according to the configuration file and list of tests provided.\n\n    :param config: configuration dictionary loaded from the config file\n    :param testlist: path to file with list of tests to run\n    :return: an OpenCover command and the id of the list of tests\n    \"\"\"\n    # Load relevant data from config\n    args = [\n        f\" -target: {config['runner']}\",\n        f\" -targetargs:{' '.join(config['runner_args'])} {testlist}\",\n        f\" -threshold:{config['threshold']} \",\n        \" -hideskipped:All \",\n        \" -mergebyhash \",\n        # \" -skipautoprops \",\n        f\" -filter:{' '.join(config['filters'])} \",\n        f\" -coverbytest:{';'.join(config['cover_by_test'])} \",\n        f\" -searchdirs: {config['searchdirs_path']} \",\n        \" -register:user \",\n    ]\n    testlist_id = re.search(re.escape(config[\"runlists_path\"]) + r\"(.*).in\", testlist).group(1)\n\n    # Build OpenCover command with arguments\n    command = [config[\"opencover_exec\"]]\n    command.extend(args)\n    command.append(f\"-output:{config['reports_path']}refactor_{testlist_id}.xml\")\n    return command, testlist_id\n\n\nif __name__ == \"__main__\":\n    cli()\n" }, { "alpha_fraction": 0.5686016082763672, "alphanum_fraction": 0.583113431930542, "avg_line_length": 23.45161247253418, "blob_id": "c2116d6a4c5b1ae67352f537f9381984b05005a3", "content_id": "934e7c4620577b049cc6a9cd5ebe461f4b6423b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "permissive", "max_line_length": 74, "num_lines": 31, "path": "/get_csv_lines.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport pickle\nimport backend\nimport os\nimport click\n\nfrom backend.evaluation.summary import ResultsSummary\n\n\[email protected]()\ndef cli():\n    pass\n\n\[email protected](\"start\")\[email protected](\"data_dir\", type=click.Path(exists=True))\ndef start(data_dir):\n    for batch in [\"demo1\", \"demo2\", \"demo3\", \"demo4\"]:\n        results = [\n            os.path.abspath(os.path.join(data_dir, x))\n            for x in os.listdir(data_dir)\n            if f\"{batch}.pickle\" in x  # and len(x) == 24\n        ]\n        for file in results:\n            summary: ResultsSummary = pickle.load(open(file, mode=\"rb\"))\n            # print(f\"{file[-16:-13]}\")\n            print(f\"==Results for {file}\\n{summary.export_to_csv_line()}\")\n\n\nif __name__ == \"__main__\":\n    cli()\n" }, { "alpha_fraction": 0.5609714984893799, "alphanum_fraction": 0.5651180744171143, "avg_line_length": 37.39556884765625, "blob_id": "3e8232ca893a5f0afc491c70d0f14580b3cf94be", "content_id": "da72380ec31f1ce6c5cbf8c1c50abce298d15095", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11817, "license_type": "permissive", "max_line_length": 109, "num_lines": 316, "path": "/backend/evaluation/execution_item.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport random\nimport re\nfrom dataclasses import dataclass\nfrom typing import List\n\nimport numpy as np\nfrom faker import Factory\n\nfrom backend.integrations.database import get_testfails_for_revision\nfrom backend.selection.problem_data import ProblemData\n\n\ndef print_function_values_to_screen(solutions, data):\n    # Adapted from JMetalPy\n    if type(solutions) is not list:\n        solutions = [solutions]\n\n    for solution in solutions:\n        print(str(solutions.index(solution)) + \": \", sep=\" \", end=\"\", flush=True)\n        print(solution.objectives, sep=\" \", end=\"\", flush=True)\n        pos = np.array(solution.variables[0])\n        rev_solution = list(data.tests_index[pos == 1])\n        print(f\" (sol_size: {len(rev_solution)})\")\n\n\n@dataclass\nclass RevisionResults:\n    branch: str\n    rev_id: str\n    rev_date: str\n    
changelist: list\n    error_no_changed_items: str\n    solutions_found: list\n    score: tuple  # (score %, # matched, # expected, # tests)\n    solution_metrics: list\n    new_feedback_time: float\n    computing_time: float\n    orig_rev_history: set\n    real_rev_history: set\n    innocent: bool\n\n    def __init__(\n        self, svn_log_entry, branch, ignored_tests, previous_rev, masked=False\n    ):\n        self.branch = branch\n        self.rev_id = svn_log_entry.revision\n        self.rev_date = str(svn_log_entry.date)\n        self.changelist = svn_log_entry.changelist\n        self.masked = masked\n\n        self.error_no_changed_items = None\n        self.innocent = None\n\n        self.set_revision_history(previous_rev, ignored_tests)\n\n        if masked:\n            self.fake = Factory.create()\n\n        self.solutions_found = []\n        self.score = (-1, -1, -1, -1)\n        self.new_feedback_time = 0\n        self.computing_time = 0\n        self.solution_metrics = []\n\n    def set_revision_history(self, previous: \"RevisionResults\", ignored: List[str]):\n        \"\"\"\n        Set revision history values (i.e. lists of failing test names) for this revision.\n\n        :param previous: execution results from the previous revision\n        :param ignored: list of tests to ignore\n        \"\"\"\n        # Set original revision history\n        rev_results = get_testfails_for_revision(revision=self.rev_id)\n        self.orig_rev_history = set(rev_results.FULLNAME.values)\n\n        # If no failing tests are returned from the database, use the failing tests of the previous revision\n        if len(self.orig_rev_history) == 0:\n            if previous is not None:\n                self.orig_rev_history = previous.orig_rev_history\n            else:\n                self.orig_rev_history = set()\n\n        # Set real revision history to be used\n        # 1. remove ignored tests based on configuration file\n        self.real_rev_history = set(\n            filter(\n                lambda test: all(x not in test for x in ignored), self.orig_rev_history\n            )\n        )\n\n        # 2. 
keep only failing tests not in previous revision\n        if previous is not None:\n            self.real_rev_history = set(\n                filter(\n                    lambda x: x not in previous.orig_rev_history, self.real_rev_history\n                )\n            )\n\n    def print_results(self, data: ProblemData, fixed_demo=False):\n        \"\"\"\n        Print execution results to stdout.\n\n        :param data: data associated with this execution\n        :param fixed_demo: flag indicating whether this is a random selection or not\n        \"\"\"\n\n        def get_fake_filename(file: str) -> str:\n            \"\"\"\n            Get a fake filename to mask the given file path.\n\n            :param file: file path for which a fake name should be generated\n            :return: a generated fake file path\n            \"\"\"\n            result = re.search(r\"/.*\\.(.*)\", file)\n            if result is None:\n                # this is a directory -> get a random file path with some random file extension\n                return self.fake.file_path(\n                    depth=random.randint(3, 5),\n                    extension=random.choice(\n                        [\"cs\", \"tsx\", \"json\", \"oml\", \"csproj\", \"xml\"]\n                    ),\n                )\n            else:\n                # this is a file -> get a random filename and keep the file extension\n                extension = result.group(1)\n                filename = self.fake.file_path(\n                    depth=random.randint(3, 5), extension=\" \"\n                )\n                filename = \"/\".join([x.capitalize() for x in filename[:-1].split(\"/\")])\n                return filename + extension\n\n        # Revision Id + Changelist\n        if self.masked:\n            fake_changelist = [(x[0], get_fake_filename(x[1])) for x in self.changelist]\n            changes = \"\\n\\t\".join(map(lambda x: str(x), fake_changelist))\n        else:\n            changes = \"\\n\\t\".join(map(lambda x: str(x), self.changelist))\n\n        revision_id = f\"rev_id: {self.rev_id} ({self.rev_date})\"\n        print(f\"{revision_id}\\nchangelist:\\n\\t{changes}\")\n\n        # Execution results\n        if type(self.error_no_changed_items) == str:\n            # If no changed indexes were extracted, then print the error message\n            print(f\"Revision {self.rev_id} failed due to {self.error_no_changed_items}\")\n            self.print_revision_status()\n        else:\n            if fixed_demo:\n                # For random selections, the solution is stored in self.solutions_found\n                self.print_revision_status()\n                self.print_solution_score(0, self.solutions_found)\n                self.computing_time = 0.1\n                print(f\"Solution Size: {len(self.solutions_found)} tests\")\n                self.new_feedback_time = sum(\n                    [\n                        data.history_test_execution_times[test]\n                        for test in self.solutions_found\n                    ]\n                )\n                print(\n                    f\"Solution Feedback Loop Time: {self.new_feedback_time:.0f} seconds\"\n                )\n            else:\n                self.print_revision_status()\n                self.print_execution_results(data)\n                self.print_solution_list(data)\n                self.print_execution_inspection(data)\n\n        # separator\n        print(\"==========================\" * 4)\n\n    def print_revision_status(self):\n        \"\"\"\n        Print status of this revision: pass/fail, number and list of failing tests.\n\n        \"\"\"\n        if len(self.orig_rev_history) == 0:\n            print(f\"Revision {self.rev_id} had no failing tests\")\n        else:\n            failed_tests = f\"{len(self.orig_rev_history)} failed tests\"\n            if self.masked:\n                print(f\"Revision {self.rev_id} - {failed_tests}\")\n            else:\n                joined = \"\\n\\t\".join(self.orig_rev_history)\n                print(f\"Revision {self.rev_id} - {failed_tests}:\\n\\t{joined}\")\n\n    def print_execution_results(self, data: ProblemData):\n        \"\"\"\n        Print results of this execution to stdout\n\n        - Computing Time\n        - Objectives values of each solution\n        - Score of each solution\n        :param data: data related to this execution\n        \"\"\"\n        # Computing Time\n        print(\"Computing time: \" + str(self.computing_time))\n\n        # Objectives values of each solution\n        print_function_values_to_screen(self.solutions_found, data)\n\n        # Score of 
each solution\n for i, solution in enumerate(self.solutions_found):\n pos = np.array(solution.variables[0])\n rev_solution = list(data.tests_index[pos == 1])\n self.print_solution_score(i, rev_solution)\n\n def print_execution_inspection(self, data: ProblemData):\n \"\"\"\n Print inspection conclusions over this execution.\n\n Inspection checks if it was possible to select a test given the available data (before/after filters)\n\n :param data: data related to this execution\n \"\"\"\n\n def inspection_checker(tests_data: np.ndarray):\n \"\"\"\n Check if the provided array of tests contains the failing tests for this revision.\n\n The counts of possible/impossible to find tests are printed.\n\n :param tests_data: array of test names\n \"\"\"\n available, impossible = 0, 0\n for test in self.real_rev_history:\n if any(x in test for x in tests_data):\n # print(f\"{test} = Available\")\n available += 1\n else:\n print(f\"\\t{test} = Impossible\")\n impossible += 1\n print(f\"Available={available} || Impossible={impossible}\")\n\n print(f\"Check test availability vs original data - {data.original_tests.shape}\")\n inspection_checker(data.original_tests)\n\n print(f\"Check test availability vs filtered data - {data.tests_index.shape}\")\n inspection_checker(data.tests_index)\n\n def print_solution_list(self, data: ProblemData):\n \"\"\"\n Print solution results for this execution: solution size, feedback time and list of selected tests\n\n :param data: data related to this execution\n \"\"\"\n\n def get_fake_test_name() -> str:\n \"\"\"\n Get a generated fake test name\n :return: a random test name\n \"\"\"\n test_name = self.fake.file_path(depth=random.randint(3, 5), extension=\" \")\n test_name = test_name[1:-2].replace(\"/\", \".\")\n test_name = \"Test.\" + \".\".join(\n [x.capitalize() for x in test_name.split(\".\")]\n )\n return test_name\n\n # Store objectives values of this solution\n solution = self.solutions_found[0]\n self.solution_metrics = solution.objectives\n\n pos = np.array(solution.variables[0])\n rev_solution = list(data.tests_index[pos == 1])\n # Solution Size + Feedback Time\n print(f\"Solution Size: {len(rev_solution)} tests\")\n self.new_feedback_time = sum(\n [data.history_test_execution_times[test] for test in rev_solution]\n )\n print(f\"Solution Feedback Loop Time: {self.new_feedback_time:.0f} seconds\")\n\n # Selected Tests\n if self.masked:\n rev_solution = [get_fake_test_name() for _ in rev_solution]\n solution_tests = \"\\n\\t\".join(rev_solution)\n print(f\"\\t{solution_tests}\")\n\n def print_solution_score(self, i: int, rev_solution: List[str]):\n \"\"\"\n Print score (i.e. 
number of failing tests found) of this solution.\n\n :param i: number id of this solution\n :param rev_solution: list of tests selected by this solution\n \"\"\"\n\n def get_matching_tests() -> List[str]:\n \"\"\"\n Get list of selected tests matching with the set of failing tests.\n\n :return: a list of test names\n \"\"\"\n return [\n test\n for test in self.real_rev_history\n if any(x in test for x in rev_solution)\n ]\n\n sol_size = len(rev_solution)\n sol_id = f\"Solution {i} ({sol_size})\"\n\n if len(self.real_rev_history) == 0:\n print(f\"{sol_id} = only ignored tests\")\n self.score = (-1, 0, 0, sol_size)\n else:\n matching = get_matching_tests()\n score = (len(matching) / len(self.real_rev_history)) * 100\n match_vs_rev = f\"{len(matching)}/{len(self.real_rev_history)}\"\n if self.masked:\n print(f\"{sol_id} = {match_vs_rev} ({score:.0f}%)\")\n else:\n # Also print matching test names, if not using masked mode\n print(f\"{sol_id} = {match_vs_rev} ({score:.0f}%) -> {matching}\")\n\n self.score = (score, len(matching), len(self.real_rev_history), sol_size)\n" }, { "alpha_fraction": 0.6527358889579773, "alphanum_fraction": 0.6546747088432312, "avg_line_length": 31.921985626220703, "blob_id": "307cfdc56273aeaeeb75b3210dfdf280eafa37a0", "content_id": "a6040445b8092dff11643ddab3229c28fbfa4bf9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9284, "license_type": "permissive", "max_line_length": 109, "num_lines": 282, "path": "/testsel_pipeline.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport json\nimport random\n\nimport click\nimport numpy as np\nfrom jmetal.core.algorithm import Algorithm\n\nimport backend.selection.objectives as metrics\nfrom backend.evaluation.execution_item import RevisionResults\nfrom backend.evaluation.summary import ResultsSummary\nfrom backend.integrations.svn_utils import get_log, get_log_for_revision\nfrom backend.selection.problem_data import ProblemData\nfrom backend.selection.test_selection import TestSelection, my_binary_mopso\n\nnp.random.seed(1234)\nnp.set_printoptions(threshold=np.inf)\n\nOBJECTIVES_MAP = {\n \"ddu\": metrics.calculate_ddu,\n \"n_tests\": metrics.calculate_number_of_tests,\n \"fails\": metrics.calculate_test_fails,\n \"exec_times\": metrics.calculate_exec_times,\n \"norm_coverage\": metrics.calculate_norm_coverage,\n \"coverage\": metrics.calculate_coverage,\n}\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected](\"user\")\[email protected](\n \"--objectives\",\n \"-o\",\n required=True,\n type=click.Choice(list(OBJECTIVES_MAP.keys())),\n multiple=True,\n)\[email protected](\"--masked\", is_flag=True)\[email protected](\"swarm_size\", type=click.INT)\[email protected](\"activity_matrix\", type=click.Path(exists=True, readable=True))\[email protected](\"demo_config\", type=click.Path(exists=True, readable=True))\ndef run_optimization(objectives, masked, activity_matrix, demo_config, swarm_size):\n \"\"\"\n User input-based execution of the pipeline\n \"\"\"\n with open(demo_config, mode=\"r\") as demo_file:\n config = json.load(demo_file)\n # Build problem data\n data = ProblemData(\n activity_matrix,\n config[\"branch\"],\n config[\"fails_start_dt\"],\n config[\"from_dt\"],\n config[\"to_dt\"],\n ignore_tests=config[\"ignore_tests\"],\n )\n\n data.swarm_size = swarm_size\n\n while True:\n revision = input(\"Target Revision Id: \")\n log = [log_e for log_e in get_log_for_revision(config[\"branch_path\"], 
revision)]\n if not log:\n continue\n log_entry = log[0]\n\n print(f\"Running pipeline demo with the following objectives: {objectives}\")\n # Map the selected objective names to their metric functions\n # (a distinct name avoids shadowing the objectives module imported as `metrics`)\n objective_fns = [OBJECTIVES_MAP[key] for key in objectives]\n # Reset problem data to original matrices\n data.reset()\n\n # Run pipeline for revision\n # There is no previous revision in interactive mode, so previous_rev is None\n revision_results = RevisionResults(\n log_entry, data.branch, data.ignore_tests, None, masked\n )\n run_pipeline(data, objective_fns, revision_results, config[\"ignore_changes\"])\n revision_results.print_results(data)\n\n\[email protected](\"demo\")\[email protected](\n \"--objectives\",\n \"-o\",\n required=True,\n type=click.Choice(list(OBJECTIVES_MAP.keys())),\n multiple=True,\n)\[email protected](\"--masked\", is_flag=True)\[email protected](\"swarm_size\", type=click.INT)\[email protected](\"activity_matrix\", type=click.Path(exists=True, readable=True))\[email protected](\"demo_config\", type=click.Path(exists=True, readable=True))\[email protected](\"output_file\", type=click.Path())\ndef run_optimization_for_demo(\n activity_matrix, demo_config, objectives, masked, swarm_size, output_file\n):\n def run_tool_for_revision(revision, data, previous_rev, ignore_changes):\n print(f\"Running pipeline demo with the following objectives: {objectives}\")\n # Map the selected objective names to their metric functions\n objective_fns = [OBJECTIVES_MAP[key] for key in objectives]\n # Reset problem data to original matrices\n data.reset()\n\n # Run pipeline for revision\n revision_results = RevisionResults(\n revision, data.branch, data.ignore_tests, previous_rev, masked\n )\n if len(revision_results.real_rev_history) > 0:\n run_pipeline(data, objective_fns, revision_results, ignore_changes)\n revision_results.print_results(data)\n\n return revision_results\n\n # Get log based on demo config\n with open(demo_config, mode=\"r\") as demo_file:\n config = json.load(demo_file)\n\n log = get_log(config[\"branch_path\"], config[\"from_dt\"], config[\"to_dt\"])\n\n # Build problem data\n data = ProblemData(\n activity_matrix,\n config[\"branch\"],\n config[\"fails_start_dt\"],\n config[\"from_dt\"],\n config[\"to_dt\"],\n ignore_tests=config[\"ignore_tests\"],\n )\n\n data.swarm_size = swarm_size\n\n # Run tool for each revision\n results = []\n previous = None\n # for log_e in log[:100]:\n for log_e in log:\n if not is_ignored_project(log_e.changelist, config[\"ignore_changes\"]):\n res = run_tool_for_revision(log_e, data, previous, config[\"ignore_changes\"])\n results.append(res)\n previous = res\n\n # Build results summary report\n summary = ResultsSummary(results, data)\n\n # - print summary to terminal\n summary.export_to_text()\n\n # save data to pickle\n with open(output_file, mode=\"wb\") as output:\n summary.export_to_pickle(output)\n\n\[email protected](\"random\")\[email protected](\"--fixed\", is_flag=True, help=\"Use a fixed test sample for evaluation\")\[email protected](\n \"--filtered\",\n is_flag=True,\n help=\"Filter matrix using changelist for evaluation fairness with MOTSD\",\n)\[email protected](\"random_p\", type=click.FLOAT)\[email protected](\"all_tests\", type=click.Path(exists=True, readable=True))\[email protected](\"activity_matrix\", type=click.Path(exists=True, readable=True))\[email protected](\"demo_config\", type=click.Path(exists=True, readable=True))\[email protected](\"output_file\", type=click.Path())\ndef run_random_demo(\n activity_matrix, demo_config, output_file, random_p, all_tests, fixed, filtered\n):\n def run_tool_for_revision(revision, data, previous_rev, ignore_changes, t_sample):\n revision_results = RevisionResults(\n revision, data.branch, 
data.ignore_tests, previous_rev\n )\n if filtered:\n # Running in filtered mode for evaluation fairness with MOTSD, i.e. filter matrix with changelist\n # Get indexes for methods changed by a commit\n changed_idxs = data.get_changed_indexes_for_changelist(\n revision.changelist, ignore_changes\n )\n\n # Stop pipeline if no changed indexes were extracted\n if type(changed_idxs) == str:\n revision_results.error_no_changed_items = changed_idxs\n return revision_results\n\n if not fixed:\n # Running in not fixed sample mode, i.e. get a new test sample for each commit\n t_sample = random.sample(tests, int(random_p * (len(tests))))\n\n if len(revision_results.real_rev_history) > 0:\n revision_results.solutions_found = t_sample\n revision_results.print_results(data, fixed_demo=True)\n\n return revision_results\n\n # Get log based on demo config\n with open(demo_config, mode=\"r\") as demo_file:\n config = json.load(demo_file)\n\n log = get_log(config[\"branch_path\"], config[\"from_dt\"], config[\"to_dt\"])\n\n # Read all tests file\n with open(all_tests, mode=\"r\") as tests_file:\n tests = [test.strip() for test in tests_file.readlines()]\n\n tests_sample = random.sample(tests, int(random_p * (len(tests))))\n\n # Build problem data\n data = ProblemData(\n activity_matrix,\n config[\"branch\"],\n config[\"fails_start_dt\"],\n config[\"from_dt\"],\n config[\"to_dt\"],\n ignore_tests=config[\"ignore_tests\"],\n )\n\n # Run tool for each revision\n results = []\n previous = None\n # for log_e in log[:100]:\n for log_e in log:\n if not is_ignored_project(log_e.changelist, config[\"ignore_changes\"]):\n res = run_tool_for_revision(\n log_e, data, previous, config[\"ignore_changes\"], tests_sample\n )\n results.append(res)\n previous = res\n\n # Build results summary report\n summary = ResultsSummary(results, data)\n\n # - print summary to terminal\n summary.export_to_text()\n\n # save data to pickle\n with open(output_file, mode=\"wb\") as output:\n summary.export_to_pickle(output)\n\n\ndef run_pipeline(data, objectives, revision: RevisionResults, ignore_changes):\n # Get indexes for methods changed by a commit\n changed_idxs = data.get_changed_indexes_for_changelist(\n revision.changelist, ignore_changes\n )\n\n # Stop pipeline if no changed indexes were extracted\n if type(changed_idxs) == str:\n revision.error_no_changed_items = changed_idxs\n return\n\n # Filter matrix and indexes based on commit\n data.filter_data_for_commit(changed_idxs)\n\n # Run optimizer for the reduced matrix\n problem = TestSelection(data, objectives)\n solution_front = run_optimizer(my_binary_mopso(problem, data.swarm_size), revision)\n revision.solutions_found = solution_front\n\n\ndef run_optimizer(algorithm: Algorithm, revision: RevisionResults):\n # Run optimizer algorithm\n algorithm.run()\n front = algorithm.get_result()\n revision.computing_time = algorithm.total_computing_time\n\n # return sorted(front, key=lambda x: (x.objectives[0]))\n return sorted(front, key=lambda x: (x.objectives[0], x.objectives[1]))\n\n\ndef is_ignored_project(changelist, ignore_changes):\n return all(\n any(\n ignore in change[1]\n for ignore in ignore_changes\n )\n for change in changelist\n )\n\n\nif __name__ == \"__main__\":\n cli()\n" }, { "alpha_fraction": 0.66139817237854, "alphanum_fraction": 0.6713272333145142, "avg_line_length": 35.286766052246094, "blob_id": "21bd6210665478a4dc412e6792da2a34b1ccf8a3", "content_id": "726d48a925ac5d33506be91e99df2b6d92dd3b06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 4935, "license_type": "permissive", "max_line_length": 105, "num_lines": 136, "path": "/backend/evaluation/utils.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom typing import List, Tuple\n\nimport numpy as np\n\nfrom backend.evaluation.execution_item import RevisionResults\n\n\ndef get_metric_stats(data: np.ndarray) -> List[int]:\n \"\"\"\n Calculate basic stats and percentiles for the given metric data points.\n\n - Stats: average, min, max, standard deviation\n - Percentiles: 10, 25, 50, 75, 90\n\n :param data: array of metric values\n :return: list of stats and percentiles values\n \"\"\"\n stats = [np.average(data), np.min(data), np.max(data), np.std(data)]\n percentiles = [np.percentile(data, p) for p in [10, 25, 50, 75, 90]]\n return list(map(int, [*stats, *percentiles]))\n\n\ndef get_micro_recall(executions: List[RevisionResults]) -> float:\n \"\"\"\n Calculate micro-averaged recall for a list of tool executions.\n\n :param executions: list of RevisionResults objects\n :return: micro-recall value\n \"\"\"\n micro_recall_n = [res.score[1] for res in executions if res.score[2] > 0]\n micro_recall_d = [res.score[2] for res in executions if res.score[2] > 0]\n return sum(micro_recall_n) / sum(micro_recall_d)\n\n\ndef get_macro_recall(executions: List[RevisionResults]) -> float:\n \"\"\"\n Calculate macro-averaged recall for a list of tool executions.\n\n :param executions: list of RevisionResults objects\n :return: macro-recall value\n \"\"\"\n red_tests_recall = [\n res.score[1] / res.score[2] for res in executions if res.score[2] > 0\n ]\n return sum(red_tests_recall) / len(red_tests_recall)\n\n\ndef get_micro_precision(executions: List[RevisionResults]) -> float:\n \"\"\"\n Calculate micro-averaged precision for a list of tool executions.\n\n :param executions: list of RevisionResults objects\n :return: micro-precision value\n \"\"\"\n micro_precision_n = [res.score[1] for res in executions if res.score[2] > 0]\n micro_precision_d = [res.score[3] for res in executions if res.score[2] > 0]\n return sum(micro_precision_n) / sum(micro_precision_d)\n\n\ndef get_macro_precision(executions: List[RevisionResults]) -> float:\n \"\"\"\n Calculate macro-averaged precision for a list of tool executions.\n\n :param executions: list of RevisionResults objects\n :return: macro-precision value\n \"\"\"\n red_tests_precision = [\n res.score[1] / res.score[3]\n for res in executions\n if res.score[2] > 0 and res.score[3] > 0\n ]\n return sum(red_tests_precision) / len(red_tests_precision)\n\n\ndef get_error_stats(\n pattern: str, executions: List[RevisionResults]\n) -> Tuple[List, List]:\n \"\"\"\n Get lists of execution results with a given error message for all commits and for only red commits.\n\n :param pattern: error message pattern to search for\n :param executions: list of execution results\n :return: two lists of execution results: one for all commits, another only for red commits\n \"\"\"\n error_cases = [res for res in executions if pattern in res.error_no_changed_items]\n red_error_cases = [res for res in error_cases if len(res.real_rev_history) > 0]\n return error_cases, red_error_cases\n\n\ndef get_tool_executions(executions: List[RevisionResults]) -> List[RevisionResults]:\n \"\"\"\n Get only the tool executions from a list of execution results.\n\n :param executions: list of execution results\n :return: list of tool executions\n \"\"\"\n return [res for res in executions if 
type(res.error_no_changed_items) != str]\n\n\ndef get_tool_no_executions(executions: List[RevisionResults]) -> List[RevisionResults]:\n \"\"\"\n Get only the failed tool executions from a list of execution results.\n\n :param executions: list of execution results\n :return: list of failed tool executions\n \"\"\"\n return [res for res in executions if type(res.error_no_changed_items) == str]\n\n\ndef get_total_innocent_reds(executions: List[RevisionResults]) -> int:\n \"\"\"\n Get number of innocent red commits from a given list of execution results.\n\n :param executions: list of execution results\n :return: number of innocent red commits\n \"\"\"\n count = 0\n previous_fails = set()\n for res in executions:\n copy_res = res.real_rev_history.copy()\n # If it is a tool execution over a red commit\n if type(res.error_no_changed_items) != str and len(res.real_rev_history) > 0:\n # If previous revision test fails is a superset, then the current commit is innocent\n if previous_fails.issuperset(res.real_rev_history):\n count += 1\n res.innocent = True\n\n # Filter tests present in previous revision test fails\n # res.real_rev_history = set(filter(lambda x: x not in previous_fails, res.real_rev_history))\n # # If the new set of test fails is empty, then the current commit is innocent\n # if len(res.real_rev_history) == 0:\n # count += 1\n # res.innocent = True\n previous_fails = copy_res\n return count\n" }, { "alpha_fraction": 0.6268099546432495, "alphanum_fraction": 0.6286013126373291, "avg_line_length": 34.63298034667969, "blob_id": "bc36e549746c2ad99b3b8650ba8dea85faa483cb", "content_id": "1120065bbce0cbfa2064bd72c3fa37b8497d5a61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6699, "license_type": "permissive", "max_line_length": 83, "num_lines": 188, "path": "/backend/opencover/parser.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport xml.etree.ElementTree as ET\nimport json\nimport itertools\nimport re\nimport numpy as np\n\nimport backend.opencover.utils as utils\n\n\ndef get_modules_from_report(report):\n \"\"\"\n Extracts maps for tests and code modules names to its XML elements.\n\n :param report: path to XML coverage report\n :return: 2 maps (tests and code) mapping a module name to its XML element\n \"\"\"\n _, root_modules = ET.parse(report).getroot()\n test_modules, code_modules = {}, {}\n\n for child in root_modules:\n name = utils.get_module_name(child)\n code_modules[name] = child\n if \"Tests\" in name:\n test_modules[name] = child\n\n return test_modules, code_modules\n\n\ndef get_files_map_from_report(report, branch):\n \"\"\"\n Get map between file uids and their file path name.\n\n :param report: path to the XML coverage report\n :param branch: branch name to locate the start of the file path\n :return: map between file uids and file path names\n \"\"\"\n test_modules, code_modules = get_modules_from_report(report)\n files_map = {}\n\n for module in code_modules.values():\n for file in utils.get_module_files(module):\n uid, path = file.attrib[\"uid\"], file.attrib[\"fullPath\"]\n re_search = re.search(branch + r\"\\\\(.*)\\.cs\", path)\n if re_search:\n name = re_search.group(1).replace(\"\\\\\", \".\")\n files_map[uid] = name\n\n return files_map\n\n\ndef build_tests_map(test_modules_map):\n tests_uids_map = {}\n\n for module in test_modules_map.values():\n for method in utils.get_module_tracked_methods(module):\n uid, name = method.attrib[\"uid\"], 
method.attrib[\"name\"]\n tests_uids_map[uid] = name\n\n return tests_uids_map\n\n\ndef build_methods_map(code_modules):\n methods_uids_map = {}\n counter = itertools.count(1)\n\n for module in code_modules.values():\n for clazz in utils.get_module_classes(module):\n for method in utils.get_class_methods(clazz):\n method_name = utils.get_method_name(method)\n methods_uids_map[\"m\" + str(next(counter))] = method_name\n\n return methods_uids_map\n\n\ndef build_id_activity_matrix(code_modules, methods_uids_map, files_map):\n # id-activity matrix\n # key - method id\n # value - test id\n def get_method_id(method_name):\n for (key, value) in methods_uids_map.items():\n if value == method_name:\n return key\n\n activity_matrix = dict.fromkeys(methods_uids_map.keys(), [])\n\n for module in code_modules.values():\n for clazz in utils.get_module_classes(module):\n for method in utils.get_class_methods(clazz):\n method_name, tests = utils.get_method_coverage(method)\n if method_name is not None:\n method_id = get_method_id(method_name)\n activity_matrix[method_id] = tests\n # Update methods map with namespace fix\n fix_methods_map_namespace(\n files_map, method, method_id, method_name, methods_uids_map\n )\n\n return activity_matrix\n\n\ndef fix_methods_map_namespace(\n files_map, method, method_id, method_name, methods_uids_map\n):\n \"\"\"\n Replace method namespace with containing file path.\n\n :param files_map: map of uids to file paths\n :param method: method XML element\n :param method_id: method uid\n :param method_name: method name\n :param methods_uids_map: map of uids to method names\n \"\"\"\n file_ref = utils.get_method_file_ref(method)\n if file_ref is not None:\n file_ref = file_ref.attrib[\"uid\"]\n if files_map.get(file_ref) is not None:\n return_type, name = re.search(r\"(.* ).*(::.*)\", method_name).groups()\n new_namespace = files_map[file_ref]\n new_method_name = \"\".join([return_type, new_namespace, name])\n methods_uids_map[method_id] = new_method_name\n\n\ndef build_binary_activity_matrix(id_act_matrix, method_uid_map, test_uid_map):\n binary_activity_matrix = []\n tests_index = list(test_uid_map.keys())\n methods_index = list(method_uid_map.keys())\n\n # Fill with empty cells\n for _ in range(len(test_uid_map.keys())):\n row = [0 for _ in range(len(method_uid_map.keys()))]\n binary_activity_matrix.append(row)\n\n # Fill with activity results\n for method, tests in id_act_matrix.items():\n if method is not None and tests is not None:\n method_pos = methods_index.index(method)\n for test in tests:\n try:\n test_pos = tests_index.index(test)\n binary_activity_matrix[test_pos][method_pos] = 1\n except ValueError:\n pass\n\n return binary_activity_matrix\n\n\ndef filter_activity_matrix(activity_matrix, method_uid_map, test_uid_map):\n # Load data before filters\n array_act_matrix = np.array(activity_matrix, dtype=bool)\n tests_index = np.array(list(test_uid_map.keys()))\n methods_index = np.array(list(method_uid_map.keys()))\n print(f\"-- Before filters: {array_act_matrix.shape}\")\n\n # Filter methods without activity\n active_methods = ~np.all(array_act_matrix == 0, axis=0)\n array_act_matrix = array_act_matrix[:, active_methods]\n methods_index = methods_index[active_methods]\n filtered_method_uid_map = {k: method_uid_map[k] for k in methods_index}\n print(f\"-- After methods filter: {array_act_matrix.shape}\")\n\n # Filter tests without activity\n active_tests = ~np.all(array_act_matrix == 0, axis=1)\n tests_index = tests_index[active_tests]\n filtered_test_uid_map = {k: 
test_uid_map[k] for k in tests_index}\n array_act_matrix = array_act_matrix[active_tests]\n print(f\"-- After tests filter: {array_act_matrix.shape}\")\n\n return array_act_matrix, filtered_method_uid_map, filtered_test_uid_map\n\n\ndef export_data_to_json(output_name, activity_matrix, methods_map, tests_map):\n \"\"\"\n Exports processed data to json files\n\n :param output_name: name identifier for the JSON output files\n :param tests_map: map of ids to tests\n :param methods_map: map of ids to methods\n :param activity_matrix: binary activity matrix (test x method)\n \"\"\"\n with open(f\"data/jsons/testids_{output_name}.json\", \"w\") as outfile:\n json.dump(tests_map, outfile, indent=4)\n\n with open(f\"data/jsons/methodids_{output_name}.json\", \"w\") as outfile:\n json.dump(methods_map, outfile, indent=4)\n\n with open(f\"data/jsons/actmatrix_{output_name}.json\", \"w\") as outfile:\n json.dump(activity_matrix.astype(\"int\").tolist(), outfile)\n" }, { "alpha_fraction": 0.700214147567749, "alphanum_fraction": 0.7028908133506775, "avg_line_length": 34.24528121948242, "blob_id": "5e0e4fa71d02a9556166acb2a96e1411eb1f614f", "content_id": "4782d0b8777c49f59d1e5d90c443a40be42bb47e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1868, "license_type": "permissive", "max_line_length": 78, "num_lines": 53, "path": "/backend/integrations/database.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport pandas as pd\nimport pyodbc\nfrom pathlib import Path\nfrom joblib import Memory\n\ndatabase_home = \"data\\\\database\\\\\"\nmemory = Memory(Path(f\"{database_home}\"), verbose=0)\nDB_CONFIG = Path(f\"{database_home}database.config\").read_text()\n\n\[email protected]\ndef get_test_name_fails(start_date: str, max_date: str) -> pd.DataFrame:\n \"\"\"\n Query the database for the number of test fails on a given date interval.\n\n :param start_date: start date\n :param max_date: maximum date\n :return: 2-columns dataframe with the tests names and number of fails\n \"\"\"\n print(\"Querying database for test name fails\")\n query = Path(f\"{database_home}test_name_fails.sql\").read_text()\n connection = pyodbc.connect(DB_CONFIG)\n return pd.read_sql_query(query, connection, params=[start_date, max_date])\n\n\[email protected]\ndef get_testfails_for_revision(revision: str) -> pd.DataFrame:\n \"\"\"\n Query the database for the tests that failed on a given revision.\n\n :param revision: revision id\n :return: 1-column dataframe with the test names\n \"\"\"\n print(f\"Querying db for test fails for rev {revision}\")\n query = Path(f\"{database_home}test_fails_rev.sql\").read_text()\n connection = pyodbc.connect(DB_CONFIG)\n return pd.read_sql_query(query, connection, params=[revision])\n\n\[email protected]\ndef get_test_execution_times(from_dt: str, to_dt: str) -> pd.DataFrame:\n \"\"\"\n Query the database for the test execution times on a given date interval.\n\n :param from_dt: start date\n :param to_dt: end date\n :return: 2-columns dataframe with the tests names and test execution times\n \"\"\"\n print(f\"Querying db for test execution times\")\n query = Path(f\"{database_home}test_execution_times.sql\").read_text()\n connection = pyodbc.connect(DB_CONFIG)\n return pd.read_sql_query(query, connection, params=[from_dt, to_dt])\n" }, { "alpha_fraction": 0.6016398072242737, "alphanum_fraction": 0.6121006608009338, "avg_line_length": 35.65285110473633, "blob_id": 
"51354886282bfbd02e96a50393c2a23f9cd9c6bd", "content_id": "a00b11abdd5e1a03ff34c637d648d59501abc916", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7074, "license_type": "permissive", "max_line_length": 87, "num_lines": 193, "path": "/backend/selection/binary_mopso.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport random\nfrom copy import copy\nfrom typing import List, Optional\n\nimport numpy\nfrom jmetal.config import store\nfrom jmetal.core.algorithm import ParticleSwarmOptimization\nfrom jmetal.core.problem import BinaryProblem\nfrom jmetal.core.solution import BinarySolution\nfrom jmetal.operator.mutation import BitFlipMutation\nfrom jmetal.util.archive import BoundedArchive, NonDominatedSolutionListArchive\nfrom jmetal.util.comparator import DominanceComparator, EpsilonDominanceComparator\nfrom jmetal.util.solution_list import Evaluator, Generator\nfrom jmetal.util.termination_criterion import TerminationCriterion\n\n\nclass BMOPSO(ParticleSwarmOptimization):\n def __init__(\n self,\n problem: BinaryProblem,\n swarm_size: int,\n mutation: BitFlipMutation,\n leaders: Optional[BoundedArchive],\n epsilon: float,\n termination_criterion: TerminationCriterion,\n swarm_generator: Generator = store.default_generator,\n swarm_evaluator: Evaluator = store.default_evaluator,\n ):\n\n super(BMOPSO, self).__init__(problem=problem, swarm_size=swarm_size)\n self.swarm_generator = swarm_generator\n self.swarm_evaluator = swarm_evaluator\n\n self.termination_criterion = termination_criterion\n self.observable.register(termination_criterion)\n\n self.mutation_operator = mutation\n\n self.leaders = leaders\n\n self.epsilon = epsilon\n self.epsilon_archive = NonDominatedSolutionListArchive(\n # EpsilonDominanceComparator(epsilon)\n DominanceComparator()\n )\n\n self.c1_min = 1.5\n self.c1_max = 2.0\n self.c2_min = 1.5\n self.c2_max = 2.0\n self.r1_min = 0.0\n self.r1_max = 1.0\n self.r2_min = 0.0\n self.r2_max = 1.0\n self.weight_min = 0.1\n self.weight_max = 0.5\n self.change_velocity1 = -1\n self.change_velocity2 = -1\n\n self.dominance_comparator = DominanceComparator()\n\n self.speed = numpy.zeros(\n (\n self.swarm_size,\n self.problem.number_of_variables,\n self.problem.number_of_tests,\n ),\n dtype=float,\n )\n\n def create_initial_solutions(self) -> List[BinarySolution]:\n return [self.swarm_generator.new(self.problem) for _ in range(self.swarm_size)]\n\n def evaluate(self, solution_list: List[BinarySolution]):\n return self.swarm_evaluator.evaluate(solution_list, self.problem)\n\n def stopping_condition_is_met(self) -> bool:\n return self.termination_criterion.is_met\n\n def initialize_global_best(self, swarm: List[BinarySolution]) -> None:\n for particle in swarm:\n if self.leaders.add(particle):\n self.epsilon_archive.add(copy(particle))\n\n def initialize_particle_best(self, swarm: List[BinarySolution]) -> None:\n for particle in swarm:\n particle.attributes[\"local_best\"] = copy(particle)\n\n def initialize_velocity(self, swarm: List[BinarySolution]) -> None:\n for i in range(self.swarm_size):\n for j in range(self.problem.number_of_variables):\n self.speed[i][j] = 0.0\n\n def update_velocity(self, swarm: List[BinarySolution]) -> None:\n for i in range(self.swarm_size):\n best_particle = copy(swarm[i].attributes[\"local_best\"])\n best_global = self.select_global_best()\n\n r1 = round(random.uniform(self.r1_min, self.r1_max), 1)\n r2 = round(random.uniform(self.r2_min, 
self.r2_max), 1)\n c1 = round(random.uniform(self.c1_min, self.c1_max), 1)\n c2 = round(random.uniform(self.c2_min, self.c2_max), 1)\n w = round(random.uniform(self.weight_min, self.weight_max), 1)\n\n for var in range(swarm[i].number_of_variables):\n best_particle_diff = numpy.subtract(\n numpy.array(best_particle.variables[var]),\n numpy.array(swarm[i].variables[var]),\n dtype=numpy.float32,\n )\n best_global_diff = numpy.subtract(\n numpy.array(best_global.variables[var]),\n numpy.array(swarm[i].variables[var]),\n dtype=numpy.float32,\n )\n\n self.speed[i][var] = (\n w * numpy.array(self.speed[i][var])\n + (c1 * r1 * best_particle_diff)\n + (c2 * r2 * best_global_diff)\n )\n\n def update_position(self, swarm: List[BinarySolution]) -> None:\n for i in range(self.swarm_size):\n particle = swarm[i]\n\n for j in range(particle.number_of_variables):\n particle.variables[j] = self.compute_position(self.speed[i][j])\n\n def compute_position(self, speed):\n updated_positions = (\n numpy.random.random_sample(speed.shape) < self._sigmoid(speed)\n ) * 1\n return list(numpy.array(updated_positions, dtype=bool))\n\n def _sigmoid(self, x):\n return 1 / (1 + numpy.exp(-x))\n\n def update_global_best(self, swarm: List[BinarySolution]) -> None:\n for particle in swarm:\n if self.leaders.add(copy(particle)):\n self.epsilon_archive.add(copy(particle))\n\n def update_particle_best(self, swarm: List[BinarySolution]) -> None:\n for i in range(self.swarm_size):\n flag = self.dominance_comparator.compare(\n swarm[i], swarm[i].attributes[\"local_best\"]\n )\n if flag != 1:\n swarm[i].attributes[\"local_best\"] = copy(swarm[i])\n\n def perturbation(self, swarm: List[BinarySolution]) -> None:\n for i in range(self.swarm_size):\n if (i % 6) == 0:\n self.mutation_operator.execute(swarm[i])\n\n def select_global_best(self) -> BinarySolution:\n leaders = self.leaders.solution_list\n\n if len(leaders) > 2:\n particles = random.sample(leaders, 2)\n\n if self.leaders.comparator.compare(particles[0], particles[1]) < 1:\n best_global = copy(particles[0])\n else:\n best_global = copy(particles[1])\n else:\n best_global = copy(self.leaders.solution_list[0])\n\n return best_global\n\n def init_progress(self) -> None:\n self.evaluations = self.swarm_size\n self.leaders.compute_density_estimator()\n\n self.initialize_velocity(self.solutions)\n self.initialize_particle_best(self.solutions)\n self.initialize_global_best(self.solutions)\n\n def update_progress(self) -> None:\n self.evaluations += self.swarm_size\n self.leaders.compute_density_estimator()\n\n observable_data = self.get_observable_data()\n observable_data[\"SOLUTIONS\"] = self.epsilon_archive.solution_list\n self.observable.notify_all(**observable_data)\n\n def get_result(self) -> List[BinarySolution]:\n return self.epsilon_archive.solution_list\n\n def get_name(self) -> str:\n return \"my-BMOPSO\"\n" }, { "alpha_fraction": 0.6931273341178894, "alphanum_fraction": 0.6949920058250427, "avg_line_length": 34.75238037109375, "blob_id": "04627f5f699be9757aca4b6df44ae5e9df58f4ef", "content_id": "0451f9b174f0b91af39bba7c995881e0e3205436", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3754, "license_type": "permissive", "max_line_length": 94, "num_lines": 105, "path": "/parse_xml.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport itertools\nimport os\nfrom functools import partial\nfrom multiprocessing import Pool\n\nimport click\nimport numpy as np\n\nfrom 
backend.opencover import parser\n\n\[email protected](\"multiple\")\[email protected](\"reports_path\")\[email protected](\"output_name\")\[email protected](\"branch_name\")\ndef process_multiple_xml_reports(reports_path, output_name, branch_name):\n \"\"\"\n Parse OpenCover's XML coverage reports into an activity matrix and tests/methods name maps.\n Assumes that the reports_path is a directory containing multiple coverage reports.\n\n :param reports_path: path to directory containing the coverage reports\n :param output_name: name of the output files generated for the activity matrix and maps\n :param branch_name: name of the branch used for matching with files in the repository\n \"\"\"\n # Get coverage reports files from the directory\n report_files = list(\n map(\n lambda report: os.path.abspath(os.path.join(reports_path, report)),\n os.listdir(reports_path),\n )\n )\n\n # Collect id-activity matrices for each report\n with Pool(processes=2) as pool:\n result = pool.map(\n partial(get_id_activity_matrix, branch=branch_name), report_files\n )\n\n array_result = np.array(result)\n id_act_matrices = array_result[:, 0]\n methods_map, tests_map = array_result[0, 1], array_result[0, 2]\n\n # Merge id-activity matrices\n x = {\n k: [d.get(k, []) for d in id_act_matrices]\n for k in {k for d in id_act_matrices for k in d}\n }\n merged_id_act_matrices = {k: list(itertools.chain(*x[k])) for k in x}\n\n # Export merged results\n export_activity_matrix(output_name, methods_map, tests_map, merged_id_act_matrices)\n\n\ndef export_activity_matrix(output_name, methods_map, tests_map, activity_matrix):\n \"\"\"\n Build+export activity matrix and tests/methods map to JSON files.\n\n :param output_name: name identifier for the JSON output files\n :param activity_matrix: id-activity matrix\n :param methods_map: methods map\n :param tests_map: tests map\n \"\"\"\n # Convert id-activity matrix to binary activity matrix\n print(f\"Converting to the binary activity matrix\")\n binary_act_matrix = parser.build_binary_activity_matrix(\n activity_matrix, methods_map, tests_map\n )\n # Filter activity matrix to reduce json file output size\n print(f\"Filtering methods/tests with no activity from the matrix\")\n filter_act_matrix, methods_map, tests_map = parser.filter_activity_matrix(\n binary_act_matrix, methods_map, tests_map\n )\n # Export results to json\n print(f\"Exporting processed data to json files\")\n parser.export_data_to_json(output_name, filter_act_matrix, methods_map, tests_map)\n\n print(\"Report processing done\")\n\n\ndef get_id_activity_matrix(xml_report, branch):\n # Get files map\n print(f\"Getting file map to handle c# namespace issues\")\n files_map = parser.get_files_map_from_report(xml_report, branch)\n\n # Split xml report based on module type (test vs code)\n print(f\"Loading xml report {xml_report}\")\n test_modules, code_modules = parser.get_modules_from_report(xml_report)\n\n # Fill uid maps with tests names and methods names\n print(f\"Mapping tests and methods uids\")\n tests_map = parser.build_tests_map(test_modules)\n methods_map = parser.build_methods_map(code_modules)\n\n # Build activity matrix based on ids\n print(f\"Building the id-activity matrix\")\n id_act_matrix = parser.build_id_activity_matrix(\n code_modules, methods_map, files_map\n )\n print(f\" {xml_report} -- {len(id_act_matrix)}\")\n return id_act_matrix, methods_map, tests_map\n\n\nif __name__ == \"__main__\":\n process_multiple_xml_reports()\n" }, { "alpha_fraction": 0.6512523889541626, "alphanum_fraction": 
0.6551059484481812, "avg_line_length": 17.535715103149414, "blob_id": "47fb08d60f6b5bb444366c632613aaa15655b30f", "content_id": "ccf4d0fb633ca3daa77dc61a601b7b9545a70971", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "permissive", "max_line_length": 74, "num_lines": 28, "path": "/run_combos.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport os\nfrom multiprocessing import Pool\nimport click\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected](\"start\")\[email protected](\"input_list\", type=click.Path(exists=True, readable=True))\ndef start(input_list):\n with open(input_list, mode=\"r\") as infile:\n combos = infile.readlines()\n\n with Pool(processes=3) as pool:\n pool.map(run_command, combos)\n\n\ndef run_command(command):\n print(f\"Running command: {command}\")\n os.system(command)\n\n\nif __name__ == \"__main__\":\n cli()\n" }, { "alpha_fraction": 0.5916733145713806, "alphanum_fraction": 0.610088050365448, "avg_line_length": 28.046510696411133, "blob_id": "e0aece6b7fc24e35cc27a4e31e981d2197bf9acc", "content_id": "43064bd0d5a3d49f8884745a471bf7078c46a416", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2498, "license_type": "permissive", "max_line_length": 85, "num_lines": 86, "path": "/join_metrics.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport os\nimport pickle\nimport re\nfrom itertools import permutations\nfrom itertools import product\n\nimport click\n\nfrom backend.evaluation.summary import ResultsSummary\nfrom generate_tests import COVERAGE_MAP\nfrom generate_tests import HISTORY_MAP\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected](\"per_size\")\[email protected](\"data_dir\", type=click.Path(exists=True))\ndef start(data_dir):\n sizes = [5, 10, 25, 50, 100, 200, 400]\n for size in [str(x) for x in sizes]:\n print_merged_results(size, data_dir)\n\n\[email protected](\"per_2combos\")\[email protected](\"data_dir\", type=click.Path(exists=True))\ndef start(data_dir):\n all_combos = []\n for (cov, hist) in product(COVERAGE_MAP.items(), HISTORY_MAP.items()):\n combos = [f\"{m1}{m2}\" for ((_, m1), (_, m2)) in permutations([cov, hist], 2)]\n all_combos.extend(combos)\n all_combos.sort()\n\n for name in all_combos:\n print_merged_results(name, data_dir)\n\n\[email protected](\"per_3combos\")\[email protected](\"data_dir\", type=click.Path(exists=True))\ndef start(data_dir):\n all_combos = []\n for (cov, hist1, hist2) in product(\n COVERAGE_MAP.items(), HISTORY_MAP.items(), HISTORY_MAP.items()\n ):\n if hist1 == hist2:\n continue\n combos = permutations([cov, hist1, hist2], 3)\n for ((_, m1_name), (_, m2_name), (_, m3_name)) in combos:\n metrics_name = f\"{m1_name}{m2_name}{m3_name}\"\n if metrics_name in all_combos:\n continue\n all_combos.append(metrics_name)\n all_combos.sort()\n\n for name in all_combos:\n print_merged_results(name, data_dir)\n\n\ndef print_merged_results(key, data_dir):\n key_results = []\n for batch in [\"demo1\", \"demo2\", \"demo3\", \"demo4\"]:\n pattern = re.compile(r\"_\" + key + r\"_\" + batch + r\".pickle\")\n results = [\n os.path.abspath(os.path.join(data_dir, x))\n for x in os.listdir(data_dir)\n if re.search(pattern, x) is not None\n ]\n aggregated: ResultsSummary = pickle.load(open(results[0], mode=\"rb\"))\n for file in results[1:]:\n 
aggregated.merge_same(pickle.load(open(file, mode=\"rb\")))\n\n key_results.append(aggregated)\n\n while len(key_results) > 1:\n key_results[0].merge_diff(key_results.pop())\n key_final = key_results.pop()\n key_final.normalize_diff(4)\n # print(f\"{key}\")\n print(f\"{key_final.export_to_csv_line(prefix=key.upper())}\")\n\n\nif __name__ == \"__main__\":\n cli()\n" }, { "alpha_fraction": 0.5975907444953918, "alphanum_fraction": 0.6087709665298462, "avg_line_length": 38.08000183105469, "blob_id": "c1fe756b70d56d50eb3372056d242a28f8ec0315", "content_id": "e412fecb611fbcb16b6d57c06873f108caa7cbe8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12701, "license_type": "permissive", "max_line_length": 112, "num_lines": 325, "path": "/backend/evaluation/summary.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport gc\nimport pickle\nfrom collections import Counter\nfrom dataclasses import dataclass\nfrom typing import List, BinaryIO\n\nimport numpy as np\n\nfrom backend.evaluation import utils\nfrom backend.evaluation.execution_item import RevisionResults\nfrom backend.selection.problem_data import ProblemData\n\nSTATS_KEYS = [\"avg\", \"min\", \"max\", \"std\", \"P10\", \"P25\", \"P50\", \"P75\", \"P90\"]\n\n\n@dataclass\nclass ResultsSummary:\n data: List[RevisionResults]\n commits: dict\n executions: dict\n errors: dict\n red_stats: dict\n solution_size: dict\n computing_time: dict\n orig_feedback_time: float\n new_feedback_time: dict\n\n def __init__(self, results: List[RevisionResults], data: ProblemData):\n \"\"\"\n Populate results summary with evaluation metrics values.\n\n :param results: list of execution results\n :param data: full dataset related to this set of results\n \"\"\"\n tool_executions = utils.get_tool_executions(results)\n tool_no_exec = utils.get_tool_no_executions(results)\n total_innocent_reds = utils.get_total_innocent_reds(results)\n\n # Commits\n red_commits = [res for res in results if len(res.real_rev_history) > 0]\n self.commits = {\n \"total\": len(results),\n \"red\": len(red_commits),\n \"red_p\": len(red_commits) / len(results),\n }\n\n # Executions\n red_executions = [\n res for res in tool_executions if len(res.real_rev_history) > 0\n ]\n self.executions = {\n \"total\": len(tool_executions),\n \"total_p\": len(tool_executions) / len(results),\n \"red\": len(red_executions),\n \"red_p\": len(red_executions) / len(tool_executions),\n }\n\n # Errors\n error_cases = {\n \"No .cs Files\": \"no covered .cs files\",\n \"No Coverage Data\": \"no coverage data\",\n \"New Files\": \"new files or modified\",\n }\n self.errors = {}\n for error, pattern in error_cases.items():\n total, red = utils.get_error_stats(pattern, tool_no_exec)\n self.errors[error] = {\"total\": len(total), \"red\": len(red)}\n\n # Red Stats: \"yes, at least one\", Precision, Recall\n self.set_red_stats(red_executions, total_innocent_reds)\n\n # Solution Size, Computing Time\n self.set_solution_size(tool_executions)\n self.set_computing_time(tool_executions)\n\n # Feedback Time (original, new)\n self.orig_feedback_time = sum(data.history_test_execution_times.values())\n self.set_feedback_time(tool_executions)\n\n # Store data\n self.data = results\n for res in self.data:\n res.solutions_found = []\n\n def set_red_stats(self, red_execs: List[RevisionResults], total_innocent_reds: int):\n \"\"\"\n Populate map of values related to red executions, namely Precision and Recall values.\n\n 
:param red_execs: list of execution results for red commits\n :param total_innocent_reds: total number of innocent red commits\n \"\"\"\n not_found_red_tests = [res for res in red_execs if res.score[0] == 0]\n red_ignored_tests = [res for res in red_execs if res.score[0] == -1]\n found_red_tests_at_least_one = [res for res in red_execs if res.score[0] > 0]\n self.red_stats = {\n \"Innocent Reds\": total_innocent_reds,\n \"Only Ignored Tests\": len(red_ignored_tests),\n \"Valid Reds\": len(red_execs),\n \"No\": len(not_found_red_tests) / len(red_execs),\n \"At Least One\": len(found_red_tests_at_least_one) / len(red_execs),\n \"Macro-Precision\": utils.get_macro_precision(red_execs),\n \"Micro-Precision\": utils.get_micro_precision(red_execs),\n \"Macro-Recall\": utils.get_macro_recall(red_execs),\n \"Micro-Recall\": utils.get_micro_recall(red_execs),\n }\n\n def set_solution_size(self, executions: List[RevisionResults]):\n \"\"\"\n Populate solution size map with stats and percentiles values\n\n - Stats: average, min, max, standard deviation\n - Percentiles: 10, 25, 50, 75, 90\n :param executions: list of execution results\n \"\"\"\n sizes = np.array([res.score[3] for res in executions])\n self.solution_size = dict(zip(STATS_KEYS, utils.get_metric_stats(sizes)))\n\n def set_computing_time(self, executions: List[RevisionResults]):\n \"\"\"\n Populate computing time map with stats and percentiles values\n\n - Stats: average, min, max, standard deviation\n - Percentiles: 10, 25, 50, 75, 90\n :param executions: list of execution results\n \"\"\"\n times = np.array(\n [res.computing_time for res in executions if res.computing_time > 0]\n )\n self.computing_time = dict(zip(STATS_KEYS, utils.get_metric_stats(times)))\n\n def set_feedback_time(self, executions: List[RevisionResults]):\n \"\"\"\n Populate feedback time map with stats and percentiles values\n\n - Stats: average, min, max, standard deviation\n - Percentiles: 10, 25, 50, 75, 90\n :param executions: list of execution results\n \"\"\"\n feedback_times = np.array(\n [res.new_feedback_time for res in executions if res.new_feedback_time > 0]\n )\n self.new_feedback_time = dict(\n zip(STATS_KEYS, utils.get_metric_stats(feedback_times))\n )\n\n def recompute_innocent(self):\n \"\"\"\n Recompute all evaluation metrics in this summary using the innocent commit filter\n\n \"\"\"\n results = self.data\n tool_executions = utils.get_tool_executions(results)\n total_innocent_reds = utils.get_total_innocent_reds(results)\n red_executions = [\n res for res in tool_executions if len(res.real_rev_history) > 0\n ]\n not_innocent_red_executions = [\n res for res in red_executions if res.innocent is not True\n ]\n self.set_red_stats(not_innocent_red_executions, total_innocent_reds)\n\n self.set_solution_size(tool_executions)\n self.set_computing_time(tool_executions)\n self.set_feedback_time(tool_executions)\n\n def export_to_text(self):\n \"\"\"\n Export the summary in text format to stdout\n\n \"\"\"\n commits = list(self.commits.values())\n print(f\"# Commits - {commits[0]} (red: {commits[1]} -> {commits[2]*100:.0f}%)\")\n\n execs = list(self.executions.values())\n print(\n f\"Tool Executions: {execs[0]} -> {execs[1]*100:.0f}% \"\n f\" (red: {execs[2]} - {execs[3]*100:.0f}%)\"\n )\n\n for error, [total, red] in self.errors.items():\n print(\n f\"# {error}: {self.errors[error][total]} (red: {self.errors[error][red]})\"\n )\n\n print(\"Tool Found Red Test(s) ?\")\n red_stats = list(self.red_stats.values())\n print(f\"Innocent Reds: {red_stats[0]}\")\n 
print(f\"Only Ignored Tests: {red_stats[1]}\")\n print(f\"Score Stats (for actual reds)\")\n print(f\"Valid Reds: {red_stats[2]}\")\n print(f\"No: {red_stats[3] * 100:.0f}%\")\n print(f\"Yes, At least one: {red_stats[4] * 100:.0f}%\")\n print(f\"Macro-Precision: {red_stats[5] * 100:.0f}%\")\n print(f\"Micro-Precision: {red_stats[6] * 100:.0f}%\")\n print(f\"Macro-Recall: {red_stats[7] * 100:.0f}%\")\n print(f\"Micro-Recall: {red_stats[8] * 100:.0f}%\")\n\n solution_size = list(self.solution_size.values())\n self.print_metric_stats(\"Solution Size\", solution_size)\n\n computing_time = list(self.computing_time.values())\n self.print_metric_stats(\"Computing Time\", computing_time)\n\n print(f\"Original Feedback Time: {self.orig_feedback_time:.0f}\")\n feedback_time = list(self.new_feedback_time.values())\n self.print_metric_stats(\"New Feedback Time\", feedback_time)\n\n def export_to_pickle(self, file: BinaryIO):\n \"\"\"\n Exports the summary to a pickle file.\n\n :param file: output file descriptor\n \"\"\"\n # Force garbage collection due to memory concerns when handling multiple summaries\n gc.collect()\n pickle.dump(self, file, protocol=pickle.HIGHEST_PROTOCOL)\n\n def export_to_csv_line(self, only_stats: bool = False, prefix: str = None) -> str:\n \"\"\"\n Get a single CSV line representation of the summary using \"|\" (vertical bar) as separator.\n\n :param only_stats: flag indicating if the line should contain only metrics and stats values\n :param prefix: a custom first element for the line, if needed\n :return: the CSV line as a string\n \"\"\"\n line = [prefix] if prefix is not None else []\n if not only_stats:\n line.extend(list(self.commits.values()))\n line.extend(list(self.executions.values()))\n\n for error, [total, red] in self.errors.items():\n line.extend([self.errors[error][total], self.errors[error][red]])\n\n line.extend(list(self.red_stats.values()))\n line.extend(list(self.solution_size.values()))\n line.extend(list(self.computing_time.values()))\n line.extend([int(self.orig_feedback_time)])\n line.extend(list(self.new_feedback_time.values()))\n\n # stringify items\n line = [str(x) for x in line]\n return \"|\".join(line)\n\n @staticmethod\n def print_metric_stats(name: str, data: List):\n \"\"\"\n Print avg, min, max, stdev + percentiles (10, 25, 50, 75, 90)\n\n :param name: name of evaluation metric\n :param data: list of data points\n \"\"\"\n\n def unpack(values):\n # Helper function for unpacking the values into the f-string\n return \",\".join(str(x) for x in values)\n\n stats, percentiles = data[0:4], data[4:]\n print(f\"{name} (avg, min, max, std): ({unpack(stats)})\")\n print(f\"{name} Percentiles (10, 25, 50, 75, 90): ({unpack(percentiles)})\")\n\n def merge_same(self, other: \"ResultsSummary\"):\n \"\"\"\n Merge the results of two summaries from the same evaluation period.\n\n Note: this assumes that the summaries are equal except for stats, which are added up\n\n :param other: the other ResultsSummary object to be merged with\n \"\"\"\n self.red_stats = add_counter(self.red_stats, other.red_stats)\n self.new_feedback_time = add_counter(\n self.new_feedback_time, other.new_feedback_time\n )\n\n def merge_diff(self, other: \"ResultsSummary\"):\n \"\"\"\n Merge the results of two summaries from different evaluation periods.\n\n :param other: the other ResultsSummary object to be merged with\n \"\"\"\n self.commits = add_counter(self.commits, other.commits)\n self.executions = add_counter(self.executions, other.executions)\n for error in self.errors:\n 
self.errors[error] = add_counter(self.errors[error], other.errors[error])\n self.red_stats = add_counter(self.red_stats, other.red_stats)\n self.solution_size = add_counter(self.solution_size, other.solution_size)\n self.computing_time = add_counter(self.computing_time, other.computing_time)\n self.orig_feedback_time = self.orig_feedback_time + other.orig_feedback_time\n self.new_feedback_time = add_counter(\n self.new_feedback_time, other.new_feedback_time\n )\n\n def normalize_diff(self, n: int):\n \"\"\"\n Normalize (average) results in this summary by a number n\n\n \"\"\"\n self.commits[\"red_p\"] = self.commits[\"red_p\"] / n\n self.executions[\"total_p\"] = self.executions[\"total_p\"] / n\n self.executions[\"red_p\"] = self.executions[\"red_p\"] / n\n for k in self.red_stats:\n self.red_stats[k] = self.red_stats[k] / n\n for k in self.solution_size:\n self.solution_size[k] = int(self.solution_size[k] / n)\n for k in self.computing_time:\n self.computing_time[k] = int(self.computing_time[k] / n)\n self.orig_feedback_time = self.orig_feedback_time / n\n for k in self.new_feedback_time:\n self.new_feedback_time[k] = int(self.new_feedback_time[k] / n)\n\n\ndef add_counter(prop1: dict, prop2: dict):\n \"\"\"\n Helper function to add the Counters of two dicts without breaking in case a key doesn't exist in both dicts.\n\n :param prop1: a dictionary\n :param prop2: another dictionary\n :return: a Counter object with the sum of the two dicts Counters\n \"\"\"\n c = Counter()\n c.update({x: 1 for x in prop1})\n prop1 = c + Counter(prop1) + Counter(prop2)\n for x in prop1:\n prop1[x] -= 1\n return prop1\n" }, { "alpha_fraction": 0.6801801919937134, "alphanum_fraction": 0.6828829050064087, "avg_line_length": 22.617021560668945, "blob_id": "fe0b0ed3004478bead6b755e3af232fef748f0bc", "content_id": "40a5f0b6d8047700dbeb6aec839215fb42ec082b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "permissive", "max_line_length": 67, "num_lines": 47, "path": "/backend/opencover/utils.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\"\"\" Helper methods to access xml elements \"\"\"\n\n\ndef get_module_name(module):\n return next(module.iter(\"ModuleName\")).text\n\n\ndef get_module_tracked_methods(module):\n return next(module.iter(\"TrackedMethods\"))\n\n\ndef get_module_classes(module):\n return next(module.iter(\"Classes\"))\n\n\ndef get_module_files(module):\n return next(module.iter(\"Files\"))\n\n\ndef get_class_methods(clazz):\n return next(clazz.iter(\"Methods\"))\n\n\ndef get_method_name(method):\n return next(method.iter(\"Name\")).text\n\n\ndef get_method_file_ref(method):\n return next(method.iter(\"FileRef\"), None)\n\n\ndef get_method_coverage(method):\n # Get method point tag\n method_point = next(method.iter(\"MethodPoint\"), None)\n if method_point is None or not list(method_point):\n return [None, None]\n\n # Look at tracked method refs\n tracked_refs = method_point[0]\n if not list(tracked_refs):\n return [None, None]\n\n # Return uids of tests that visit the 1st sequence point\n tests_uids = list(map(lambda x: x.attrib[\"uid\"], tracked_refs))\n\n return [get_method_name(method), tests_uids]\n" }, { "alpha_fraction": 0.5101152062416077, "alphanum_fraction": 0.5363866090774536, "avg_line_length": 31.208145141601562, "blob_id": "edf8ac2320f8b6e973356794b7c0cb4ac8aefb6f", "content_id": "7d7cedaf84cca6a0467159dee4470d2a2cfbc8b1", "detected_licenses": [ "MIT" 
], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7118, "license_type": "permissive", "max_line_length": 106, "num_lines": 221, "path": "/generate_tests.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nCOVERAGE_MAP = {\"ddu\": \"d\", \"norm_coverage\": \"n\"}\n\nHISTORY_MAP = {\"exec_times\": \"e\", \"fails\": \"f\", \"n_tests\": \"t\"}\n\nDATA = [\n (\n \"demo1\",\n \"all_trunk_demo1_tests.in\",\n \"data\\\\jsons\\\\actmatrix_v2_trunk_demo1.json\",\n \"data\\\\poc_demos\\\\trunk_demo1.config\",\n ),\n (\n \"demo2\",\n \"all_trunk_demo2_tests.in\",\n \"data\\\\jsons\\\\actmatrix_v2_trunk_demo2.json\",\n \"data\\\\poc_demos\\\\trunk_demo2.config\",\n ),\n (\n \"demo3\",\n \"all_trunk_demo3_tests.in\",\n \"data\\\\jsons\\\\actmatrix_v2_trunk_demo3.json\",\n \"data\\\\poc_demos\\\\trunk_demo3.config\",\n ),\n (\n \"demo4\",\n \"all_trunk_demo4_tests.in\",\n \"data\\\\jsons\\\\actmatrix_v2_trunk_demo4.json\",\n \"data\\\\poc_demos\\\\trunk_demo4.config\",\n ),\n]\n\nCOMMAND = \"python testsel_pipeline.py demo\"\nRANDOM_COMMAND = \"python testsel_pipeline.py random\"\n\nOUTPUT_PATH = \"data\\\\results\\\\thesis\"\n\n\ndef print_command(metrics, size, data, config, output):\n print(f\"{COMMAND} {metrics} {size} {data} {config} {output}.pickle > {output}.out\")\n\n\ndef print_random_command(\n tests, data, config, output, random_prob, fixed=False, filtered=False\n):\n options = \"\"\n if fixed:\n options += \"--fixed \"\n if filtered:\n options += \"--filtered \"\n print(\n f\"{RANDOM_COMMAND} {options} {random_prob} {tests} {data} {config} {output}.pickle > {output}.out\"\n )\n\n\ndef baseline_tests():\n base = f\"{OUTPUT_PATH}\\\\baseline\\\\base_\"\n metrics, size = \"-o ddu -o fails\", 100\n # DATA rows are 4-tuples; the tests-file entry is unused here\n for (batch, _, json_data, config) in DATA:\n name = f\"{base}{batch}\"\n print_command(metrics, size, json_data, config, name)\n print()\n\n\ndef metrics_2combos_tests():\n from itertools import permutations, product\n\n base = f\"{OUTPUT_PATH}\\\\metrics_combos\\\\mcombos_\"\n for (batch, _, json_data, config) in DATA:\n for (cov, hist) in product(COVERAGE_MAP.items(), HISTORY_MAP.items()):\n combos = permutations([cov, hist], 2)\n for ((m1_key, m1_name), (m2_key, m2_name)) in combos:\n name = f\"{base}{m1_name}{m2_name}_{batch}\"\n metrics, size = f\"-o {m1_key} -o {m2_key}\", 100\n print_command(metrics, size, json_data, config, name)\n print()\n print()\n\n\ndef metrics_3combos_tests():\n from itertools import permutations, product\n\n base = f\"{OUTPUT_PATH}\\\\metrics_combos\\\\mcombos_\"\n for (batch, _, json_data, config) in DATA:\n combos_done = []\n for (cov, hist1, hist2) in product(\n COVERAGE_MAP.items(), HISTORY_MAP.items(), HISTORY_MAP.items()\n ):\n if hist1 == hist2:\n continue\n combos = permutations([cov, hist1, hist2], 3)\n for ((m1_key, m1_name), (m2_key, m2_name), (m3_key, m3_name)) in combos:\n metrics_name = f\"{m1_name}{m2_name}{m3_name}\"\n if metrics_name in combos_done:\n continue\n name = f\"{base}{metrics_name}_{batch}\"\n metrics, size = f\"-o {m1_key} -o {m2_key} -o {m3_key}\", 100\n print_command(metrics, size, json_data, config, name)\n combos_done.append(metrics_name)\n print()\n print()\n\n\ndef metrics_4combos_tests():\n from itertools import permutations, product\n\n base = f\"{OUTPUT_PATH}\\\\metrics_combos\\\\mcombos_\"\n for (batch, _, json_data, config) in DATA:\n combos_done = []\n for (cov, hist1, hist2, hist3) in product(\n COVERAGE_MAP.items(),\n HISTORY_MAP.items(),\n
HISTORY_MAP.items(),\n HISTORY_MAP.items(),\n ):\n if hist1 == hist2 or hist1 == hist3 or hist2 == hist3:\n continue\n combos = permutations([cov, hist1, hist2, hist3], 4)\n for (\n (m1_key, m1_name),\n (m2_key, m2_name),\n (m3_key, m3_name),\n (m4_key, m4_name),\n ) in combos:\n metrics_name = f\"{m1_name}{m2_name}{m3_name}{m4_name}\"\n if metrics_name in combos_done:\n continue\n name = f\"{base}{metrics_name}_{batch}\"\n metrics, size = f\"-o {m1_key} -o {m2_key} -o {m3_key} -o {m4_key}\", 100\n print_command(metrics, size, json_data, config, name)\n combos_done.append(metrics_name)\n print()\n print()\n\n\ndef swarm_size_tests():\n base = f\"{OUTPUT_PATH}\\\\swarm_size\\\\swsize_\"\n metrics = \"-o ddu -o fails\"\n for (batch, _, json_data, config) in DATA:\n sizes = [5, 10, 25, 50, 100, 200, 400]\n for size in sizes:\n name = f\"{base}{size}_{batch}\"\n print_command(metrics, size, json_data, config, name)\n print()\n\n\ndef random_fixed_tests():\n base = f\"{OUTPUT_PATH}\\\\random_fixed\\\\ranfixed_\"\n for (batch, tests, json_data, config) in DATA:\n random_p = [0.10, 0.15, 0.20, 0.25]\n for prob in random_p:\n for i in range(1, 11):\n name = f\"{base}{str(int(prob*100))}_{i}_{batch}\"\n print_random_command(\n tests, json_data, config, name, prob, fixed=True, filtered=False\n )\n print()\n print()\n\n\ndef random_dynamic_tests():\n base = f\"{OUTPUT_PATH}\\\\random_dynamic\\\\randynam_\"\n for (batch, tests, json_data, config) in DATA:\n random_p = [0.10, 0.15, 0.20, 0.25]\n for prob in random_p:\n for i in range(1, 11):\n name = f\"{base}{str(int(prob*100))}_{i}_{batch}\"\n print_random_command(\n tests, json_data, config, name, prob, fixed=False, filtered=False\n )\n print()\n print()\n\n\ndef random_dynamic_filtered_tests():\n base = f\"{OUTPUT_PATH}\\\\random_dynamic_filter\\\\randynamfilter_\"\n for (batch, tests, json_data, config) in DATA:\n random_p = [0.10, 0.15, 0.20, 0.25]\n for prob in random_p:\n for i in range(1, 11):\n name = f\"{base}{str(int(prob*100))}_{i}_{batch}\"\n print_random_command(\n tests, json_data, config, name, prob, fixed=False, filtered=True\n )\n print()\n print()\n\n\ndef random_fixed_filtered_tests():\n base = f\"{OUTPUT_PATH}\\\\random_fixed_filter\\\\ranfixedfilter_\"\n for (batch, tests, json_data, config) in DATA:\n random_p = [0.10, 0.15, 0.20, 0.25]\n for prob in random_p:\n for i in range(1, 11):\n name = f\"{base}{str(int(prob*100))}_{i}_{batch}\"\n print_random_command(\n tests, json_data, config, name, prob, fixed=True, filtered=True\n )\n print()\n print()\n\n\nif __name__ == \"__main__\":\n random_fixed_tests()\n random_fixed_filtered_tests()\n random_dynamic_tests()\n random_dynamic_filtered_tests()\n # baseline\n # baseline_tests()\n\n # swarm size\n # swarm_size_tests()\n\n # metrics 2-combos\n # metrics_2combos_tests()\n\n # metrics 3-combos\n # metrics_3combos_tests()\n\n # metrics 4-combos\n # metrics_4combos_tests()\n" }, { "alpha_fraction": 0.5474095940589905, "alphanum_fraction": 0.5650048851966858, "avg_line_length": 34.6860466003418, "blob_id": "8b0c8d81aa010fc4b4e02b14347ee0a2541dc604", "content_id": "7ad8d3de102ee70d4c7547f969692ca8cfc63241", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3069, "license_type": "permissive", "max_line_length": 86, "num_lines": 86, "path": "/join_randoms.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport os\nimport pickle\nimport re\nfrom typing import Optional, Any\n\nimport 
click\n\nfrom backend.evaluation.summary import ResultsSummary\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected](\"per_batch\")\[email protected](\"data_dir\", type=click.Path(exists=True))\[email protected](\n \"--innocent\", is_flag=True, help=\"Recompute each sample using innocent filter\"\n)\ndef start(data_dir, innocent):\n for batch in [\"demo1\", \"demo2\", \"demo3\", \"demo4\"]:\n for prob in [str(int(x * 100)) for x in [0.10, 0.15, 0.20, 0.25]]:\n pattern = re.compile(prob + r\"_\\d+_\" + batch + r\".pickle\")\n results = [\n os.path.abspath(os.path.join(data_dir, x))\n for x in os.listdir(data_dir)\n if re.search(pattern, x) is not None\n ]\n aggregated: ResultsSummary = pickle.load(open(results[0], mode=\"rb\"))\n if innocent:\n aggregated.recompute_innocent()\n for file in results[1:]:\n summary = pickle.load(open(file, mode=\"rb\"))\n if innocent:\n summary.recompute_innocent()\n aggregated.merge_same(summary)\n\n for k in aggregated.red_stats:\n aggregated.red_stats[k] = aggregated.red_stats[k] / 10\n for k in aggregated.new_feedback_time:\n aggregated.new_feedback_time[k] = aggregated.new_feedback_time[k] / 10\n print(f\"{aggregated.export_to_csv_line()}\")\n\n\[email protected](\"per_prob\")\[email protected](\"data_dir\", type=click.Path(exists=True))\[email protected](\n \"--innocent\", is_flag=True, help=\"Recompute each sample using innocent filter\"\n)\ndef start(data_dir, innocent):\n for prob in [str(int(x * 100)) for x in [0.10, 0.15, 0.20, 0.25]]:\n prob_results = []\n for batch in [\"demo1\", \"demo2\", \"demo3\", \"demo4\"]:\n pattern = re.compile(prob + r\"_\\d+_\" + batch + r\".pickle\")\n results = [\n os.path.abspath(os.path.join(data_dir, x))\n for x in os.listdir(data_dir)\n if re.search(pattern, x) is not None\n ]\n aggregated: ResultsSummary = pickle.load(open(results[0], mode=\"rb\"))\n if innocent:\n aggregated.recompute_innocent()\n for file in results[1:]:\n summary = pickle.load(open(file, mode=\"rb\"))\n if innocent:\n summary.recompute_innocent()\n aggregated.merge_same(summary)\n\n for k in aggregated.red_stats:\n aggregated.red_stats[k] = aggregated.red_stats[k] / 10\n for k in aggregated.new_feedback_time:\n aggregated.new_feedback_time[k] = aggregated.new_feedback_time[k] / 10\n\n prob_results.append(aggregated)\n\n while len(prob_results) > 1:\n prob_results[0].merge_diff(prob_results.pop())\n prob_final = prob_results.pop()\n prob_final.normalize_diff(4)\n print(f\"{prob_final.export_to_csv_line()}\")\n\n\nif __name__ == \"__main__\":\n cli()\n" }, { "alpha_fraction": 0.6048500537872314, "alphanum_fraction": 0.6073102355003357, "avg_line_length": 33.98360824584961, "blob_id": "7f124e5b0e7f821298a0b3bbad3132a878c7473c", "content_id": "85191c2ed1798e458463ff1d413d22a9a7cb7467", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8536, "license_type": "permissive", "max_line_length": 112, "num_lines": 244, "path": "/backend/selection/problem_data.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport json\nimport re\nfrom typing import List\n\nimport pandas as pd\nfrom collections import defaultdict\nfrom dataclasses import dataclass\n\nimport numpy as np\n\nfrom backend.integrations import database\n\n\ndef normalize_test_name(tests: np.ndarray):\n \"\"\"\n Normalize test names to match database.\n\n - Replace / with + to support dashboard tests\n - Discard return type\n - Only keep namespace and method name\n\n :param tests: 
list of test names\n :return: array with test names normalized\n \"\"\"\n return map(\n lambda test: \".\".join(\n re.search(r\"(.*)::(.*)\\(\", test.replace(\"/\", \"+\").split(\" \")[1]).groups()\n ),\n tests,\n )\n\n\ndef normalize_iterative_test_name(test: str):\n \"\"\"\n Normalize iterative test name, if necessary\n\n :param test: test name\n :return: normalized test name\n \"\"\"\n if re.match(r\"(.*\\..+)\\+.+\", test):\n return re.match(r\"(.*\\..+)\\+.+\", test).group(1)\n return test\n\n\ndef get_historical_metric_map(query_results: pd.DataFrame) -> dict:\n \"\"\"\n Convert 2-columns query results to a dictionary mapping the test name to the historical metric value\n\n :param query_results: 2-columns pandas dataframe with the query results\n :return: dictionary mapping the test names to the historical metric values\n \"\"\"\n history_metric_map = defaultdict(int)\n for test, time in query_results.values:\n key = normalize_iterative_test_name(test)\n history_metric_map[key] += time\n return history_metric_map\n\n\n@dataclass\nclass ProblemData:\n original_matrix: np.ndarray\n original_tests: np.ndarray\n original_methods: np.ndarray\n activity_matrix: np.ndarray\n tests_index: np.ndarray\n methods_index: np.ndarray\n methods_map: dict\n history_test_fails: dict\n history_test_execution_times: dict\n new_files: dict\n branch: str\n ignore_tests: list\n\n swarm_size: int\n\n def __init__(\n self,\n activity_matrix_path,\n branch,\n fails_start_date,\n from_date,\n to_date,\n ignore_tests=None,\n ):\n \"\"\"\n ProblemData initialization.\n\n - Load JSON data for an activity matrix file\n - Filter tests with no activity (zero rows)\n :param activity_matrix_path: path of the activity matrix JSON file\n \"\"\"\n if ignore_tests is None:\n ignore_tests = []\n self.branch = branch\n self.ignore_tests = ignore_tests\n\n self.load_json_data(activity_matrix_path)\n self.filter_tests_with_no_activity()\n\n # Load historical data\n self.history_test_fails = get_historical_metric_map(\n database.get_test_name_fails(fails_start_date, from_date)\n )\n self.history_test_execution_times = get_historical_metric_map(\n database.get_test_execution_times(from_date, to_date)\n )\n\n self.new_files = {}\n\n def load_json_data(self, activity_matrix):\n \"\"\"\n Loads JSON data for an activity matrix.\n\n The loaded JSON data includes:\n - The binary activity matrix itself\n - The tests considered\n - The methods considered\n\n :param activity_matrix: path of the activity matrix JSON file\n \"\"\"\n print(f\"Loading json data from {activity_matrix}\")\n # Find relative path and timestamp to load tests/methods maps\n actm_pattern = r\"(.*)\\\\actmatrix_(.*)\\.json\"\n path, timestamp = re.search(actm_pattern, activity_matrix).groups()\n\n # activity matrix\n with open(activity_matrix) as actm_file:\n self.activity_matrix = np.array(json.load(actm_file), dtype=bool)\n self.original_matrix = self.activity_matrix\n\n # tests\n with open(f\"{path}\\\\testids_{timestamp}.json\") as tests_file:\n tests = np.array(list(json.load(tests_file).values()))\n self.tests_index = np.array(list(normalize_test_name(tests)))\n self.original_tests = self.tests_index\n\n # methods\n with open(f\"{path}\\\\methodids_{timestamp}.json\") as methods_file:\n self.methods_map = json.load(methods_file)\n # print(f\"methods map: {len(self.methods_map.keys())}\")\n self.methods_index = np.array(list(self.methods_map.values()))\n self.original_methods = self.methods_index\n\n def reset(self):\n \"\"\"\n Reset current activity matrix, 
tests and methods data to the originally loaded data.\n\n \"\"\"\n self.activity_matrix = self.original_matrix\n self.tests_index = self.original_tests\n self.methods_index = self.original_methods\n\n def filter_tests_with_no_activity(self):\n \"\"\"\n Filter tests with no activity (zero rows).\n \"\"\"\n active_tests = ~np.all(self.activity_matrix == 0, axis=1)\n self.tests_index = self.tests_index[active_tests]\n self.activity_matrix = self.activity_matrix[active_tests]\n\n def filter_methods_with_no_activity(self):\n \"\"\"\n Filter methods with no activity (zero columns)\n \"\"\"\n active_methods = ~np.all(self.activity_matrix == 0, axis=0)\n self.methods_index = self.methods_index[active_methods]\n self.activity_matrix = self.activity_matrix[:, active_methods]\n\n def filter_data_for_commit(self, changed_methods):\n \"\"\"\n Filter matrix and indexes based on commit.\n Also, the changed data is filtered for tests/methods with no activity\n\n :param changed_methods: indexes of methods changed by the commit\n \"\"\"\n self.activity_matrix = self.activity_matrix[:, changed_methods]\n self.methods_index = self.methods_index[changed_methods]\n\n # Filter no activity tests/methods\n self.filter_tests_with_no_activity()\n self.filter_methods_with_no_activity()\n\n def get_changed_indexes_for_changelist(\n self, changelist: List[List], ignore_changes: List\n ) -> object:\n \"\"\"\n Get the changed method indexes in the activity matrix based on the changelist\n\n :param changelist: list of changed files (each element is pair with the type of change and the filename)\n :param ignore_changes: list of file paths to be ignored\n :return: on success, returns a list of changed indexes in the activity matrix.\n on failure, returns a string describing the error case\n \"\"\"\n # Filter changelist before processing\n changelist = [\n change\n for change in changelist\n if not any(\n (ignore in change[1]) or (change[1] == \"/platform/trunk\")\n for ignore in ignore_changes\n )\n ]\n\n # Process changelist\n new_files = []\n changed_files = []\n cs_pattern = self.branch + r\"/(.*)\\.cs$\"\n xaml_cs_pattern = self.branch + r\"/(.*)xaml\\.cs\"\n for x in changelist:\n if re.search(cs_pattern, x[1]):\n # Check if it's not a *.xaml.cs file\n if not re.search(xaml_cs_pattern, x[1]):\n filename = re.search(cs_pattern, x[1]).group(1)\n dot_filename = filename.replace(\"/\", \".\")\n changed_files.append(dot_filename)\n # Check if new file and store in hash table\n if x[0] == \"A\":\n self.new_files[dot_filename] = 123\n new_files.append(dot_filename)\n # Check if modified an already known new file\n elif self.new_files.get(dot_filename) is not None:\n new_files.append(dot_filename)\n\n # Check if no .cs files were changed\n if not changed_files:\n return \"[Error] Changelist contains no covered .cs files\"\n\n # Check if only changed new files\n if len(changed_files) == len(new_files):\n return \"[Error] Changelist contains only new files or modified new files\"\n\n # Map files to method indexes\n changed_indexes = []\n for method in self.methods_map.values():\n if any(changed in method for changed in changed_files):\n matched_methods = np.where(self.methods_index == method)\n changed_indexes.append(matched_methods[0][0])\n\n # Check if there are no method indexes to return\n if not changed_indexes:\n return \"[Error] The provided activity matrix has no coverage data for the changed files\"\n\n return changed_indexes\n" }, { "alpha_fraction": 0.7588294744491577, "alphanum_fraction": 0.7689203023910522, 
"avg_line_length": 60.875, "blob_id": "a6c4483850a09e82e4eb364a2f908d1028fa64c2", "content_id": "5f8fae5d99a6e94d60c5da8a505f2abe5e4e4526", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3964, "license_type": "permissive", "max_line_length": 274, "num_lines": 64, "path": "/README.md", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# TestSel_MOPSO\nCode for \"A Multi-Objective Test Selection Tool using Test Suite Diagnosability\"\n\n## Dependencies + Assumptions\n### General\n- Python 3.7.2\n- Minified OpenCover (fork) \n - Code Comparison: https://github.com/OpenCover/opencover/compare/4.6.519...danielcorreia96:oc_2016_merged\n - Appveyor Artifact: https://ci.appveyor.com/project/danielcorreia96/opencover/builds/23686399\n- Target source code is stored in SVN repository\n\n### Python Libraries\n- numpy: data manipulation and metrics calculations (https://github.com/numpy/numpy)\n- jMetalPy: BMOPSO implementation (https://github.com/jMetal/jMetalPy)\n- pyodbc: database integration (https://github.com/mkleehammer/pyodbc)\n- PySvn: svn integration (https://github.com/dsoprea/PySvn)\n- joblib: database results caching (https://github.com/joblib/joblib)\n- faker: results anonymization for demonstration (https://github.com/joke2k/faker)\n\n## Tool Usage Overview (3 main components)\n### 1. Coverage Data Profiling (Offline/Ad-Hoc)\n- Execute tests using OpenCover instrumentation and collect coverage data to build the required activity matrix.\n- CLI: cov_profiler.py\n- Input Dependencies:\n - Target source code and test suite is available in the system\n - A directory with lists of tests to be ran with coverage profiling\n - Configuration file: paths to minified OpenCover executable, NUnit test runner, input/output directories; OpenCover filters and cover_by_test to be used (following the docs at https://github.com/OpenCover/opencover/wiki/Usage); threshold value for methods visit counter.\n - A sample configuration file (with instructions) is provided at data/opencover/oc_config.json.sample\n- Output: a set of xml reports at the chosen output directory\n- Example Command: `python cov_profiler.py run data/opencover/oc_config.json`\n\n\n### 2. Build Activity Matrix (Offline/Ad-Hoc)\n- Process the obtained xml coverage reports into an activity matrix and some additional information to be used by the test selection pipeline\n- CLI: parse_xml.py\n- Input Dependencies:\n - Directory with xml coverage reports obtained in the previous component\n - Name/Id for the output json files\n - Name of the folder where the svn repository is stored in the local filesystem\n- Output: 3 json files: an activity matrix, a map of row indices <-> test names and a map of column indices <-> method names\n- Example Command: `python parse_xml data\\reports\\demo1\\ demo1 trunk_demo1`\n\n\n### 3. 
Test Selection Pipeline (Online)\n- Run the test selection pipeline for a given commit/revision id\n- CLI: testsel_pipeline.py\n- Input Dependencies:\n  - JSON files from previous component -> only the path to the activity matrix is passed as an argument, the other 2 are inferred\n  - Database configuration file and SQL queries for metrics (samples provided in data/database)\n  - Configuration file to set up the branch path, date range and ignored tests/changes details \n  - CLI -o option: provide order of objectives to be used\n    - Available metrics:\n      - ddu: DDU\n      - coverage: method coverage (raw sum)\n      - norm_coverage: method coverage (normalized to 0/1)\n      - n_tests: total number of tests selected\n      - fails: total number of test failures from build history\n      - exec_times: total \"expected\" execution time for the selected tests using build history info\n  - CLI --masked option: anonymize output results by replacing the file/test names with fake ones\n- Output:\n  - Interactive Mode: for the revision id, returns a list of selected tests\n  - Batch Mode: for each revision id in the date range, tries to run the tool (printing test results at the end or logging error cases) and terminates with a summary of the batch run with some statistics\n- Example command (interactive): `python testsel_pipeline.py single -o ddu -o fails data\\\\jsons\\\\actmatrix_demo1.json data\\\\demo1.config`\n- Example command (batch mode): `python testsel_pipeline.py demo -o ddu -o fails data\\\\jsons\\\\actmatrix_demo1.json data\\\\demo1.config`\n \n\n" }, { "alpha_fraction": 0.6738144159317017, "alphanum_fraction": 0.6837113499641418, "avg_line_length": 35.74242401123047, "blob_id": "c86893d12e7fcd4df5c612f94adafa940a651820", "content_id": "84215eeb8d8058d7f2f26 ee14771e533ed35a0d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2425, "license_type": "permissive", "max_line_length": 80, "num_lines": 66, "path": "/backend/selection/test_selection.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport random\nfrom typing import List\n\nfrom jmetal.core.problem import BinaryProblem\nfrom jmetal.core.solution import BinarySolution\nfrom jmetal.operator import BitFlipMutation\nfrom jmetal.util.archive import CrowdingDistanceArchive\nfrom jmetal.util.termination_criterion import StoppingByEvaluations\n\nfrom backend.selection.binary_mopso import BMOPSO\nfrom backend.selection.problem_data import ProblemData\n\n\nclass TestSelection(BinaryProblem):\n    def __init__(self, problem_data: ProblemData, objectives: List):\n        super(TestSelection, self).__init__()\n        self.objectives = objectives\n        self.activity_matrix = problem_data.activity_matrix\n        self.tests_index = problem_data.tests_index\n        self.methods_index = problem_data.methods_index\n        self.history_test_fails = problem_data.history_test_fails\n        self.history_test_exec_times = problem_data.history_test_execution_times\n\n        self.number_of_tests = self.activity_matrix.shape[0]\n        # self.number_of_objectives = 2\n        self.number_of_objectives = len(objectives)\n        self.number_of_variables = 1\n        self.number_of_constraints = 0\n\n        # self.obj_directions = [self.MAXIMIZE, self.MAXIMIZE]\n        # self.obj_labels = [\"DDU\", \"Total Previous Test Failures\"]\n        # self.obj_directions = [self.MAXIMIZE, self.MAXIMIZE, self.MINIMIZE]\n        # self.obj_labels = ['DDU', '# Test Failures', '# Tests Selected']\n\n    def get_name(self) -> str:\n        return \"Test Selection Problem\"\n\n    def create_solution(self) -> BinarySolution:\n        
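# NOTE: seeding with a fixed constant on every call makes the initial bit\n        # pattern of every particle identical (deterministic swarm initialization)\n        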
random.seed(123)\n new_solution = BinarySolution(\n number_of_variables=self.number_of_variables,\n number_of_objectives=self.number_of_objectives,\n )\n\n new_solution.variables[0] = [\n True if random.randint(0, 1) == 0 else False\n for _ in range(self.number_of_tests)\n ]\n\n return new_solution\n\n def evaluate(self, solution: BinarySolution) -> BinarySolution:\n solution.objectives = [func(self, solution) for func in self.objectives]\n return solution\n\n\ndef my_binary_mopso(problem: TestSelection, swarm):\n return BMOPSO(\n problem=problem,\n swarm_size=swarm,\n epsilon=0.075,\n mutation=BitFlipMutation(probability=0),\n leaders=CrowdingDistanceArchive(100),\n termination_criterion=StoppingByEvaluations(max=2000),\n )\n" }, { "alpha_fraction": 0.7579365372657776, "alphanum_fraction": 0.7817460298538208, "avg_line_length": 100, "blob_id": "9613113efd81693c80ea597bc21e796a6d99170f", "content_id": "fab4cc5248ec4a95317f28b31aaa6434a286b244", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 504, "license_type": "permissive", "max_line_length": 207, "num_lines": 5, "path": "/papers/README.md", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# Papers associated with MOTSD\nMOTSD was developed in the context of my Master thesis work and the results obtained through its usage were presented in the following accepted short papers at [**ESEC/FSE 2019**](https://esec-fse19.ut.ee/).\n\n1. Tool Demo Track - [\"MOTSD: A Multi-Objective Test Selection Tool using Test Suite Diagnosability\"](./esec-fse19-tooldemo.pdf)\n2. Student Research Competition - [\"An Industrial Application of Test Selection using Test Suite Diagnosability\"](./esec-fse19-src.pdf)" }, { "alpha_fraction": 0.7035509943962097, "alphanum_fraction": 0.7115693092346191, "avg_line_length": 32.83720779418945, "blob_id": "03cf9c353a7693d86ac2061345ab4a3c9199cb6e", "content_id": "66344eec0f4f3d2e713a1da9f3e1d3b6c4ba51e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4365, "license_type": "permissive", "max_line_length": 103, "num_lines": 129, "path": "/backend/selection/objectives.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport numpy as np\n\nfrom jmetal.core.solution import BinarySolution\n\nfrom backend.selection.ddu_metric import ddu\nfrom backend.selection.test_selection import TestSelection\n\n\ndef get_selected_matrix(particle: list, activity_matrix: np.ndarray) -> np.ndarray:\n \"\"\"\n Get subset of the activity matrix selected by the particle.\n\n :param particle: a particle representing a candidate selection\n :param activity_matrix: full activity matrix\n :return: selected subset of the activity matrix\n \"\"\"\n particle = np.array(particle)\n sub_matrix = activity_matrix[particle == 1]\n return sub_matrix\n\n\ndef calculate_ddu(problem: TestSelection, solution: BinarySolution) -> float:\n \"\"\"\n Calculate DDU metric for a candidate solution.\n\n :param problem: the test selection problem instance\n :param solution: a candidate solution\n :return: DDU value\n \"\"\"\n sub_matrix = get_selected_matrix(solution.variables[0], problem.activity_matrix)\n if sub_matrix.size == 0:\n return 0\n\n ddu_value = ddu(sub_matrix)\n return round(-1 * ddu_value, 2)\n\n\ndef calculate_norm_coverage(problem: TestSelection, solution: BinarySolution) -> float:\n \"\"\"\n Calculate normalized coverage for a candidate solution.\n\n Note: 
the return value is negated to support objective maximization\n\n :param problem: the test selection problem instance\n :param solution: a candidate solution\n :return: normalized coverage value\n \"\"\"\n sub_matrix = get_selected_matrix(solution.variables[0], problem.activity_matrix)\n if sub_matrix.size == 0:\n return 0\n\n sum_tests = np.sum(sub_matrix, axis=0)\n sum_tests[sum_tests > 0] = 1 # normalize to 1/0\n return -1 * (np.sum(sum_tests) / sub_matrix.shape[1])\n\n\ndef calculate_coverage(problem: TestSelection, solution: BinarySolution) -> float:\n \"\"\"\n Calculate coverage without normalization for a candidate solution.\n\n Note: the return value is negated to support objective maximization\n\n :param problem: the test selection problem instance\n :param solution: a candidate solution\n :return: coverage value without normalization\n \"\"\"\n # consider only selected subset of matrix\n sub_matrix = get_selected_matrix(solution.variables[0], problem.activity_matrix)\n if sub_matrix.size == 0:\n return 0\n\n sum_tests = np.sum(sub_matrix, axis=0)\n return -1 * (np.sum(sum_tests) / sub_matrix.shape[1])\n\n\ndef calculate_number_of_tests(problem: TestSelection, solution: BinarySolution) -> int:\n \"\"\"\n Calculate total number of tests selected for a candidate solution.\n\n :param problem: the test selection problem instance\n :param solution: a candidate solution\n :return: total number of tests selected\n \"\"\"\n total_tests = len(problem.tests_index[solution.variables[0]])\n if total_tests == 0:\n total_tests = 123456\n return total_tests\n\n\ndef calculate_test_fails(problem: TestSelection, solution: BinarySolution) -> int:\n \"\"\"\n Calculate total previous test failures for a candidate solution.\n\n Note: the return value is negated to support objective maximization\n\n :param problem: the test selection problem instance\n :param solution: a candidate solution\n :return: total previous test failures\n \"\"\"\n testfails_history = _parse_history_to_list(\n problem.history_test_fails, problem.tests_index[solution.variables[0]]\n )\n return -1 * sum(testfails_history)\n\n\ndef calculate_exec_times(problem: TestSelection, solution: BinarySolution) -> float:\n \"\"\"\n Calculate total execution time for a candidate solution.\n\n :param problem: the test selection problem instance\n :param solution: a candidate solution\n :return: total execution time\n \"\"\"\n test_exec_time_history = _parse_history_to_list(\n problem.history_test_exec_times, problem.tests_index[solution.variables[0]]\n )\n return sum(test_exec_time_history)\n\n\ndef _parse_history_to_list(history_results: dict, selected_tests: np.ndarray) -> list:\n \"\"\"\n Helper method to parse an historical metrics map into a list of values based on the selected tests.\n\n :param history_results: map of historical metrics\n :param selected_tests: list of selected tests names\n :return: list of historical metric values\n \"\"\"\n return [history_results.get(test, 0) for test in selected_tests]\n" }, { "alpha_fraction": 0.6558540463447571, "alphanum_fraction": 0.6751140356063843, "avg_line_length": 29.828125, "blob_id": "96e24bf63f133731ea3ab17a9b4f04deda094c49", "content_id": "c86e1b96aaa04286052414a1b49ff5a77078952a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1973, "license_type": "permissive", "max_line_length": 102, "num_lines": 64, "path": "/backend/selection/ddu_metric.py", "repo_name": "danielcorreia96/MOTSD", "src_encoding": "UTF-8", "text": "# 
coding=utf-8\nimport numpy as np\n\n\ndef ddu(matrix: np.ndarray):\n \"\"\"\n Calculate DDU metric value for given activity matrix.\n\n Reference: Perez, Alexandre, Rui Abreu, and Arie van Deursen. \"A test-suite diagnosability metric for\n spectrum-based fault localization approaches.\" Proceedings of the 39th International Conference on\n Software Engineering. IEEE Press, 2017.\n\n :param matrix: activity matrix\n :return: DDU value\n \"\"\"\n return norm_density(matrix) * diversity(matrix) * uniqueness(matrix)\n\n\ndef norm_density(matrix: np.ndarray):\n \"\"\"\n Calculate normalized density for a given activity matrix.\n\n :param matrix: activity matrix\n :return: normalized density value\n \"\"\"\n return 1 - abs(1 - 2 * (np.count_nonzero(matrix) / matrix.size))\n\n\ndef diversity(matrix: np.ndarray):\n \"\"\"\n Calculate test diversity for a given activity matrix.\n\n :param matrix: activity matrix\n :return: test diversity value\n \"\"\"\n # using numpy magic from https://stackoverflow.com/a/27007787 to count identical rows\n dt = np.dtype((np.void, matrix.dtype.itemsize * matrix.shape[1]))\n b = np.ascontiguousarray(matrix).view(dt)\n _, cnt = np.unique(b, return_counts=True)\n\n numerator = sum(map(lambda x: x * (x - 1), cnt))\n denominator = matrix.shape[0] * (matrix.shape[0] - 1)\n if denominator == 0:\n return 0\n return 1 - numerator / denominator\n\n\ndef uniqueness(matrix: np.ndarray):\n \"\"\"\n Calculate uniqueness for a given activity matrix.\n\n :param matrix: activity matrix\n :return: uniqueness value\n \"\"\"\n # using numpy magic from https://stackoverflow.com/a/27007787 to count identical columns\n dt = np.dtype((np.void, matrix.T.dtype.itemsize * matrix.T.shape[1]))\n b = np.ascontiguousarray(matrix.T).view(dt)\n _, cnt = np.unique(b, return_counts=True)\n\n numerator = len(cnt)\n denominator = matrix.T.shape[0]\n if denominator == 0:\n return 0\n return numerator / denominator\n" } ]
22
VoidMo/programowanie-IR
https://github.com/VoidMo/programowanie-IR
0b4a73bb461a9168a9ddeaa9e8b7c282b710551c
9f7800f1a449c0da2fff51a2cefad8bc23a92b46
85b922c489b75f6a53956ea0e1af7a14095754cc
refs/heads/main
2023-03-12T04:54:12.514164
2021-03-02T16:07:05
2021-03-02T16:07:05
343,813,134
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5633074641227722, "alphanum_fraction": 0.604651153087616, "avg_line_length": 20.55555534362793, "blob_id": "09cdda500b62cd5f8552ddda15f44fc3a46a0817", "content_id": "e94664b6a4944045f523b9c955cda61e21a90bb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/bmi.py", "repo_name": "VoidMo/programowanie-IR", "src_encoding": "UTF-8", "text": "#zadanie bmi\n\nmasa = float(input(\"Podaj wagę [kg]: \"))\nwzrost = float(input(\"Podaj wzrost [m]: \"))\n\nbmi = masa/(wzrost**2)\n\nprint(\"BMI = {0}\".format(bmi))\n\nif(bmi < 18.5):\n print(\"Niedowaga.\")\nelif(bmi >= 18.5 and bmi < 25):\n print(\"Waga prawidłowa.\")\nelif(bmi >= 25 and bmi < 30):\n print(\"Nadwaga.\")\nelif(bmi >=30):\n print(\"Otyłość.\")\ninput(\"Naciśnij enter, aby zakończyć \")" } ]
1
wufeiyeye/myproject01
https://github.com/wufeiyeye/myproject01
3299f2709964aa6be3498f9219f8b290be93d95e
5bf0813af4f12607faf77baad74b89d59cddb111
5ba263e37a05e2c680ef398a7837f450ed6de7aa
refs/heads/master
2020-03-31T08:37:11.452060
2018-10-08T11:19:12
2018-10-08T11:19:12
152,064,693
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 11.5, "blob_id": "e8dde41de9b43c9f1c126b51e6ea92be364437f4", "content_id": "439f0594ed22e414bb4a5c44d705f654ca1338ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/dailyfresh/code.py", "repo_name": "wufeiyeye/myproject01", "src_encoding": "UTF-8", "text": "print(1)\nprint(\"用户模块完成\")\n" } ]
1
yarikoptic/pybetaseries
https://github.com/yarikoptic/pybetaseries
afce80c64e4a62b37e66395a792015f4b1e1f1bd
6c575391b4c9241a84450f1260c97e5e2294c8bb
723a2d8c0e0da7d40ce963119359550fdd05ee9a
refs/heads/master
2021-01-18T06:43:02.808153
2013-08-08T23:35:26
2013-08-08T23:35:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5522254109382629, "alphanum_fraction": 0.5648588538169861, "avg_line_length": 37.06438446044922, "blob_id": "1b553fc4e04ef90dd2d0cf479e95a9158d7d9c81", "content_id": "d21a774c6d401bc2616d688e730151a98e4a1e47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18918, "license_type": "no_license", "max_line_length": 158, "num_lines": 497, "path": "/pybetaseries.py", "repo_name": "yarikoptic/pybetaseries", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"pybetaseries: a module for computing beta-series regression on fMRI data\n\nIncludes:\npybetaseries: main function\nestimate_OLS: helper function to estimate least squares model\nspm_hrf: helper function to generate double-gamma HRF\n\"\"\"\n\nfrom glob import glob\n\nfrom mvpa2.base import verbose\nfrom mvpa2.misc.fsl.base import *\nfrom mvpa2.datasets.mri import fmri_dataset, map2nifti\n\nimport numpy as N\nimport nibabel\nimport scipy.stats\nfrom scipy.ndimage import convolve1d\nfrom scipy.sparse import spdiags\nfrom scipy.linalg import toeplitz\n#from mvpa.datasets.mri import *\nimport os\nfrom os.path import join as pjoin\nfrom copy import copy\n\n\ndef complete_filename(fileprefix):\n \"\"\"Check if provided fileprefix is not pointing to an existing file but there exist a single file with some .extension for it\"\"\"\n if os.path.exists(fileprefix):\n return fileprefix\n filenames = glob(fileprefix + \".*\")\n if len(filenames) > 1:\n # bloody pairs\n if sorted(filenames) == [fileprefix + '.hdr', fileprefix + '.img']:\n return fileprefix + '.hdr'\n raise ValueError(\"There are multiple files available with prefix %s: %s\"\n % (fileprefix, \", \".join(filenames)))\n elif len(filenames) == 1:\n return filenames[0]\n else:\n raise ValueError(\"There are no files for %s\" % fileprefix)\n\ndef get_smoothing_kernel(cutoff, ntp):\n sigN2 = (cutoff/(N.sqrt(2.0)))**2.0\n K = toeplitz(1\n /N.sqrt(2.0*N.pi*sigN2)\n *N.exp((-1*N.array(range(ntp))**2.0/(2*sigN2))))\n K = spdiags(1./N.sum(K.T, 0).T, 0, ntp, ntp)*K\n H = N.zeros((ntp, ntp)) # Smoothing matrix, s.t. H*y is smooth line\n X = N.hstack((N.ones((ntp, 1)), N.arange(1, ntp+1).T[:, N.newaxis]))\n for k in range(ntp):\n W = N.diag(K[k, :])\n Hat = N.dot(N.dot(X, N.linalg.pinv(N.dot(W, X))), W)\n H[k, :] = Hat[k, :]\n\n F = N.eye(ntp) - H\n return F\n\n# yoh:\n# desmat -- theoretically should be \"computed\", not loaded\n# time_up and hrf as sequences -- might better be generated \"inside\"\n# since they are not \"independent\". Note that time_res is now\n# computed inside. 
RFing in favor of passing TR, time_res\n# TR -- theoretically should be available in data\ndef extract_lsone(data, TR, time_res,\n hrf_gen, F,\n good_ons,\n good_evs, nuisance_evs, withderiv_evs,\n desmat,\n extract_evs=None,\n collapse_other_conditions=True):\n # loop through the good evs and build the ls-one model\n # design matrix for each trial/ev\n\n ntp, nvox = data.shape\n\n hrf = hrf_gen(time_res)\n # Set up the high time-resolution design matrix\n time_up = N.arange(0, TR*ntp+time_res, time_res)\n n_up = len(time_up)\n dm_nuisanceevs = desmat.mat[:, nuisance_evs]\n\n ntrials_total = sum(len(o['onsets']) for o in good_ons)\n verbose(1, \"Have %d trials total to process\" % ntrials_total)\n trial_ctr = 0\n all_conds = []\n beta_maker = N.zeros((ntrials_total, ntp))\n\n if extract_evs is None:\n extract_evs = range(len(good_evs))\n\n for e in extract_evs: # range(len(good_evs)):\n ev = good_evs[e]\n # first, take the original desmtx and remove the ev of interest\n other_good_evs = [x for x in good_evs if x != ev]\n # put the temporal derivatives into other_good_evs\n # start with its own derivative. This accounts for\n # a significant amount of divergence from matlab implementation\n if ev in withderiv_evs:\n other_good_evs.append(ev+1)\n for x in other_good_evs:\n if x in withderiv_evs:\n other_good_evs.append(x+1)\n dm_otherevs = desmat.mat[:, other_good_evs]\n cond_ons = N.array(good_ons[e].onsets)\n cond_dur = N.array(good_ons[e].durations)\n ntrials = len(cond_ons)\n glm_res_full = N.zeros((nvox, ntrials))\n verbose(2, 'processing ev %d: %d trials' % (e+1, ntrials))\n for t in range(ntrials):\n verbose(3, \"processing trial %d\" % t)\n ## ad-hoc warning -- assumes interleaved presence of\n ## derivatives' EVs\n all_conds.append((ev/2)+1)\n ## yoh: handle outside\n ## if cond_ons[t] > max_evtime:\n ## verbose(1, 'TOI: skipping ev %d trial %d: %f %f'\n ## % (ev, t, cond_ons[t], max_evtime))\n ## trial_ctr += 1\n ## continue\n # first build model for the trial of interest at high resolution\n dm_toi = N.zeros(n_up)\n window_ons = [N.where(time_up==x)[0][0]\n for x in time_up\n if (x >= cond_ons[t]) & (x < cond_ons[t] + cond_dur[t])]\n dm_toi[window_ons] = 1\n dm_toi = N.convolve(dm_toi, hrf)[0:ntp/time_res*TR:(TR/time_res)]\n other_trial_ons = cond_ons[N.where(cond_ons!=cond_ons[t])[0]]\n other_trial_dur = cond_dur[N.where(cond_ons!=cond_ons[t])[0]]\n\n dm_other = N.zeros(n_up)\n # process the other trials\n for o in other_trial_ons:\n ## yoh: handle outside\n ## if o > max_evtime:\n ## continue\n # find the timepoints that fall within the window b/w onset and onset + duration\n window_ons = [N.where(time_up==x)[0][0]\n for x in time_up\n if o <= x < o + other_trial_dur[N.where(other_trial_ons==o)[0][0]]]\n dm_other[window_ons] = 1\n\n # Put together the design matrix\n dm_other = N.convolve(dm_other, hrf)[0:ntp/time_res*TR:(TR/time_res)]\n if collapse_other_conditions:\n dm_other = N.hstack((N.dot(F, dm_other[0:ntp, N.newaxis]), dm_otherevs))\n dm_other = N.sum(dm_other, 1)\n dm_full = N.hstack((N.dot(F, dm_toi[0:ntp, N.newaxis]),\n dm_other[:, N.newaxis], dm_nuisanceevs))\n else:\n dm_full = N.hstack((N.dot(F, dm_toi[0:ntp, N.newaxis]),\n N.dot(F, dm_other[0:ntp, N.newaxis]),\n dm_otherevs,\n dm_nuisanceevs))\n dm_full -= dm_full.mean(0)\n dm_full = N.hstack((dm_full, N.ones((ntp, 1))))\n beta_maker_loop = N.linalg.pinv(dm_full)\n beta_maker[trial_ctr, :] = beta_maker_loop[0, :]\n trial_ctr += 1\n # this uses Jeanette's trick of extracting the beta-forming vector for each\n # 
trial and putting them together, which allows estimation for all trials\n # at once\n\n glm_res_full = N.dot(beta_maker, data.samples)\n\n return all_conds, glm_res_full\n\n\ndef extract_lsall(data, TR, time_res,\n hrf_gen, F,\n good_ons,\n good_evs,\n desmat,\n extract_evs=None):\n ntp, nvox = data.shape\n\n hrf = hrf_gen(time_res)\n # Set up the high time-resolution design matrix\n time_up = N.arange(0, TR*ntp+time_res, time_res)\n\n all_onsets = []\n all_durations = []\n all_conds = [] # condition marker\n\n if extract_evs is None:\n extract_evs = range(len(good_evs))\n\n nuisance_evs = sorted(list(set(range(desmat.mat.shape[1])).difference(\n [good_evs[e] for e in extract_evs])))\n\n for e in extract_evs:\n ev = good_evs[e]\n all_onsets = N.hstack((all_onsets, good_ons[e].onsets))\n all_durations = N.hstack((all_durations, good_ons[e].durations))\n # yoh: ad-hoc warning -- it is marking with (ev/2)+1 (I guess)\n # assuming presence of derivatives EVs\n all_conds = N.hstack((all_conds, N.ones(len(good_ons[e].onsets))*((ev/2)+1)))\n\n #all_onsets=N.round(all_onsets/TR) # round to nearest TR number\n ntrials = len(all_onsets)\n glm_res_full = N.zeros((nvox, ntrials))\n dm_trials = N.zeros((ntp, ntrials))\n dm_full = []\n for t in range(ntrials):\n verbose(2, \"Estimating for trial %d\" % t)\n\n ## yoh: TODO -- filter outside\n ## if all_onsets[t] > max_evtime:\n ## continue\n # build model for each trial\n dm_trial = N.zeros(len(time_up))\n window_ons = [N.where(time_up==x)[0][0]\n for x in time_up\n if all_onsets[t] <= x < all_onsets[t] + all_durations[t]]\n dm_trial[window_ons] = 1\n dm_trial_up = N.convolve(dm_trial, hrf)\n dm_trial_down = dm_trial_up[0:ntp/time_res*TR:(TR/time_res)]\n dm_trials[:, t] = dm_trial_down\n\n # filter the desmtx, except for the nuisance part (which is already filtered)\n # since it is taken from a loaded FSL\n dm_full = N.dot(F, dm_trials)\n\n # mean center trials models\n dm_trials -= dm_trials.mean(0)\n\n if len(nuisance_evs) > 0:\n # and stick nuisance evs if any to the back\n dm_full = N.hstack((dm_full, desmat.mat[:, nuisance_evs]))\n\n dm_full = N.hstack((dm_full, N.ones((ntp, 1))))\n glm_res_full = N.dot(N.linalg.pinv(dm_full), data.samples)\n glm_res_full = glm_res_full[:ntrials]\n\n return all_conds, glm_res_full\n\n\ndef pybetaseries(fsfdir,\n methods=['lsall', 'lsone'],\n time_res=0.1,\n modeldir=None,\n outdir=None,\n designdir=None,\n design_fsf_file='design.fsf',\n design_mat_file='design.mat',\n data_file=None,\n mask_file=None,\n extract_evs=None,\n collapse_other_conditions=True):\n \"\"\"Compute beta-series regression on a feat directory\n\n Required arguments:\n\n fsfdir: full path of a feat directory\n\n Optional arguments:\n\n method: list of methods to be used, can include:\n 'lsone': single-trial iterative least squares estimation from Turner & Ashby\n 'lsall': standard beta-series regression from Rissman et al.\n\n time_res: time resolution of the model used to generate the convolved design matrix\n\n outdir: where to store the results\n designdir: location of design_mat_file (e.g. design.mat). if None -- the same as fsfdir\n collapse_other_conditions: collapse all other conditions into a single regressor for\n the lsone model. 
Jeanette's analyses suggest that it's better than leaving\n them separate.\n data_file: allows to override path of the 4D datafile instead of specified in design.fsf\n 'feat_files(1)'\n \"\"\"\n\n known_methods = ['lsall', 'lsone']\n assert set(methods).issubset(set(known_methods)), \\\n \"Unknown method(s): %s\" % (set(methods).difference(set(known_methods)))\n\n if not os.path.exists(fsfdir):\n print 'ERROR: %s does not exist!' % fsfdir\n #return\n\n if not fsfdir.endswith('/'):\n fsfdir = ''.join([fsfdir, '/'])\n if modeldir is None:\n modeldir = fsfdir\n\n # load design using pymvpa tools\n\n fsffile = pjoin(fsfdir, design_fsf_file)\n desmatfile = pjoin(modeldir, design_mat_file)\n\n verbose(1, \"Loading design\")\n design = read_fsl_design(fsffile)\n\n desmat = FslGLMDesign(desmatfile)\n\n ntp, nevs = desmat.mat.shape\n\n TR = design['fmri(tr)']\n # yoh: theoretically it should be identical to the one read from\n # the nifti file, but in this sample data those manage to differ:\n # bold_mcf_brain.nii.gz int16 [ 64, 64, 30, 182] 3.12x3.12x5.00x1.00 sform\n # filtered_func_data.nii.gz float32 [ 64, 64, 30, 182] 3.12x3.12x5.00x2.00 sform\n #assert(abs(data.a.imghdr.get_zooms()[-1] - TR) < 0.001)\n # it is the filtered_func_data.nii.gz which was used for analysis,\n # and it differs from bold_mcf_brain.nii.gz ... \n\n # exclude events that occur within two TRs of the end of the run, due to the\n # inability to accurately estimate the response to them.\n\n max_evtime = TR*ntp - 2;\n # TODO: filter out here the trials jumping outside\n\n good_evs = []\n nuisance_evs = []\n # yoh: ev_td marks temporal derivatives (of good EVs or of nuisance -- all)\n # replacing with deriv_evs for consistency\n withderiv_evs = []\n # ev_td = N.zeros(design['fmri(evs_real)'])\n\n good_ons = []\n\n if outdir is None:\n outdir = pjoin(fsfdir, 'betaseries')\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n # create smoothing kernel for design\n cutoff = design['fmri(paradigm_hp)']/TR\n verbose(1, \"Creating smoothing kernel based on the original analysis cutoff %.2f\"\n % cutoff)\n # yoh: Verify that the kernel is correct since it looks\n # quite ...\n F = get_smoothing_kernel(cutoff, ntp)\n\n verbose(1, \"Determining non-motion conditions\")\n # loop through and find the good (non-motion) conditions\n # NB: this assumes that the name of the motion EV includes \"motpar\"\n # ala the openfmri convention.\n # TO DO: add ability to manually specify motion regressors (currently assumes\n # that any EV that includes \"motpar\" in its name is a motion regressor)\n evctr = 0\n\n for ev in range(1, design['fmri(evs_orig)']+1):\n # filter out motion parameters\n evtitle = design['fmri(evtitle%d)' % ev]\n verbose(2, \"Loading EV %s\" % evtitle)\n if not evtitle.startswith('mot'):\n good_evs.append(evctr)\n evctr += 1\n if design['fmri(deriv_yn%d)' % ev] == 1:\n withderiv_evs.append(evctr-1)\n # skip temporal derivative\n evctr += 1\n ev_events = FslEV3(pjoin(fsfdir, design['fmri(custom%d)' % ev]))\n good_ons.append(ev_events)\n else:\n nuisance_evs.append(evctr)\n evctr += 1\n if design['fmri(deriv_yn%d)' % ev] == 1:\n # skip temporal derivative\n withderiv_evs.append(evctr)\n nuisance_evs.append(evctr)\n evctr += 1\n\n # load data\n verbose(1, \"Loading data\")\n\n maskimg = pjoin(fsfdir, mask_file or 'mask.nii.gz')\n # yoh: TODO design['feat_files'] is not the one \"of interest\" since it is\n # the input file, while we would like to operate on pre-processed version\n # which is usually stored as 
filtered_func_data.nii.gz\n data_file_fullname = complete_filename(\n pjoin(fsfdir, data_file or \"filtered_func_data.nii.gz\"))\n data = fmri_dataset(data_file_fullname, mask=maskimg)\n assert(len(data) == ntp)\n\n for method in methods:\n verbose(1, 'Estimating %(method)s model...' % locals())\n\n if method == 'lsone':\n all_conds, glm_res_full = extract_lsone(\n data, TR, time_res,\n spm_hrf, F,\n good_ons,\n good_evs, nuisance_evs, withderiv_evs,\n desmat,\n extract_evs=extract_evs,\n collapse_other_conditions=collapse_other_conditions)\n elif method == 'lsall':\n all_conds, glm_res_full = extract_lsall(\n data, TR, time_res,\n spm_hrf, F,\n good_ons,\n good_evs,\n desmat,\n extract_evs=extract_evs,\n )\n else:\n raise ValueError(method)\n\n all_conds = N.asanyarray(all_conds) # assure array here\n # map the data into images and save to betaseries directory\n for e in range(1, len(good_evs)+1):\n ni = map2nifti(data, data=glm_res_full[N.where(all_conds==e)[0], :])\n ni.to_filename(pjoin(outdir, 'ev%d_%s.nii.gz' % (e, method)))\n\n\n\ndef spm_hrf(TR, p=[6, 16, 1, 1, 6, 0, 32]):\n \"\"\" An implementation of spm_hrf.m from the SPM distribution\n\n Arguments:\n\n Required:\n TR: repetition time at which to generate the HRF (in seconds)\n\n Optional:\n p: list with parameters of the two gamma functions:\n defaults\n (seconds)\n p[0] - delay of response (relative to onset) 6\n p[1] - delay of undershoot (relative to onset) 16\n p[2] - dispersion of response 1\n p[3] - dispersion of undershoot 1\n p[4] - ratio of response to undershoot 6\n p[5] - onset (seconds) 0\n p[6] - length of kernel (seconds) 32\n\n \"\"\"\n\n p = [float(x) for x in p]\n\n fMRI_T = 16.0\n\n TR = float(TR)\n dt = TR/fMRI_T\n u = N.arange(p[6]/dt + 1) - p[5]/dt\n Gpdf = scipy.stats.gamma.pdf \n hrf = Gpdf(u, p[0]/p[2], scale=1.0/(dt/p[2])) - Gpdf(u, p[1]/p[3], scale=1.0/(dt/p[3]))/p[4]\n good_pts = N.array(range(N.int(p[6]/TR)))*fMRI_T\n hrf = hrf[list(good_pts)]\n # hrf = hrf([0:(p(7)/RT)]*fMRI_T + 1);\n hrf = hrf/N.sum(hrf);\n return hrf\n\n\n\ndef estimate_OLS(desmtx, data, demean=1, resid=0):\n \"\"\"A utility function to compute ordinary least squares\n\n Arguments:\n\n Required:\n desmtx: design matrix\n data: the data\n\n Optional:\n\n demean: demean the data before estimation\n resid: return the residuals\n \"\"\"\n if demean == 1: # use if desmtx doesn't include a constant\n data = data-N.mean(data)\n glm = N.linalg.lstsq(desmtx, data)\n if resid==1: # return residuals as well\n return glm[0], glm[1]\n else:\n return glm[0]\n\nif __name__ == '__main__':\n\n verbose.level = 3\n # #'/usr/share/fsl-feeds/data/fmri.feat/',\n if True:\n topdir = '/home/yoh/proj/pymvpa/pymvpa/3rd/pybetaseries/run001_test_data.feat'\n pybetaseries(topdir,\n time_res=2./16, # just to make matlab code\n methods=['lsone'],\n design_fsf_file='design_yoh.fsf',\n #mask_file='mask_small.hdr',\n extract_evs=[2],\n collapse_other_conditions=False,\n outdir=pjoin(topdir, 'betaseries-yarikcode-3-nocollapse4'))\n else:\n topdir = '/data/famface/nobackup_pipe+derivs+nipymc/famface_level1/firstlevel'\n modelfit_dir = pjoin(topdir, 'modelfit/_subject_id_km00/_fwhm_4.0/')\n mask_file = pjoin(topdir, 'preproc/_subject_id_km00/meanfuncmask/corr_06mar11km_WIP_fMRI_SSh_3mm_sense2_sl35_SENSE_13_1_dtype_mean_brain_mask.nii.gz')\n pybetaseries(\n pjoin(modelfit_dir, 'level1design'),\n design_fsf_file='run0.fsf',\n modeldir=pjoin(modelfit_dir, 'modelgen/mapflow/_modelgen0'),\n design_mat_file='run0.mat',\n mask_file=mask_file,\n #methods=['lsone'],\n 
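# collect the resulting beta-series images under a scratch directory\n            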
outdir='/tmp/betaseries')\n" } ]
1
oooleemandy/hogwarts_lg4
https://github.com/oooleemandy/hogwarts_lg4
0f4f9fddb996264c21184b6fe9767fd23d9f08f8
c6c2ab2ce942a56d281a441903cb7311d6ecaa96
21b8dbc79f00f19abce416d82698cebdb2513ba6
refs/heads/master
2023-02-12T06:41:57.972446
2021-01-09T13:34:39
2021-01-09T13:34:39
305,968,011
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7126696705818176, "alphanum_fraction": 0.7217194437980652, "avg_line_length": 33.07692337036133, "blob_id": "fd6639b4bafe3164d0de32f4d6bc328adadcf12d", "content_id": "9a890237cf508fbb6b7af3d01bd84f5ce6ca69c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "no_license", "max_line_length": 86, "num_lines": 13, "path": "/podemo1/page/index_page.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom selenium.webdriver.common.by import By\nfrom podemo1.page.addmemberpage import AddMemberPage\nfrom podemo1.page.base_page import BasePage\n\n\nclass IndexPage(BasePage):\n _base_url = \"https://work.weixin.qq.com/wework_admin/frame#\"\n\n def click_add_member(self):\n self.find(By.CSS_SELECTOR, \".index_service_cnt_itemWrap:nth-child(1)\").click()\n return AddMemberPage(self.driver)" }, { "alpha_fraction": 0.5991379022598267, "alphanum_fraction": 0.6199712753295898, "avg_line_length": 30.56818199157715, "blob_id": "398b93e38e0293968424dddfc8fbbc92c734b357", "content_id": "1d702d8830004e97ce26b3a8e2989b5d4858008d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 126, "num_lines": 44, "path": "/test_selenium/test_js.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from time import sleep\nfrom selenium import webdriver\n\nclass TestJs():\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_js_scroll(self):\n self.driver.get(\"https://www.baidu.com/\")\n self.driver.find_element_by_id(\"kw\").send_keys(\"selenium测试\")\n\n '''js方式获取”百度一下“按钮,找到后返回给element'''\n element = self.driver.execute_script(\"return document.getElementById('su')\")\n element.click()\n\n '''滑动到最低端,再点击下一页'''\n self.driver.execute_script(\"document.documentElement.scrollTop=10000\")\n sleep(3)\n self.driver.find_element_by_xpath(\"//*[@id='page']/div/a[10]\").click()\n sleep(3)\n\n\n '''打印title,打印性能数据'''\n for code in [\n 'return document.title','return JSON.stringify(performance.timing)'\n ]:\n print(self.driver.execute_script(code))\n\n\n\n\n def test_datetime(self):\n #12306首页\n self.driver.get(\"https://www.12306.cn/index/\")\n #定位时间控件,移除元素的readonly属性,使其可以被修改\n time_element = self.driver.execute_script(\"a = document.getElementById('train_date')\",\"a.removeAttribute('readonly')\")\n #给时间控件赋值新value\n self.driver.execute_script(\"document.getElementById('train_date').value='2020-12-31'\")\n sleep(3)\n\n\n\n" }, { "alpha_fraction": 0.4752851724624634, "alphanum_fraction": 0.500211238861084, "avg_line_length": 15.44444465637207, "blob_id": "9b931a26a9c4d140dba705b5c214b590541a81ff", "content_id": "edb08234d4a50284e868688347d07fe8fe4236c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2697, "license_type": "no_license", "max_line_length": 71, "num_lines": 144, "path": "/test_pytest/tests/test_calc.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import yaml\n\nfrom test_pytest.core.calc import Calc\nimport pytest\nimport allure\n\n\n\n'''yaml文件驱动'''\n\ndef load_data(path='data.yaml'):\n with open(path) as f:\n data = yaml.safe_load(f)\n keys = \",\".join(data[0].keys())\n values = [d.values() for d in 
data]\n    return {'keys':keys,'values':values}\n\nclass TestCalc:\n\n    # runs once when the test class is initialized\n    def setup_class(cls):\n\n        cls.calc=Calc()\n\n\n\n\n    @allure.step\n    def simple_step(self, step_param1, step_param2=None):\n        pass\n\n\n\n    # parametrization for multiplication\n    @pytest.mark.parametrize(load_data()['keys'],load_data()['values'])\n    # multiplication method\n    @allure.story(\"multiplication module - positive cases\")\n    def test_mult(self,a,b,c):\n        # attach an image: file path, display name and attachment type\n        allure.attach.file(\n            r'C:\\\\Users\\\\limandi\\\\Pictures\\\\pic\\\\Screenshot.jpg',\n            \"test image\",\n            allure.attachment_type.JPG\n        )\n        self.simple_step(f'{a} {b} {c}')\n        assert self.calc.mul(a,b) == c\n\n\n\n\n\n    @pytest.mark.parametrize('a,b,c', [\n        [1, 2, 3],\n        [-1, -1, -1],\n        [1, 0, 1]\n    ])\n    # multiplication method\n    @allure.story(\"multiplication module - negative cases\")\n    def test_mulf(self, a, b, c):\n        assert self.calc.mul(a, b) == c\n\n\n\n\n\n\n\n    @pytest.mark.parametrize('a,b,c', [\n        [1, 2, \"jj\"],\n        [\"*\", -1, -1],\n    ])\n    # multiplication method\n    @allure.story(\"multiplication module - exception cases\")\n    def test_mule(self, a, b, c):\n        with pytest.raises(Exception):\n            assert self.calc.mul(a, b) == c\n\n\n\n\n\n\n\n\n\n\n\n\n\n    # parametrization for division\n    @pytest.mark.parametrize('d,e,f',[\n        [4,2,2],\n        [0.2,0.1,2],\n        [0,2,0],\n\n    ])\n    # division method\n    @allure.story(\"division module - positive cases\")\n    def test_divt(self,d,e,f):\n        assert self.calc.div(d,e) == f\n\n\n\n\n\n\n\n    @pytest.mark.parametrize('d,e,f', [\n        [36, 6, 4],\n        [-10,-2,-5],\n        [2.2, 2, 1]\n    ])\n    # division method\n    @allure.story(\"division module - negative cases\")\n    def test_divf(self, d, e, f):\n        assert self.calc.div(d, e) == f\n\n\n\n\n    # divisor is zero\n    # parametrization for division\n    @pytest.mark.parametrize('d,e', [\n        [2,0],\n        [0.2, 0],\n        [0,0],\n\n    ])\n    # division method\n    @allure.story(\"exception handling for division by zero\")\n    def test_dive(self, d, e):\n        # expect the division-by-zero exception\n        with pytest.raises(ZeroDivisionError):\n            assert self.calc.div(d, e)\n\n\n\n    # flow case: the multiplication and division calls below form one call flow\n    @allure.story(\"flow case\")\n    def test_process(self):\n        r1=self.calc.mul(1,2)\n        r2=self.calc.div(2,1)\n        assert r1 == 2\n        assert r2 == 2" }, { "alpha_fraction": 0.6356589198112488, "alphanum_fraction": 0.6356589198112488, "avg_line_length": 31.33333396911621, "blob_id": "aa220a12f7496f374fe58e205f830177ecaa16ba", "content_id": "072c249c7f8523332089cacfbaa6840ac6c59535", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 87, "num_lines": 12, "path": "/test_selenium/test_cssss.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nclass TestCSSS():\n    def setup(self):\n        self.driver = webdriver.Chrome()\n        self.driver.get(\"https://www.baidu.com/\")\n\n    def test_fnd(self):\n        self.driver.find_element(By.XPATH, '//*[@id=\"kw\"]').send_keys(\"Hogwarts\")\n        self.driver.find_element(By.XPATH, '//*[@id=\"su\"]').click()" }, { "alpha_fraction": 0.5926517844200134, "alphanum_fraction": 0.6107560992240906, "avg_line_length": 20.352272033691406, "blob_id": "da4eda8424ec4b4760847ee009984a66d9956268", "content_id": "71428eafac1ed6ff9b6236b4ee69e86b6ceadba3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2358, "license_type": "no_license", "max_line_length": 73, "num_lines": 88, "path": "/201024homework/Class12345.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "# 1. Define a Person class with name, age and weight attributes and eat/sleep/overtime methods\nclass Person():\n    def __init__(self,name,age,weight):\n        self.name = name\n        self.age = age\n        self.weight = weight\n\n    def eat(self):\n        print(f\"{self.name} is eating\")\n\n    def work(self):\n        print(f\"{self.name} is working overtime\")\n\n    def sleep(self):\n        
print(f\"{self.name}在睡觉\")\n\n\np = Person('Mark',30,150)\np.eat()\np.work()\np.sleep()\n\n\n# 2、定义一个箱子类,属性有颜色和大小 方法有装食品装物品\nclass Box:\n def __init__(self,color, size):\n self.color =color\n self.size = size\n\n def food(self):\n print(f\"{self.size}{self.color}的箱子用来装吃的\")\n\n def obj(self):\n print(f\"{self.size}{self.color}的箱子用来装物品\")\n\nbfood = Box('红色','大的')\nbobj = Box('蓝色','小的')\nbfood.food()\nbobj.obj()\n\n#3、定义机票类 属性有出发地 目的地和历经时间\nclass Ticket:\n def __init__(self,departure,destination,time):\n self.departure = departure\n self.destination = destination\n self.time = time\n\n def p(self):\n print(f\"由{self.departure}飞往{self.destination}的飞机历时{self.time}到达\")\n\nt = Ticket('杭州', '深圳','2小时')\nt.p()\n\n#4、定义一个学生类 属性有班级学号分数 方法有期末考试\nclass Student:\n def __init__(self,grade,no,score):\n self.grade = grade\n self.no = no\n self.score = score\n\n def exam(self):\n print(f\"{self.grade}班的学号为{self.no}的同学本次考试成绩为{self.score}\")\n\ns = Student('11','19190808','150')\ns.exam()\n\n\n#5、定义一个书类 书的属性有书名 作者 序列号,有新增图书和借出图书方法\nclass Book:\n def __init__(self,name,author,index):\n self.name = name\n self.author = author\n self.index = index\n\n def addbook(self):\n print(f\"新增一本{self.name}图书,作者为{self.author},序号为{self.index}\")\n\n def lendbook(self):\n print(f\"{self.name}图书,作者为{self.author},序列号为{self.index}被借出去啦\")\n\nclass Library(Book):\n print(\"这里是图书馆\")\n\n\nlibadd = Library('哈利波特与魔法石','J.K.Rowling','85533')\nliblend = Library(\"哈利波特与死亡圣器\",'J.K.Rowling','85566')\nlibadd.addbook()\nliblend.lendbook()" }, { "alpha_fraction": 0.6284152865409851, "alphanum_fraction": 0.6306011080741882, "avg_line_length": 25.08571434020996, "blob_id": "c1f539ef620d3ce2dcb4a31f694647eb2da9da3a", "content_id": "7be46dc265aac5cda376fa7af9cefce1af1224b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 93, "num_lines": 35, "path": "/test_selenium/test_alert.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from time import sleep\n\nfrom selenium.webdriver import ActionChains\n\nfrom test_pytest.base import Base\n\n\nclass TestAlert(Base):\n def test_alert(self):\n self.driver.get(\"https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable\")\n #切换frame,找到”请拖拽我“这个元素所在的frame,用id取出\n self.driver.switch_to.frame(\"iframeResult\")\n\n #找到要拖拽的两个元素\n drag = self.driver.find_element_by_id(\"draggable\")\n drop = self.driver.find_element_by_id(\"droppable\")\n\n action = ActionChains(self.driver)\n #需要把drag拖拽到drop中\n action.drag_and_drop(drag,drop).perform()\n sleep(3)\n\n\n '''\n 拖拽完以后会有弹框,需要切换到弹框页面再点击确认\n '''\n self.driver.switch_to.alert.accept()\n\n\n '''\n alert点击确认后再返回到默认的frame下,然后点击运行\n '''\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id(\"submitBTN\").click()\n sleep(3)\n\n\n" }, { "alpha_fraction": 0.6076233386993408, "alphanum_fraction": 0.6188340783119202, "avg_line_length": 26.9375, "blob_id": "9d66a46119cbd170a3e16d646dc151def071140a", "content_id": "8db1186a8c0e0242b24c73ef1a18c05586dd439e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 132, "num_lines": 16, "path": "/test_selenium/test_file.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from time import sleep\n\nfrom test_pytest.base import Base\n\n\nclass TestFile(Base):\n def test_file(self):\n '''\n 1、进入百度图库首页\n 2、点击加图片按钮\n\n '''\n 
self.driver.get(\"https://image.baidu.com/\")\n self.driver.find_element_by_xpath(\"//*[@id='sttb']/img[1]\").click()\n self.driver.find_element_by_id(\"uploadImg\").send_keys(r\"C:\\Users\\limandi\\PycharmProjects\\hogwarts_lg4\\image\\Screenshot.jpg\")\n sleep(3)" }, { "alpha_fraction": 0.6678571701049805, "alphanum_fraction": 0.6678571701049805, "avg_line_length": 24.545454025268555, "blob_id": "a68762e0740621fa0401c0398c116b78e5467eac", "content_id": "5573a00566755ee5ea5dc1eecb6ddfc7a0944237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/page/Register.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "'''\n注册页面\n'''\nfrom selenium.webdriver.common.by import By\nfrom page.base_page import BasePage\n\nclass Register(BasePage):\n def register(self):\n self.find(By.ID, \"corp_name\").send_keys(\"填写企业名称\")\n self.find(By.ID,\"manager_name\").send_keys(\"管理员姓名\")\n return True" }, { "alpha_fraction": 0.6491754055023193, "alphanum_fraction": 0.6491754055023193, "avg_line_length": 22.85714340209961, "blob_id": "ef73bc6963c91906b4b9958e34c1bbb94c17b4b9", "content_id": "51461fe36085e130cece040dddb798a71149d23e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 891, "license_type": "no_license", "max_line_length": 84, "num_lines": 28, "path": "/page/base_page.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "'''\n放一些公共的方法。页面的一些都会用到的操作\n'''\nfrom selenium import webdriver\nfrom selenium.webdriver.remote.webdriver import WebDriver\n\n\nclass BasePage:\n #访问的url\n _base_url=\"\"\n\n #初始化\n #构造方法 会自动调用\n def __init__(self,driver:WebDriver=None):\n #如果不传driver,每个页面都要初始化一次driver。下面复用driver,而不是每次调用。当testcase很多时不用每次都新初始化driver\n if driver is None:\n self._driver=webdriver.Chrome()\n else:\n self._driver=driver\n\n #封装url 加一个判断,如果url不为空,就进行一个访问\n if self._base_url !=\"\":\n self._driver.get(self._base_url)\n\n\n #封装一个find方法,传一个by一个定位\n def find(self,by,locator):\n return self._driver.find_element(by,locator)" }, { "alpha_fraction": 0.6744639277458191, "alphanum_fraction": 0.6832358837127686, "avg_line_length": 26.026315689086914, "blob_id": "225d45b4f402b7d172a22c2e686b5d076212c48a", "content_id": "6e53acbe5186a57847a74aa4ae436ec915c51a67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1062, "license_type": "no_license", "max_line_length": 89, "num_lines": 38, "path": "/test_selenium/test_testclick.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "# Generated by Selenium IDE\nimport shelve\n\nfrom selenium import webdriver\nfrom time import sleep\n\nfrom selenium.webdriver.common.by import By\n\n\nclass TestTestclick():\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n self.driver.implicitly_wait(5)\n\n \n def teardown_method(self, method):\n self.driver.quit()\n \n def test_testclick(self):\n self.driver.get(\"https://ceshiren.com/\")\n self.driver.find_element(By.LINK_TEXT, \"所有分类\").click()\n element=self.driver.find_element(By.LINK_TEXT, \"所有分类\")\n result= element.get_attribute(\"class\")\n assert 'active' ==result\n # self.driver.find_element(By.CSS_SELECTOR, \"#ember129 .category-name\").click()\n # self.driver.execute_script(\"window.scrollTo(0,0)\")\n self.driver.close()\n\n def test_wx(self):\n 
self.driver.get(\"https://work.weixin.qq.com/wework_admin/loginpage_wx?from=myhome\")\n sleep(10)\n\n\n def test_case2(self):\n # shelve python 自带的对象持久化存储\n db = shelve.open('cookies')\n db['cookies'] = []" }, { "alpha_fraction": 0.5886287689208984, "alphanum_fraction": 0.5886287689208984, "avg_line_length": 19, "blob_id": "581e74c3d43db2b7951fc8d50e47a9c45f618db9", "content_id": "ba029fc9d05caa6ecbfc7a90f7b222321a214cbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 94, "num_lines": 15, "path": "/10thstep/test_demo2.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import requests\nfrom requests.auth import HTTPBasicAuth\n\n\nclass Api:\n data={\n \"method\":\"get\",\n \"url\":\"url\",\n \"headers\":None,\n }\n\n\n #data 是一个请求信息\n def send(self,data:dict):\n requests.request(method=data[\"method\"] , url =data[\"url\"] ,headers = data[\"headers\"] )" }, { "alpha_fraction": 0.610822856426239, "alphanum_fraction": 0.6330615282058716, "avg_line_length": 32.67499923706055, "blob_id": "8b62950b879729b0ebdf5e2722dbb6992290d316", "content_id": "9ab7bb7438baa4560e52e32a1d2bca190b1ddc6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1583, "license_type": "no_license", "max_line_length": 94, "num_lines": 40, "path": "/test_selenium/test_window.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from time import sleep\n\nfrom test_pytest.base import Base\n\n\nclass TestWindow(Base):\n\n '''\n 百度首页-点击登陆-点击立刻注册\n '''\n def test_window(self):\n self.driver.get(\"https://www.baidu.com/\")\n self.driver.find_element_by_link_text(\"登录\").click()\n #打印出当前窗口\n print(self.driver.current_window_handle)\n self.driver.find_element_by_link_text(\"立即注册\").click()\n #跳出新的窗口需要在新的窗口里找\n print(self.driver.current_window_handle)\n #打印所有窗口\n print(self.driver.window_handles)\n #定义当前的所有窗口\n windows=self.driver.window_handles\n #此时形成一个窗口列表,去倒数第一个窗口里找\n self.driver.switch_to_window(windows[-1])\n #输入用户名密码\n self.driver.find_element_by_id(\"TANGRAM__PSP_4__userName\").send_keys(\"oooleemandy\")\n self.driver.find_element_by_id(\"TANGRAM__PSP_4__phone\").send_keys(\"15566274527\")\n\n #再切换回登陆的窗口,第一个窗口\n self.driver.switch_to_window(windows[0])\n #点击用户名登陆\n self.driver.find_element_by_id(\"TANGRAM__PSP_11__footerULoginBtn\").click()\n #输入用户名密码\n self.driver.find_element_by_id(\"TANGRAM__PSP_11__userName\").send_keys(\"oooleemandy\")\n self.driver.find_element_by_id(\"TANGRAM__PSP_11__password\").send_keys(\"limandi940905\")\n #点击登陆\n self.driver.find_element_by_id(\"TANGRAM__PSP_11__submit\").click()\n\n\n sleep(3)\n\n\n" }, { "alpha_fraction": 0.6796296238899231, "alphanum_fraction": 0.6907407641410828, "avg_line_length": 39.03703689575195, "blob_id": "838ff1bbd38ba0327123f4f20cb38cdf16388def", "content_id": "d8cc5914244f7f6c02d3d66def0060f3c101b632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 135, "num_lines": 27, "path": "/test_selenium/test_wait.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "'''\n打开测试人网站\n点击分类\n查看“最新“是否出现\n如果出现了就点击热门\n'''\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass 
TestWait:\n    def setup(self):\n        self.driver=webdriver.Chrome()\n        self.driver.get(\"https://ceshiren.com/\")\n        #隐式等待\n        self.driver.implicitly_wait(3)\n    def test_wait(self):\n        self.driver.find_element(By.XPATH, '//*[@id=\"ember41\"]/a').click()\n        #直到可被点击 (expected_conditions take a single locator tuple)\n        WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable((By.XPATH, '//*[@class=\"table-heading\"]')))\n        #元素是否可见. 当我们需要找到元素,并且该元素也可见。\n        WebDriverWait(self.driver, 10).until(expected_conditions.visibility_of_element_located((By.XPATH, '//*[@class=\"table-heading\"]')))\n        #当我们不关心元素是否可见,只关心元素是否存在在页面中。\n        WebDriverWait(self.driver, 10).until(expected_conditions.presence_of_element_located((By.XPATH, '//*[@class=\"table-heading\"]')))\n        self.driver.find_element(By.XPATH, \"//*[@id='ember195']/a\").click()" }, { "alpha_fraction": 0.609375, "alphanum_fraction": 0.6640625, "avg_line_length": 17.14285659790039, "blob_id": "6ba7b57bb592a887128e8ff3fcd24bc5d764f23d", "content_id": "8204d528aeb84c5aa140b793c45ff4f145a9e392", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 62, "num_lines": 14, "path": "/201024homework/XuZhu.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "\"\"\"\n定义一个XuZhu类,继承于童姥。虚竹宅心仁厚不想打架。所以虚竹只有一个read(念经)的方法。每次调用都会打印“罪过罪过”\n\"\"\"\nfrom TongLao import TongLao  # assumes TongLao.py sits in the same folder (201024homework)\n\nclass XuZhu(TongLao):\n    def read(self):\n        print(\"罪过罪过\")\n\n\ntl = XuZhu(1000,200)\ntl.see_people('WYZ')\ntl.see_people('李秋水')\ntl.see_people('丁春秋')\ntl.fight_zms(1000,200)\ntl.read()\n\n\n" }, { "alpha_fraction": 0.578125, "alphanum_fraction": 0.578125, "avg_line_length": 15.020833015441895, "blob_id": "12806437ff9b1ea0cfff6e1dc53e0df6f3756c87", "content_id": "ec8572c641f046bcbcfaa7a82422dcab9d36b911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 34, "num_lines": 48, "path": "/python_practice/python_class/python_opp.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "#面向对象\nclass House:\n    #静态属性 类变量(在类之中,在方法之外)\n    door = \"red\"\n    floor = \"white\"\n\n    #构造函数,在类实例化时直接执行了\n    def __init__(self):\n        #在方法中调用类变量,需要加self.\n        print(self.door)\n        #实例变量,类当中,方法中,\"self.变量名\"定义\n        self.kitchen = \"cook\"\n\n\n\n    #定义动态方法\n    def sleep(self):\n        #普通变量 在类中 方法中,并且没有self\n        bed = \"ximengsi\"\n        self.table = \"桌子可以放东西\"\n\n        print(f\"在房子里可以躺在{bed}上睡觉\")\n    def cook(self):\n        print(self.kitchen)\n        print(self.table)\n        print(\"在房子里做饭\")\n\n\n#把类实例化\n#北欧风\nnorth_house = House()\nnorth_house.sleep()  # call sleep() first so self.table exists before cook() reads it\nnorth_house.cook()\n#中式风\nchina_house = House()\n\n#用类名调用类属性\n# print(House.door)\n# #修改类属性\n# House.door = \"white\"\n# print(House.door)\n#\n#\n#\n# #用实例对象调用类属性\n# print(north_house.door)\n# #\n# north_house = \"black\"\n# print(north_house.door)" }, { "alpha_fraction": 0.5709779262542725, "alphanum_fraction": 0.5709779262542725, "avg_line_length": 23.461538314819336, "blob_id": "9212d80530b6e81794d3cf4b7cb1ad1193ad4feb", "content_id": "3df66def4b95734d0c2d80f03d0eed8e067f0770", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "no_license", "max_line_length": 66, "num_lines": 13, "path": "/10thstep/test_requests.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import requests\n\ndef test_demo():\n    url =\"http://httpbin.ceshiren.com/cookies\"\n    header = {\n        'User-Agent': 'hogwarts'\n    }\n    cookie_data={\n        \"hogwarts\": \"school\",\n        \"teacher\":\"AD\"\n    }\n    
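# note: requests encodes the cookies= dict into the Cookie request header; this httpbin endpoint echoes the received cookies back, so r.json() could be asserted on\n    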
r=requests.get(url = url,headers = header,cookies=cookie_data)\n print(r.request.headers)" }, { "alpha_fraction": 0.6517857313156128, "alphanum_fraction": 0.6517857313156128, "avg_line_length": 15, "blob_id": "e4d82daec207052320b6e3d1e9b7b8a61c0d8cf3", "content_id": "a87f4d2756cfc910e1ae4f0056c1066b6ba870e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/python1029_alluredemo/result/test_alluredemo_link_issue_case.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import allure\n\n\[email protected](\"https://www.baidu.com\")\ndef test_with_link():\n print((\"这是一条加了链接的测试\"))\n pass\n" }, { "alpha_fraction": 0.5579661130905151, "alphanum_fraction": 0.5694915056228638, "avg_line_length": 24, "blob_id": "671e2ea4b2799b89283f88b111490c7bb43eccdf", "content_id": "9003f0e6e0a9465c4268ed3e233e6969e695f726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 97, "num_lines": 59, "path": "/service/test_tag.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import json\nimport requests\nimport pytest\n\n\n#企业标签库接口测试\nfrom service.tag import Tag\n\nclass TestTag():\n\n def setup_class(self):\n # 初始化Tag\n self.tag = Tag()\n # 拿到token\n self.tag.get_token()\n\n def test_tag_list(self):\n # 获取新列表 进行校验\n r = self.tag.list()\n assert r.status_code == 200\n assert r.json()['errcode'] == 0\n\n #参数化\n @pytest.mark.parametrize(\"group_name,tag_names\",[\n [\"group_demo_leemandy2\",[{'name': 'tag_demo_leemandy2'}]],\n [\"group_demo_leemandy2\",[{'name': 'tag_demo_leemandy2'}]],\n [\"group_demo_leemandy2\",[{'name': 'tag_demo_leemandy2'},{'name': 'tag_demo_leemandy3'}]],\n\n ])\n def test_tag_add(self,group_name,tag_names):\n #增加标签组\n r= self.tag.add(group_name, tag_names)\n assert r.status_code == 200\n\n #python列表表达式\n #校验 找taggroup下面有没有新建的groupname\n group=[group for group in r.json()['tag_group'] if group['group_name'] == group_name][0]\n #校验 找taggroup下tag下的name是不是我刚刚新建的\n tags=[{'name':tag['name']} for tag in group['tag'] if tag['name']]\n print(group)\n print(tags)\n assert group['group_name'] == group_name\n assert tags == tag_names\n\n\n #tagname超过31个字符回会报错\n def test_tag_fail(self):\n pass\n\n @pytest.mark.parametrize(\"\",[\n #删除单个标签\n #删除多个标签\n #删除不存在的标签\n #删除标签组\n ]\n\n )\n def test_tag_delete(self,group_id,tag_id):\n self.tag.delete()\n" }, { "alpha_fraction": 0.6465116143226624, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 26, "blob_id": "8e69436e3c39168d97f5bc6df36b8d2787c34477", "content_id": "ab95a5d6047f0c6338b8f78d21e9ee4eb50a7340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 87, "num_lines": 8, "path": "/shujuqudong/test_main.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import pytest\nimport yaml\n\nclass TestMain:\n @pytest.mark.parametrize(\"value1,value2\", yaml.safe_load(open(\"./test_main.yaml\")))\n def test_main(self, value1, value2):\n print(value1)\n print(value2)" }, { "alpha_fraction": 0.540960431098938, "alphanum_fraction": 0.5564971566200256, "avg_line_length": 18.69444465637207, "blob_id": "9ebf79dfdb48eaf3a501a31a5dcae3b6ef7f7ad8", "content_id": "c8a608797e392e0edc69abf95e13b14139bc43f2", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 47, "num_lines": 36, "path": "/python_practice/python_class/bicycle.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "class Bicycle:\n def run(self, km):\n print(f\"一共骑行{km}公里\")\n\n#子类继承父类\nclass EBicycle(Bicycle):\n #属性需要传参定义,可以直接放到构造函数中\n def __init__(self,valume):\n self.valume = valume\n\n #充电 方法\n def fill_charge(self,vol):\n #充电后的电量=本身的电量+充电电量\n self.valume = self.valume + vol\n print(f\"充了{vol}度电,现在电量为{self.valume}度\")\n\n def run(self,km):\n\n #1、获取目前电量能电动骑行的历程数\n power_km = self.valume *10\n\n if power_km >= km:\n print(f\"使用电量骑了{km}\")\n else:\n #电量不够了 用脚骑\n print(f\"使用电量骑了{power_km}\")\n super().run(km - power_km)\n\n\nebike = EBicycle(10)\nebike.fill_charge(150)\nebike.run(2)\n\n\n# bike = Bicycle()\n# print(bike.run(10))" }, { "alpha_fraction": 0.4461072087287903, "alphanum_fraction": 0.5179011225700378, "avg_line_length": 61.10588073730469, "blob_id": "75a91ce0103f80b1b2c12faa0fbc16ea8a30acdf", "content_id": "ec0818c02c78cc432599d1fd2c586cb2b4e4fced", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5431, "license_type": "no_license", "max_line_length": 131, "num_lines": 85, "path": "/qiyeweixin1/test_contact.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import shelve\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\n\nclass TestWX:\n def setup(self):\n '''复用浏览器,创建option。option制定浏览器启动debug地址。传进option'''\n option = Options()\n option.debugger_address = \"127.0.0.1:9222\"\n\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n\n def test_add_contact(self):\n #cookies = self.driver.get_cookies()\n cookies = [{'domain': '.qq.com', 'httpOnly': False, 'name': 'uin', 'path': '/', 'secure': False,\n 'value': 'o0137787592'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1641444557.818233, 'httpOnly': False,\n 'name': 'wwrtx.c_gdpr', 'path': '/', 'secure': False, 'value': '0'},\n {'domain': '.qq.com', 'httpOnly': False, 'name': 'skey', 'path': '/', 'secure': False,\n 'value': '@2J2LvbQDD'},\n {'domain': '.qq.com', 'expiry': 2147483430.511013, 'httpOnly': False, 'name': 'RK', 'path': '/',\n 'secure': False, 'value': 'JMJcSTgSG7'},\n {'domain': '.qq.com', 'expiry': 2147483430.511117, 'httpOnly': False, 'name': 'ptcz', 'path': '/',\n 'secure': False, 'value': '0c1a882cad52a4cbc5005d9fc4854a9ca4021eb49f19f142d1c2ae1dce46acc0'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1641559039, 'httpOnly': False,\n 'name': 'Hm_lvt_9364e629af24cb52acc78b43e8c9f77d', 'path': '/', 'secure': False,\n 'value': '1609908568,1610023039'},\n {'domain': '.qq.com', 'expiry': 1673097367, 'httpOnly': False, 'name': '_ga', 'path': '/',\n 'secure': False, 'value': 'GA1.2.1128381225.1609908570'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1612617440.930347, 'httpOnly': False,\n 'name': 'wwrtx.i18n_lan', 'path': '/', 'secure': False, 'value': 'zh'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.ref', 'path': '/',\n 'secure': False, 'value': 'direct'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.refid', 'path': '/',\n 'secure': False, 'value': '03184142'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.ltype', 
'path': '/',\n 'secure': False, 'value': '1'},\n {'domain': 'work.weixin.qq.com', 'expiry': 1610028127.526147, 'httpOnly': True, 'name': 'ww_rtkey',\n 'path': '/', 'secure': False, 'value': '3kc9kf'},\n {'domain': '.qq.com', 'expiry': 1610111767, 'httpOnly': False, 'name': '_gid', 'path': '/',\n 'secure': False, 'value': 'GA1.2.188972918.1609996592'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wxpay.corpid', 'path': '/',\n 'secure': False, 'value': '1970324943175019'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wxpay.vid', 'path': '/',\n 'secure': False, 'value': '1688854068709900'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wwrtx.vid', 'path': '/',\n 'secure': False, 'value': '1688854068709900'}, {'domain': '.work.weixin.qq.com', 'httpOnly': False,\n 'name': 'Hm_lpvt_9364e629af24cb52acc78b43e8c9f77d',\n 'path': '/', 'secure': False,\n 'value': '1610023039'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wwrtx.d2st', 'path': '/',\n 'secure': False, 'value': 'a9866635'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.sid', 'path': '/',\n 'secure': False, 'value': 'HGCZDgTSb3atjZZild4lXkWMDU5axgCRbaNpnyGp0ooQVCaO9vpYSREdAcEFBt4C'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.vst', 'path': '/',\n 'secure': False,\n 'value': 'h79gUTyxE73XyGRdmnZlXSZGnpv7sceWYz_7-_proe7OZJZki3yhGvHSscbwzbGBohqp0PDpxcfScFPDYPHj8K9Y7muKY9zi8Xnwo3cBGmsi0pO0gQ0IRCkONVp_nwfkGmdQ9nLqqIkmBr3wCPFg8K9L1R8zJJEMRAE8NJmpqrnJdthDwxAwCh1j5tnFRSJlKc9-579wuzIqe6gFSZCtq1vT9v8wIJD2RlPhtftEzUwDOiuYAjiyhk8G-8OTVlfUZmL4JUiVuwqK3Y4_cDf7zA'}]\n #print(cookies)\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame\")\n\n\n '''以上cookie列表中有多个字典,for循环遍历列表,让每一个字典都放进'''\n for cookie in cookies:\n self.driver.add_cookie(cookie)\n\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame\")\n\n #点击添加成员\n self.driver.find_element(By.CSS_SELECTOR, '.index_service_cnt_item_title').click()\n #输入姓名\n self.driver.find_element_by_id(\"username\").send_keys(\"DD\")\n #输入账号\n self.driver.find_element_by_id(\"memberAdd_acctid\").send_keys(\"dd\")\n #输入手机号\n self.driver.find_element_by_id(\"memberAdd_phone\").send_keys(\"13044444444\")\n #点击保存\n self.driver.find_element(By.CSS_SELECTOR, \".qui_btn.ww_btn.js_btn_save\").click()\n assert \"保存成功!\"\n" }, { "alpha_fraction": 0.49708500504493713, "alphanum_fraction": 0.5694998502731323, "avg_line_length": 55.19827651977539, "blob_id": "577b0c5bd1f4fe05e1b9f58aed5b5f999259673d", "content_id": "91b32129ad5946d15144b7d1c587e0600a230141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6696, "license_type": "no_license", "max_line_length": 303, "num_lines": 116, "path": "/qiyeweixin1/test_ xixi.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport shelve\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\n\nclass TestWX:\n def setup(self):\n option = Options()\n # 注意 9222 端口要与命令行启动的端口保持一致 --remote-debugging-port=9222\n option.debugger_address = \"127.0.0.1:9222\"\n self.driver = webdriver.Chrome()\n\n def teardown(self):\n self.driver.quit()\n\n def test_case1(self):\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#index\")\n self.driver.find_element(By.ID, \"menu_contacts\").click()\n\n def 
test_cookie(self):\n # cookies = self.driver.get_cookies()\n cookies = [\n {'domain': '.qq.com', 'httpOnly': False, 'name': 'uin', 'path': '/', 'secure': False,\n 'value': 'o0137787592'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1641444557.818233, 'httpOnly': False, 'name': 'wwrtx.c_gdpr',\n 'path': '/', 'secure': False, 'value': '0'},\n {'domain': '.qq.com', 'httpOnly': False, 'name': 'skey', 'path': '/', 'secure': False,\n 'value': '@2J2LvbQDD'},\n {'domain': '.qq.com', 'expiry': 2147483430.511013, 'httpOnly': False, 'name': 'RK', 'path': '/',\n 'secure': False, 'value': 'JMJcSTgSG7'},\n {'domain': '.qq.com', 'expiry': 2147483430.511117, 'httpOnly': False, 'name': 'ptcz', 'path': '/',\n 'secure': False, 'value': '0c1a882cad52a4cbc5005d9fc4854a9ca4021eb49f19f142d1c2ae1dce46acc0'},\n {'domain': '.qq.com', 'expiry': 1673079672, 'httpOnly': False, 'name': '_ga', 'path': '/', 'secure': False,\n 'value': 'GA1.2.1128381225.1609908570'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1641532590, 'httpOnly': False,\n 'name': 'Hm_lvt_9364e629af24cb52acc78b43e8c9f77d', 'path': '/', 'secure': False, 'value': '1609908568'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1612600110.970827, 'httpOnly': False, 'name': 'wwrtx.i18n_lan',\n 'path': '/', 'secure': False, 'value': 'zh'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.ref', 'path': '/', 'secure': False,\n 'value': 'direct'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.refid', 'path': '/', 'secure': False,\n 'value': '03184142'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'Hm_lpvt_9364e629af24cb52acc78b43e8c9f77d',\n 'path': '/', 'secure': False, 'value': '1609996590'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.ltype', 'path': '/', 'secure': False,\n 'value': '1'},\n {'domain': 'work.weixin.qq.com', 'expiry': 1610028127.526147, 'httpOnly': True, 'name': 'ww_rtkey',\n 'path': '/', 'secure': False, 'value': '3kc9kf'},\n {'domain': '.qq.com', 'expiry': 1610094072, 'httpOnly': False, 'name': '_gid', 'path': '/', 'secure': False,\n 'value': 'GA1.2.188972918.1609996592'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wxpay.corpid', 'path': '/', 'secure': False,\n 'value': '1970324943175019'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wxpay.vid', 'path': '/', 'secure': False,\n 'value': '1688854068709900'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wwrtx.vid', 'path': '/', 'secure': False,\n 'value': '1688854068709900'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': False, 'name': 'wwrtx.d2st', 'path': '/', 'secure': False,\n 'value': 'a7660320'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.sid', 'path': '/', 'secure': False,\n 'value': 'HGCZDgTSb3atjZZild4lXv7CS-1WJd5q6Skr1MC62vfiPHMZf4S1UGLYNAU301mZ'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.vst', 'path': '/', 'secure': False,\n 'value': 'dQYK81YHVde8EyIoPwvIXSvU4yXdODUSwoohTNR7WAX3xkvlu9E0Jmql5J4B_NA-Vylr4BPeULXITZXXxAdTweWloLFu8ovEE5rXMPcfQHfx_q7yNhAdjqrugW0y36Jf14PQEmCTVWq3NjNoI06ge899qe6yDloCS0fKj0COgZ1EFJm--9uW1F0dQKFpAIKSY9bbE41sQv5Y_jkjkFG0MiSEfrrqH33Drf1faVGArQ-QSYL18ctF3OAcwfyVsOr6qhulnU7Os9jQqjhMwY0gpw'}\n ]\n print(cookies)\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#index\")\n\n for cookie in cookies:\n if 'expiry' in cookie.keys():\n cookie.pop('expiry')\n self.driver.add_cookie(cookie)\n self.driver.refresh()\n # 
self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#index\")\n\n def test_import_contacts(self):\n # shelve 模块, python 自带的对象持久化存储\n db = shelve.open('cookies')\n cookies = db['cookie']\n db.close()\n # 打开无痕新页面\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#index\")\n # 加入cookie\n for cookie in cookies:\n if 'expiry' in cookie.keys():\n cookie.pop('expiry')\n self.driver.add_cookie(cookie)\n\n # 刷新当前页面,获取登录状态\n self.driver.refresh()\n # 点击【导入联系人】\n self.driver.find_element(By.CSS_SELECTOR, \".index_service_cnt_itemWrap:nth-child(2)\").click()\n\n sleep(5)\n\n cookies = [\n {'domain': '.work.weixin.qq.com', 'expiry': 1612615175.352724, 'httpOnly': False, 'name': 'wwrtx.i18n_lan',\n 'path': '/', 'secure': False, 'value': 'zh'},\n {'domain': '.work.weixin.qq.com', 'expiry': 1641559174.095903, 'httpOnly': False, 'name': 'wwrtx.c_gdpr',\n 'path': '/', 'secure': False, 'value': '0'},\n {'domain': 'work.weixin.qq.com', 'expiry': 1610054710.095798, 'httpOnly': True, 'name': 'ww_rtkey',\n 'path': '/', 'secure': False, 'value': '2afftht'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.ref', 'path': '/', 'secure': False,\n 'value': 'direct'},\n {'domain': '.work.weixin.qq.com', 'httpOnly': True, 'name': 'wwrtx.refid', 'path': '/', 'secure': False,\n 'value': '02601473'}]\n\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#contacts\")\n '''以上cookie列表中有多个字典,for循环遍历列表,让每一个字典都放进'''\n for cookie in cookies:\n self.driver.add_cookie(cookie)\n\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#contacts\")" }, { "alpha_fraction": 0.5093509554862976, "alphanum_fraction": 0.5346534848213196, "avg_line_length": 19.659090042114258, "blob_id": "606b93c0aa10c5798b6e54258553978f5edea1a0", "content_id": "f852b8bdac22ca37c3d1e320fe3ae64c386ffb6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1227, "license_type": "no_license", "max_line_length": 52, "num_lines": 44, "path": "/python_practice/game/game_round_fun.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "#定义敌人的血量 敌人的攻击力\nimport random\n\n\ndef fight(enemy_hp, enemy_power):\n #定义自己的血量 自己的攻击力\n my_hp = 1000\n my_power = 200\n\n #打印敌人的血量 敌人的攻击力\n print(f\"敌人的血量为{enemy_hp}, 敌人的攻击力为{enemy_power}\")\n\n #加入循环 进行多轮游戏\n while True:\n my_hp = my_hp - enemy_power\n enemy_hp = enemy_hp - my_power\n\n #判断谁的血量小于等于0\n if my_hp <= 0:\n #打印我和敌人的剩余血量\n print(f\"我的剩余血量为{my_hp}\")\n print(f\"敌人的剩余血量为{enemy_hp}\")\n print(\"我输了\")\n\n #满足条件跳出循环\n break\n elif enemy_hp <= 0:\n print(f\"我的剩余血量为{my_hp}\")\n print(f\"敌人的剩余血量为{enemy_hp}\")\n print(\"我赢了\")\n break\n\n\nif __name__ == \"__main__\":\n #列表推导式生成hp\n hp = [x for x in range(990,1010)]\n\n #让敌人的hp从hp列表中随机取一个值\n enemy_hp = random.choice(hp)\n\n enemy_power = random.randint(190,210)\n\n #调用函数,传入敌人的hp和power\n fight(enemy_hp, enemy_power)\n" }, { "alpha_fraction": 0.6175257563591003, "alphanum_fraction": 0.6288659572601318, "avg_line_length": 28.393939971923828, "blob_id": "a282cca1a36968dbcbebaf5d56993cb352151eea", "content_id": "2e9cc0468b6f274d8c34fc49307f2f77e285b22a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 56, "num_lines": 33, "path": "/test_selenium/test_TouchAction.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver import TouchActions\nfrom time 
import sleep\n\nclass TestTouchAction:\n def setup(self):\n '''设置w3c标准'''\n option = webdriver.ChromeOptions()\n option.add_experimental_option('w3c',False)\n self.driver = webdriver.Chrome(options=option)\n\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_touchaction_scrollbutton(self):\n self.driver.get(\"https://www.baidu.com/\")\n #定位到文本框\n el = self.driver.find_element_by_id(\"kw\")\n #定位到搜索框\n el_search = self.driver.find_element_by_id(\"su\")\n #对文本框中输入\n el.send_keys(\"selenium测试\")\n action = TouchActions(self.driver)\n #点击搜索\n action.tap(el_search)\n action.perform()\n\n #鼠标滑动,从el这个元素开始划,x轴偏移量为0,y轴偏移量越大越好,想划到底部\n action.scroll_from_element(el,0,10000).perform()\n # sleep(3)\n" }, { "alpha_fraction": 0.658203125, "alphanum_fraction": 0.677734375, "avg_line_length": 27.38888931274414, "blob_id": "79bfbef1020cae1e5e38dfa8ed35b0bdcf09c5fb", "content_id": "9d27fc471f988f3ab20e3936fc6be408838ce658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 554, "license_type": "no_license", "max_line_length": 72, "num_lines": 18, "path": "/qiyeweixin1/test_cookiesdemo.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "from time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\nclass TestWX:\n def setup(self):\n '''复用浏览器,创建option。option制定浏览器启动debug地址。传进option'''\n option = Options()\n option.debugger_address = \"127.0.0.1:9222\"\n self.driver = webdriver.Chrome(options=option)\n\n\n def test_get_cookie(self):\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame\")\n cookies = self.driver.get_cookies()\n print(cookies)\n\n" }, { "alpha_fraction": 0.5313653349876404, "alphanum_fraction": 0.5405904054641724, "avg_line_length": 21.957447052001953, "blob_id": "8b1fec22a41668a4b140d8a86d23f5d0380ca548", "content_id": "fd35d5468604403db26a4d30f484096ac74dbecc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1666, "license_type": "no_license", "max_line_length": 65, "num_lines": 47, "path": "/201024homework/TongLao.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "\"\"\"\n定义一个天山童姥类 ,类名为TongLao,属性有血量,武力值(通过传入的参数得到)。TongLao类里面有2个方法,\nsee_people方法,需要传入一个name参数,如果传入”WYZ”(无崖子),则打印,“师弟!!!!”,\n如果传入“李秋水”,打印“师弟是我的!”,如果传入“丁春秋”,打印“叛徒!我杀了你”\nfight_zms方法(天山折梅手),调用天山折梅手方法会将自己的武力值提升10倍,血量缩减2倍。需要传入敌人的hp,power,\n进行一回合制对打,打完之后,比较双方血量。血多的一方获胜。\n\"\"\"\nclass TongLao:\n # 构造函数\n # 定义我的血量和武力值\n def __init__(self, hp, power):\n self.hp = hp\n self.power = power\n\n # 定义see _people方法\n def see_people(self,name):\n self.name = name\n\n if name == 'WYZ':\n print(\"师弟!!!!\")\n\n elif name == '李秋水':\n print(\"师弟是我的!\")\n\n elif name == '丁春秋':\n print(\"叛徒!我杀了你\")\n\n # 定义天山折梅手方法\n def fight_zms(self, enemy_hp, enemy_power):\n # 自己血量缩减两倍\n self.hp= self.hp / 2\n # 自己武力值提升10倍\n self.power = self.power * 10\n\n # 我的血量和敌人的血量\n self.hp = self.hp - enemy_power\n enemy_hp = enemy_hp - self.power\n\n print(self.hp)\n print(enemy_hp)\n\n # 判断谁的血量小于等于0\n if self.hp < enemy_hp:\n print(\"我输了\")\n\n else:\n print(\"我赢了\")\n\n\n\n\n\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.675000011920929, "avg_line_length": 17.733333587646484, "blob_id": "d9204cf100cab4a8d62799ab405d653f80a1d11c", "content_id": "89536632d9d96739c8fb4ef7594a2e4ba13e2b90", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/test_pytest/tests/test_fixture_demo.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom test_pytest.core.calc import Calc\n\n\[email protected](scope='module')\ndef calc_init():\n print(\"calc_init\")\n return Calc()\n\ndef test_calc_demo(calc_init):\n assert calc_init.mul(1,2) == 2\n\ndef test_calc_demo2(calc_init):\n assert calc_init.mul(1,3) == 3" }, { "alpha_fraction": 0.6437029242515564, "alphanum_fraction": 0.6469321846961975, "avg_line_length": 31.068965911865234, "blob_id": "ae72bc4a00496c9699238f79a1a19d6c74ba70b1", "content_id": "8b17ec8150be730d9c4e3e17f73c0c252adc0f9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 94, "num_lines": 29, "path": "/podemo1/page/addmemberpage.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom selenium.webdriver.common.by import By\nfrom podemo1.page.base_page import BasePage\n\nclass AddMemberPage(BasePage):\n\n '''添加联系人操作'''\n def add_member(self, name, account, phonenum):\n self.find(By.ID, \"username\").send_keys(name)\n self.find(By.ID, \"memberAdd_acctid\").send_keys(account)\n self.find(By.ID, \"memberAdd_phone\").send_keys(phonenum)\n self.find(By.CSS_SELECTOR, \".js_btn_save\").click()\n return True\n\n\n '''判断联系人是否添加成功'''\n def get_member(self, value):\n '''调用显示等待方法,查看checkbok是否可被点击,可被点击说明页面加载完成了'''\n locator = (By.CSS_SELECTOR, \".ww_checkbox\")\n self.wait_for_click(locator)\n\n\n elements = self.finds(By.CSS_SELECTOR, \".member_colRight_memberTable_td:nth-child(2)\")\n '''列表推导式,在element中获取title属性'''\n titles = [element.get_attribute(\"title\") for element in elements]\n\n return titles" }, { "alpha_fraction": 0.6246575117111206, "alphanum_fraction": 0.6260274052619934, "avg_line_length": 20.147058486938477, "blob_id": "0c4fbd7e48d1891071c426758dda5df3377ea383", "content_id": "00c5a6a8c2b7f0a6fca9972727e3efc031463190", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/1117zhibo1framework/test_demo.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "'''\nweb自动化搜索\n'''\nimport pytest\nimport yaml\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\ndef load_data(path):\n with open(path, encoding='utf-8') as f:w\n return yaml.load(f)\n\ndef test_load_data():\n pass\n\n\n\nclass TestDemo:\n #参数化\n @pytest.mark.parametrize(\"keyword\",load_data(\"test_data.yaml\"))\n def test_search(self,keyword):\n driver = webdriver.Chrome()\n driver.get(\"https://ceshiren.com\")\n driver.find_element(By.ID, 'search-button').click()\n driver.find_element(By.ID, 'search-term').send_keys(keyword)\n\n\n if 'get' in step:\n url = step.get('get')\n driver.get(url)\n\n if 'find_element' in step:\n by = step.get(find)\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6283288598060608, "alphanum_fraction": 0.6325743198394775, "avg_line_length": 31, "blob_id": "bd55e86d0e8e9d09e608528bb24d168d9bac4028", "content_id": "596a598e438d91c4fd2c0d32e79addf274b347b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2787, "license_type": "no_license", 
"max_line_length": 98, "num_lines": 81, "path": "/test_selenium/test_ActionChains.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom time import sleep\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass TestActionChains():\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n @pytest.mark.skip\n def test_click(self):\n self.driver.get(\"http://sahitest.com/demo/clicks.htm\")\n #分别拿到单击、双击、右键元素\n element_click = self.driver.find_element_by_xpath(\"//input[@value='click me']\")\n element_doubleclick = self.driver.find_element_by_xpath(\"//input[@value='dbl click me']\")\n element_rightclick = self.driver.find_element_by_xpath(\"//input[@value='right click me']\")\n\n #创建action方法\n action = ActionChains(self.driver)\n #分别创建单击、右键、双击方法\n action.click(element_click)\n action.context_click(element_rightclick)\n action.double_click(element_doubleclick)\n sleep(3)\n\n #执行action\n action.perform()\n sleep(3)\n\n @pytest.mark.skip\n def test_movetoelement(self):\n self.driver.get(\"https://www.baidu.com/\")\n #找到设置\n ele = self.driver.find_element_by_link_text(\"设置\")\n action = ActionChains(self.driver)\n #光标移动到设置上\n action.move_to_element(ele)\n action.perform()\n sleep(3)\n\n @pytest.mark.skip\n def test_dragdrop(self):\n self.driver.get(\"http://sahitest.com/demo/dragDropMooTools.htm\")\n drag_element = self.driver.find_element_by_id(\"dragger\")\n drop_element = self.driver.find_element_by_xpath(\"/html/body/div[2]\")\n action = ActionChains(self.driver)\n\n #拖拽\n # action.drag_and_drop(drag_element,drop_element).perform()\n\n #点击某个元素然后释放某个元素\n # action.click_and_hold(drag_element).release(drop_element).perform()\n\n #点击某个元素不放,然后moveto到某个元素上\n action.click_and_hold(drag_element).move_to_element(drop_element).release().perform()\n sleep(3)\n\n def test_keys(self):\n self.driver.get(\"http://sahitest.com/demo/label.htm\")\n ele = self.driver.find_element_by_xpath(\"/html/body/label[1]/input\")\n ele.click()\n\n action = ActionChains(self.driver)\n #输入文字\n action.send_keys(\"username\").pause(1)\n #输入空格\n action.send_keys(Keys.SPACE).pause(1)\n #再输入文字\n action.send_keys(\"tom\").pause(1)\n #操作回删\n action.send_keys(Keys.BACK_SPACE).perform()\n sleep(3)" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 23, "blob_id": "56ee3e34c1595245a7f63b5847721b2d292374af", "content_id": "8252f1bae9a293623d79ee286ab398379df71d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 76, "num_lines": 27, "path": "/page/main.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "'''\n对企业微信首页建模\n主页功能:登陆 注册\n'''\nfrom selenium.webdriver.common.by import By\nfrom page.Login import Login\nfrom page.Register import Register\nfrom page.base_page import BasePage\n\n\nclass Main(BasePage):\n #声明base url,子类里重写url。企业微信首页网址\n _base_url = \"https://work.weixin.qq.com/\"\n\n #goto注册页面\n def goto_register(self):\n #复制的是class,”.“代表class\n self.find(By.CSS_SELECTOR, \".index_head_info_pCDownloadBtn\").click()\n return Register(self._driver)\n\n\n #goto登陆页面\n def goto_login(self):\n #点击登陆\n self.find(By.CSS_SELECTOR,\".index_top_operation_loginBtn\").click()\n 
#进入到注册页\n return Login(self._driver)\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 21, "blob_id": "b4d5809af9e9334642246e3003a3819d6d37b985", "content_id": "0631e8b9c6f379e8f68892f267719796c89041a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/README.md", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "## pytest allure作业提交一" }, { "alpha_fraction": 0.6772334575653076, "alphanum_fraction": 0.6772334575653076, "avg_line_length": 20.6875, "blob_id": "5408f18d1ffd7a8e01c1a5f03ebe0009fafa15a2", "content_id": "ac0cab3d960c99db7505e3e659e8e3699c3d765b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 64, "num_lines": 16, "path": "/testcase/test_register.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "'''\n注册测试用例\n'''\nfrom page.main import Main\n\n\nclass TestRegister:\n\n #初始化,setup方法会在下面每个测试用例前执行\n def setup(self):\n self.main=Main()\n\n def test_register(self):\n #链式调用 main方法中的gotoregister,可以return到Register中的register方法\n #assert self.main.goto_register().register()\n self.main.goto_login().goto_register().register()\n" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6231481432914734, "avg_line_length": 28.16216278076172, "blob_id": "8f15ea269059996b52245548271a0b1150607234", "content_id": "6c929dfc912eb386d06f00addacef148a94ef3e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1198, "license_type": "no_license", "max_line_length": 93, "num_lines": 37, "path": "/test_selenium/test_frame.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import os\nfrom time import sleep\nfrom selenium import webdriver\n\nfrom test_pytest.base import Base\n\n\nclass TestWindow():\n def setup(self):\n #获取传过来的brower参数\n browser = os.getenv(\"browser\")\n #判断browser参数\n if browser == 'firefox':\n self.driver = webdriver.Firefox()\n elif browser == 'headless':\n self.driver = webdriver.PhantomJS()\n else:\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_frame(self):\n self.driver.get(\"https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable\")\n #切换frame,找到”请拖拽我“这个元素所在的frame,用id取出\n self.driver.switch_to.frame(\"iframeResult\")\n #打印”请推拽我“\n print(self.driver.find_element_by_id(\"draggable\").text)\n\n\n #切换回默认frame,想去点击”点击运行“\n self.driver.switch_to.parent_frame()\n #或者\n #self.driver.switch_to.default_content()\n print(self.driver.find_element_by_id(\"submitBTN\").text)\n\n" }, { "alpha_fraction": 0.53751540184021, "alphanum_fraction": 0.5399754047393799, "avg_line_length": 19.325000762939453, "blob_id": "181a94475f30dab833ea054861c2bbc33635ce2d", "content_id": "cf4d496ea3501220bf30b83757b0feb2d561a37c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "no_license", "max_line_length": 39, "num_lines": 40, "path": "/python1029_alluredemo/result/test_feature_story.py", "repo_name": "oooleemandy/hogwarts_lg4", "src_encoding": "UTF-8", "text": "import pytest\nimport allure\n\[email protected](\"登陆模块\")\nclass TestLogin():\n\n 
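# note: allure.feature groups the stories below under one module in the report; run pytest with --alluredir=<dir> to collect the results\n    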
@allure.story(\"登陆成功\")\n def test_login_success(self):\n print(\"登陆用例 登陆成功\")\n pass\n\n @allure.story(\"登陆失败\")\n def test_login_success_a(self):\n print(\"登陆用例 登陆成功a\")\n\n @allure.story(\"用户名缺失\")\n def test_login_success_b(self):\n print(\"用户名缺失\")\n\n\n @allure.story(\"密码缺失\")\n def test_login_failture(self):\n with allure.step(\"点击用户名\"):\n print(\"请输入用户名\")\n with allure.step(\"点击密码\"):\n print(\"请输入密码\")\n print(\"点击登陆\")\n with allure.step(\"点击登陆之后登陆失败\"):\n assert '1'==1\n print(\"登陆失败\")\n pass\n\n @allure.story(\"登陆失败\")\n def test_login_failure(self):\n print(\"登陆用例 登陆失败\")\n pass\n\n\nif __name__ == '__main__':\n pytest.main()\n" } ]
35
xr71/ibm-ai-engineering
https://github.com/xr71/ibm-ai-engineering
0c9e5d0d220594c8b027a4532f1b945b28ffefe7
25127d4dba3bbcf2c5c9aa0b5326135dd64c7559
1254ffff997d167ddf510c8d4cc15b4eb7ae5994
refs/heads/master
2022-06-18T06:29:36.853368
2020-05-04T13:28:28
2020-05-04T13:28:28
258537613
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6339375376701355, "alphanum_fraction": 0.6565961837768555, "avg_line_length": 23.469135284423828, "blob_id": "c467f595b649b382a3bfc76f3940c62de0eebf2c", "content_id": "ede9f7ad6205346a5c6260032a70fac1d946ea21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1986, "license_type": "no_license", "max_line_length": 73, "num_lines": 81, "path": "/p4_deep_nn_pytorch/06_01_softmax.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\n\ncriterion = nn.CrossEntropyLoss()\n# print(criterion)\n\nclass SoftMax(nn.Module):\n def __init__(self, in_size, out_size):\n super(SoftMax, self).__init__()\n self.linear = nn.Linear(in_size, out_size)\n\n def forward(self, x):\n out = self.linear(x)\n return out\n\n\nmymodel = SoftMax(2, 3)\nprint(list(mymodel.parameters()))\n\nprint()\nprint(\"manual test...\")\nx = torch.tensor([[1.0, 2.0]])\nz = mymodel(x)\nprint(\"softmax values are:\", z)\nvalue, index = z.max(1)\nprint(\"yhat index is:\", index)\nprint(\"yhat value is:\", value)\n\n\n\n# pytorch builtin softmax\n# use builtin datasets\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nfrom torch import optim\n\ndata_path = \"/home/xuren\"\n\ntrain_data = dsets.MNIST(root=data_path, train=True, download=True,\n transform=transforms.ToTensor())\nvalidation_data = dsets.MNIST(root=data_path, train=False, download=True,\n transform=transforms.ToTensor())\n\nprint(train_data)\nprint()\n# print(train_data[0])\nprint(train_data[0][0].size())\nprint(train_data[0][0].shape)\n\nmodel = SoftMax(28*28, 10)\nprint(model)\nprint(list(model.parameters()))\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)\n\ntrainloader = DataLoader(train_data, batch_size=32)\nvalidloader = DataLoader(validation_data, batch_size=32)\n\nfor e in range(2):\n for x,y in trainloader:\n optimizer.zero_grad()\n\n yhat = model(x.view(-1, 28*28))\n loss = criterion(yhat, y)\n loss.backward()\n optimizer.step()\n\n print(loss)\n\n # eval\n print()\n print(\"evaluating...\")\n correct = 0\n for x_test, y_test in validloader:\n yhat = model(x_test.view(-1, 28*28))\n values, indices = torch.max(yhat.data, 1)\n # print((indices == y_test).sum().item())\n correct += (indices == y_test).sum().item()\n print(correct / 10000)\n\n\n\n\n" }, { "alpha_fraction": 0.5776450634002686, "alphanum_fraction": 0.5964163541793823, "avg_line_length": 19.875, "blob_id": "0ca9de96da3f3d2722633121fca0bb830f602229", "content_id": "d0b8243af5132cf0d494bb7d8e3f3ddb875b6763", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1172, "license_type": "no_license", "max_line_length": 73, "num_lines": 56, "path": "/p4_deep_nn_pytorch/03_02_mini_batch_sgd.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import Dataset, DataLoader\n\nclass MyData():\n def __init__(self):\n self.x = torch.arange(-3, 3, 0.1, requires_grad=True).view(-1, 1)\n f = -3 * self.x + torch.randn(self.x.size()) * 0.1\n self.y = torch.tensor(f, requires_grad=True)\n self.len = self.x.shape[0]\n\n # getter\n def __getitem__(self, index):\n return self.x[index], self.y[index]\n\n # len\n def __len__(self):\n return self.len\n\n\ndataset = MyData()\nprint(dataset[:5])\nprint(len(dataset))\n\n\n# create 
dataloader\ntrainloader = DataLoader(dataset=dataset, batch_size=5)\nprint(trainloader)\n\n# for batch in trainloader:\n# print(batch)\n\n\ndef forward(x):\n return w*x + b\n\ndef criterion(yhat, y):\n return torch.mean( (yhat-y)**2 )\n\nlr = 0.01\n# training in batches\nw = torch.tensor(0.0, requires_grad=True)\nb = torch.tensor(0.0, requires_grad=True)\nfor e in range(10):\n for x,y in trainloader:\n yhat = forward(x)\n loss = criterion(yhat, y)\n loss.backward()\n\n w.data -= lr*w.grad.data\n b.data -= lr*b.grad.data\n\n w.grad.data.zero_()\n b.grad.data.zero_()\n\n\n print(w, b)\n\n\n\n" }, { "alpha_fraction": 0.53861004114151, "alphanum_fraction": 0.584942102432251, "avg_line_length": 31.375, "blob_id": "33de3053cd7c4910fe53a915215277755bb63954", "content_id": "df1d6fa55c91ea8f5e4002c163980393af07033a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 518, "license_type": "no_license", "max_line_length": 73, "num_lines": 16, "path": "/p3_nn_keras/04_keras_cnn.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tensorflow import keras\n\nmodel = keras.Sequential()\n\nmodel.add(tf.keras.layers.Conv2D(16, kernel_size=(2, 2), strides=(1, 1),\n activation='relu',\n input_shape=(128, 128, 3)\n ))\n\n\nmodel.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(tf.keras.layers.Dense(100, activation='relu'))\nmodel.add(tf.keras.layers.Dense(10, activation='softmax'))\n\nprint(model.summary())\n" }, { "alpha_fraction": 0.6313099265098572, "alphanum_fraction": 0.6555910706520081, "avg_line_length": 29.076923370361328, "blob_id": "d76c975722f6a914b4396700e195b2773bad97d1", "content_id": "6ab9a252b665bd9ea0bb562aa05f12dced45b0f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1565, "license_type": "no_license", "max_line_length": 95, "num_lines": 52, "path": "/p4_deep_nn_pytorch/07_01_fc_neural_networks_pytorch.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torchvision import transforms\nimport torchvision.datasets as dsets\nfrom torch.utils.data import Dataset, DataLoader\n\n\ndatapath = \"/home/xuren\"\ntrain_data = dsets.MNIST(datapath, download=True, train=True, transform=transforms.ToTensor())\nprint(train_data)\n\nvalid_data = dsets.MNIST(datapath, download=True, train=False, transform=transforms.ToTensor())\nprint(valid_data)\n\nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Net, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n yhat = torch.sigmoid(self.linear1(x))\n yhat = torch.sigmoid(self.linear2(yhat))\n return yhat\n\n\nmodel = Net(28*28, 128, 10)\ntrainloader = DataLoader(train_data, batch_size=32)\nvalidloader = DataLoader(valid_data, batch_size=32)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\nfor e in range(2):\n for x,y in trainloader:\n yhat = model(x.view(-1, 28*28))\n loss = criterion(yhat, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print(loss)\n\n # eval\n print()\n print(\"evaluating...\")\n correct = 0\n for x_test, y_test in validloader:\n yhat = model(x_test.view(-1, 28*28))\n values, indices = torch.max(yhat.data, 1)\n # print((indices == y_test).sum().item())\n correct += (indices == 
y_test).sum().item()\n print(correct / 10000)\n\n" }, { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.6175030469894409, "avg_line_length": 26.694915771484375, "blob_id": "045a587df2020516c78c8360eb8802a2c71a688a", "content_id": "88062b886e693c825434a9c052c42fcf10f4165b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1634, "license_type": "no_license", "max_line_length": 70, "num_lines": 59, "path": "/p4_deep_nn_pytorch/08_03_nn_dropout.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn, optim\nfrom torchvision import transforms\nimport torchvision.datasets as dsets\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass Net(nn.Module):\n def __init__(self, input_dim, hidden_dim1, output_dim, p=0):\n super(Net, self).__init__()\n\n # create a dropout object\n self.drop = nn.Dropout(p=p)\n self.linear1 = nn.Linear(input_dim, hidden_dim1)\n self.linear2 = nn.Linear(hidden_dim1, output_dim)\n\n def forward(self, x):\n x = torch.relu(self.linear1(x))\n x = self.drop(x)\n x = self.linear2(x)\n\n return x\n\n\nmodel = Net(28*28, 196, 10, p=0.2)\nprint(model)\nprint(list(model.parameters()))\n\ntrain_data = dsets.MNIST(\"/home/xuren\", download=False,\n train=True, transform=transforms.ToTensor())\nvalid_data = dsets.MNIST(\"/home/xuren\", download=False,\n train=False, transform=transforms.ToTensor())\n\ntrainloader = DataLoader(train_data, batch_size=32)\nvalidloader = DataLoader(valid_data, batch_size=32)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.01)\n\nprint()\nfor e in range(10):\n model.train()\n for x, y in trainloader:\n yhat = model(x.view(-1, 28*28))\n loss = criterion(yhat, y)\n optimizer.zero_grad()\n\n loss.backward()\n optimizer.step()\n\n print()\n print(\"manual eval...\")\n correct = 0\n model.eval()\n for x, y in validloader:\n yhat = model(x.view(-1, 28*28))\n _, labels = torch.max(yhat, 1)\n correct += (y == labels).sum().item()\n print(correct / len(valid_data))\n" }, { "alpha_fraction": 0.5996240377426147, "alphanum_fraction": 0.6184210777282715, "avg_line_length": 20.714284896850586, "blob_id": "c9adb499b9eb40f6c8727469b89d6bc047ba1b6a", "content_id": "69591f1478866ff6522136bc28dc8e3384dc085a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "no_license", "max_line_length": 73, "num_lines": 49, "path": "/p4_deep_nn_pytorch/03_03_pytorch_optimization.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch import nn, optim\n\n# optim contains optimizers\n\nmodel = nn.Linear(1, 1)\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\nprint(model, optimizer)\nprint()\nprint(list(model.parameters()))\nprint()\n\n\nclass MyData():\n def __init__(self):\n self.x = torch.arange(-3, 3, 0.1, requires_grad=True).view(-1, 1)\n f = -3 * self.x + torch.randn(self.x.size()) * 0.1\n self.y = torch.tensor(f, requires_grad=True)\n self.len = self.x.shape[0]\n\n # getter\n def __getitem__(self, index):\n return self.x[index], self.y[index]\n\n # len\n def __len__(self):\n return self.len\n\ndataset = MyData()\nprint(dataset[:5])\n\ntrainloader = DataLoader(dataset, batch_size=5)\n\ndef criterion(yhat, y):\n return torch.mean( (yhat-y)**2 )\n\n\nfor e in range(10):\n for x,y in trainloader:\n yhat = model(x)\n loss=criterion(yhat, y)\n 
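# note: backward() accumulates gradients into each parameter's .grad buffer; optimizer.step() then applies the SGD update, and zero_grad() clears the buffers for the next batch\n        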
loss.backward()\n optimizer.step()\n\n optimizer.zero_grad()\n\n print(model.weight.data, model.bias.data)\n" }, { "alpha_fraction": 0.4899713397026062, "alphanum_fraction": 0.5702005624771118, "avg_line_length": 10.633333206176758, "blob_id": "1b98e43861d2d2b4247bc2e822f16e8de247f5af", "content_id": "733373bf11aacf63cbb2ce8fafb285a733ccbbf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 46, "num_lines": 30, "path": "/p3_nn_keras/01_numpy_forward_prop.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import numpy as np\n\nx1 = 0.5\nx2 = -0.35\n\nw1 = 0.55\nw2 = 0.45\n\nb1 = 0.15\n\ninput_layer = np.array([x1, x2])\nhidden_layer = np.array([w1, w2])\n\n\ndef sigmoid(x):\n\n return 1.0 / (1.0 + np.exp(-x))\n\n\ndef main():\n print(input_layer)\n print(hidden_layer)\n\n z = np.dot(input_layer, hidden_layer) + b1\n print(z)\n\n print(sigmoid(z))\n\n\nmain()\n" }, { "alpha_fraction": 0.8309859037399292, "alphanum_fraction": 0.8309859037399292, "avg_line_length": 22.66666603088379, "blob_id": "f8d891f4cd2bd0bfd4ae317fd21b327c06a65d7c", "content_id": "a9d77befc0c25b447fce3845941fbeeb163f1669", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 31, "num_lines": 3, "path": "/p3_nn_keras/04_keras_autoencoders.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n# autoencoders\n# Restricted Boltzmann Machines\n" }, { "alpha_fraction": 0.5268630981445312, "alphanum_fraction": 0.5797227025032043, "avg_line_length": 26.4761905670166, "blob_id": "2e3519dfbcf4d302e783678146a066ac037b3652", "content_id": "9f358cffb28339c1fe30592a03eca483a4425f63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 76, "num_lines": 42, "path": "/p4_deep_nn_pytorch/09_01_convolutions.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n# we have a kernel (W)\n# it is akin to wx + b\n# we output a new matrix\n# the activation map\n\nK = 2\nconv1 = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=K)\nconv1.state_dict()['weight'][0][0] = torch.tensor([[1.0, 1.0], [1.0, 1.0]])\nconv1.state_dict()['bias'][0] = 0.0\nconv1.state_dict()\nprint(conv1)\n\n\nclass CNN(nn.Module):\n def __init__(self, out_1, out_2, kernel_size, padding):\n super(CNN, self).__init__()\n self.cnn1 = nn.Conv2d(\n in_channels=1, out_channels=out_1, kernel_size=5, padding=2)\n self.maxpool1 = nn.MaxPool2d(kernel_size)\n self.cnn2 = nn.Conv2d(\n in_channels=out_1, out_channels=out_2, kernel_size=5, padding=2)\n self.maxpool2 = nn.MaxPool2d(kernel_size)\n self.fc1 = nn.Linear(out_2*4*4, 10)\n\n def forward(self, x):\n x = self.cnn1(x)\n x = torch.relu(x)\n x = self.maxpool1(x)\n x = self.cnn2(x)\n x = torch.relu(x)\n x = self.maxpool2(x)\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n\n return x\n\n\nmodel = CNN(out_1=16, out_2=32, kernel_size=5, padding=2)\nprint(model)\n" }, { "alpha_fraction": 0.700214147567749, "alphanum_fraction": 0.7087794542312622, "avg_line_length": 22.350000381469727, "blob_id": "3fb5fc1aace8db7b4b29c73c256d63aa89e2fbc0", "content_id": "cc68e68732324c75cb252c1c71f4eaf173b8112e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1401, "license_type": "no_license", "max_line_length": 99, "num_lines": 60, "path": "/p3_nn_keras/03_dl_keras_intro.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "from tensorflow import keras\nfrom tensorflow.keras import Sequential\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# tensorflow is from Google, with keras integrated\n# pytorch is from torch which is written in Lua\n# pytorch is gaining popular because it is more pythonic\n# pytorch is supported by Facebook - feels like NumPy\n\n\nprint(keras)\nprint(Sequential)\n\n\ndf = pd.read_csv(\n \"https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/concrete.csv\")\n\n\nprint(df.head())\n\n\n# LR\n\ndef LinearRegression(df):\n model = Sequential()\n\n label = df.pop('strength')\n features = df.copy()\n n_cols = features.shape[1]\n\n model.add(keras.layers.Dense(5, activation='relu', input_shape=(n_cols,)))\n model.add(keras.layers.Dense(1))\n print(model.summary())\n\n model.compile(optimizer='adam', loss='mse')\n\n history = model.fit(features, label, epochs=1000, batch_size=64)\n return history\n\n\nlr = LinearRegression(df)\n\n\n# Classification\n\ndef Classification(df):\n model = Sequential()\n model.add(keras.layers.Dense(8, activation='relu', input_shape=(n_cols,)))\n model.add(keras.layers.Dense(4, activation='relu'))\n model.add(keras.layers.Dense(4, activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n\nclf = Classification()\n" }, { "alpha_fraction": 0.6984924674034119, "alphanum_fraction": 0.713567852973938, "avg_line_length": 12.266666412353516, "blob_id": "023902497ece2ebea3033df8263fb392f69b1d0d", "content_id": "52d282925405ba88437cf6609b9f1285f51a232a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 48, "num_lines": 15, "path": "/p4_deep_nn_pytorch/01_03_differentiation.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nimport pandas as pd\n\n\nx = torch.tensor(2.0, requires_grad=True)\ny = x ** 2\n\nprint(x, y)\n\ny.backward()\nprint(x.grad)\n\n\n# backwards graph that calculates the derivative\n" }, { "alpha_fraction": 0.6094295978546143, "alphanum_fraction": 0.6344586610794067, "avg_line_length": 25.84375, "blob_id": "b9d4b67183be0f08c73532c44b6c134f1abe454c", "content_id": "d1426d25469178fdd1db70570b53f5b02f1f7dbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1718, "license_type": "no_license", "max_line_length": 72, "num_lines": 64, "path": "/p4_deep_nn_pytorch/08_01_deep_nn.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision.datasets as dsets\nfrom torchvision import transforms\nfrom torch import nn, optim\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass Model(nn.Module):\n def __init__(self, input_dim, hidden_dim1, hidden_dim2, output_dim):\n super(Model, self).__init__()\n self.linear1 = nn.Linear(input_dim, hidden_dim1)\n self.linear2 = nn.Linear(hidden_dim1, hidden_dim2)\n self.linear3 = nn.Linear(hidden_dim2, output_dim)\n\n def forward(self, x):\n x = torch.relu(self.linear1(x))\n x = torch.relu(self.linear2(x))\n x = self.linear3(x)\n\n return x\n\n\nmymodel = Model(28*28, 128, 64, 
10)\n\nprint(mymodel)\nprint()\n\ndata_path = \"/home/xuren\"\ntrain_data = dsets.MNIST(data_path, download=True,\n train=True, transform=transforms.ToTensor())\nvalid_data = dsets.MNIST(data_path, download=True,\n train=False, transform=transforms.ToTensor())\n\nprint()\nprint(train_data)\nprint(valid_data)\nprint()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(mymodel.parameters(), lr=0.01)\n\ntrainloader = DataLoader(train_data, batch_size=32)\nvalidloader = DataLoader(valid_data, batch_size=32)\n\nfor e in range(4):\n for x, y in trainloader:\n yhat = mymodel(x.view(-1, 28*28))\n loss = criterion(yhat, y)\n optimizer.zero_grad()\n\n loss.backward()\n optimizer.step()\n\n print(loss)\n\n print()\n print(\"manual check on validation set...\")\n correct = 0\n for x, y in validloader:\n yhat = mymodel(x.view(-1, 28*28))\n _, label = torch.max(yhat, 1)\n correct += (label == y).sum().item()\n accuracy = correct / len(valid_data)\n print(accuracy)\n" }, { "alpha_fraction": 0.5712820291519165, "alphanum_fraction": 0.6256410479545593, "avg_line_length": 16.410715103149414, "blob_id": "185294c039d11d59516ac0e4434c71abeb4932ef", "content_id": "798d76239431a9cfd0acb0f5d5c214313f11f281", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 975, "license_type": "no_license", "max_line_length": 112, "num_lines": 56, "path": "/p4_deep_nn_pytorch/01_tensor_intro.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\n\n\na = torch.tensor([0.1, 0.5, 1.1, 1.5, 2.1])\nprint(a)\nprint(a.dtype)\n\nprint()\nb = torch.tensor([1, 2, 3, 4])\nprint(b)\nb = b.type(torch.float64)\nprint(b)\nprint(b.dtype)\nprint(b.numpy())\n\nprint()\nc = np.array([1, 2, 3, 4, 5, 6, 7, 8])\nprint(c, type(c))\nprint(torch.from_numpy(c))\nprint(torch.from_numpy(c).numpy())\nprint(torch.from_numpy(c).tolist())\n\n\nprint()\nprint(\"slicing and dicing\")\nd = b[1:3]\nprint(d)\nd[1] = 99\nprint(d)\nprint(b)\nprint(\" as you can see, be careful with slicing and copying: use deep copy to make edits rather than pointers \")\n\na = torch.tensor([10, 9, 8, 7])\nprint(a[1:3])\n\n\nprint()\nprint(\"linspace...\")\nprint(torch.linspace(-2, 2, steps=100))\n\n\nprint()\nprint(\"dot product...\")\nu = torch.tensor([1, 2])\nv = torch.tensor([0, 1])\nprint(u, v)\nprint(torch.dot(u, v))\n\n\nprint()\nprint(\"different from multiplication...\")\nX = torch.tensor([[1, 0], [0, 1]])\nY = torch.tensor([[2, 1], [1, 2]])\nX_times_Y = X * Y\nprint(X_times_Y)\n" }, { "alpha_fraction": 0.4409330189228058, "alphanum_fraction": 0.5522949695587158, "avg_line_length": 16.486841201782227, "blob_id": "a04caa83ee8008b660aae909fcbc371a1b088c51", "content_id": "01402ea09cdcb83e9584e2edd1ba1b3606595319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 76, "num_lines": 76, "path": "/p3_nn_keras/02_grad_descent.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import numpy as np\n\nx1 = [0.1]\nlabel = [0.25]\n\nw1 = [0.15]\nb1 = [0.40]\n\nw2 = [0.45]\nb2 = [0.65]\n\nz1 = np.dot(x1, w1) + b1\nprint(\"z1 is\", z1)\n\n\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\n\n\na1 = sigmoid(z1)\nprint(\"a1 is\", a1)\n\nprint()\nprint(\"layer 2...\")\n\nz2 = np.dot(a1, w2) + b2\nprint(\"z2 is\", z2)\na2 = sigmoid(z2)\nprint(\"a2 is\", a2)\n\nprint()\nprint(\"ground truth label is...\", label)\n\nprint()\nprint(\"we 
need to compute errors and back propagate\")\n\nprint()\nlr = 0.4\nprint(\"learning rate is\", lr)\n\nprint()\ne2 = -(label[0] - a2[0]) * (a2[0] * (1 - a2[0])) * a1[0]\nprint(\"error for layer 2 is\", e2)\ne2lr = lr * e2\nw2 = w2 - e2lr\nprint(\"the new w2 is\", w2)\n\ne2b = -(label[0] - a2[0]) * (a2[0] * (1-a2[0]))\ne2blr = lr * e2b\nb2 = b2 - e2blr\nprint(\"the new b2 is\", b2)\n\n\nprint()\ne1 = -(label[0] - a2[0]) * (a2[0] * (1-a2[0])) * \\\n    w2[0] * a1[0] * (1-a1[0]) * x1[0]\nprint(\"error for layer 1 is\", e1)\nw1 = w1 - (e1 * lr)\nprint(\"the new w1 is\", w1)\n\ne1b = -(label[0] - a2[0]) * (a2[0] * (1-a2[0])) * \\\n    w2[0] * a1[0] * (1-a1[0]) * 1\nb1 = b1 - (e1b * lr)\nprint(\"the new b1 is\", b1)\n\n\nprint()\nprint(\"now we can forward propagate again\")\nz1 = np.dot(x1, w1) + b1\na1 = sigmoid(z1)\nz2 = np.dot(a1, w2) + b2\na2 = sigmoid(z2)\nprint(z1, a1, z2)\nprint(a2)\n\nprint(\"we have reduced our prediction term to be closer to the truth label\")\n" }, { "alpha_fraction": 0.6198770403862, "alphanum_fraction": 0.6280737519264221, "avg_line_length": 25.6842098236084, "blob_id": "46d0094fecad71cbfddd4fd0d21d9e078ec44b3f", "content_id": "9411c8eefcf85a44de99cbdf3c81e29ce143ed2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "no_license", "max_line_length": 69, "num_lines": 38, "path": "/p4_deep_nn_pytorch/08_02_nn_modulelist.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision.datasets as dsets\nfrom torchvision import transforms\nfrom torch import nn, optim\nfrom torch.utils.data import Dataset, DataLoader\n\nLayers = [2, 3, 4, 3]\n\nprint(list(zip(Layers, Layers[1:])))\n\nfor x, y in zip(Layers, Layers[1:]):\n    print(x, y)\n\n\nclass Net(nn.Module):\n\n    # Constructor\n    def __init__(self, Layers):\n        super(Net, self).__init__()\n        self.hidden = nn.ModuleList()\n        for input_size, output_size in zip(Layers, Layers[1:]):\n            self.hidden.append(nn.Linear(input_size, output_size))\n\n    # Prediction\n    def forward(self, activation):\n        L = len(self.hidden)\n        for (l, linear_transform) in zip(range(L), self.hidden):\n            if l < L - 1:\n                activation = torch.relu(linear_transform(activation))\n            else:\n                activation = linear_transform(activation)\n        return activation\n\n\nmymodel = Net(Layers)\n\nprint(mymodel)\nprint(list(mymodel.parameters()))\n" }, { "alpha_fraction": 0.6123287677764893, "alphanum_fraction": 0.6534246802330017, "avg_line_length": 19.22222137451172, "blob_id": "b3544cb9d9c3ab73469a1f490bbcd7448d9a8f1c", "content_id": "9b11f90cbe006f38a1b178053a754ddb2dc125d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 730, "license_type": "no_license", "max_line_length": 72, "num_lines": 36, "path": "/p4_deep_nn_pytorch/04_01_mult_lr.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass LR(nn.Module):\n    def __init__(self, input_size, output_size):\n        super(LR, self).__init__()\n        self.linear = nn.Linear(input_size, output_size)\n\n    def forward(self, x):\n        return self.linear(x)\n\n\n\nmymodel = LR(input_size=4, output_size=1)\nprint(mymodel)\n\nprint(list(mymodel.parameters()))\n\n\nX = torch.tensor([[11.0, 12.0, 13, 14], [11, 12, 13, 14]])\ny = torch.tensor([[17.0], [17]])\nprint(mymodel(X))\n\nprint()\nprint(\"Manual training...\")\noptimizer = torch.optim.SGD(mymodel.parameters(), lr=0.01, momentum=0.9)\ncriterion = 
nn.MSELoss()\n\nyhat = mymodel(X)\nloss = criterion(yhat, y)\nloss.backward()\noptimizer.step()\n\nprint(optimizer)\nprint(list(mymodel.parameters()))\n\n\n" }, { "alpha_fraction": 0.6299019455909729, "alphanum_fraction": 0.6372548937797546, "avg_line_length": 21.66666603088379, "blob_id": "06d0b974ebda85010234dc37ac234871fbc7448a", "content_id": "f74f29fe2f88d864f8baa5291031f388543d96bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 58, "num_lines": 36, "path": "/p4_deep_nn_pytorch/01_04_datasets_and_compose.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import Dataset\nimport torchvision\nfrom torchvision.transforms import transforms, Compose\nimport torchvision.datasets as dsets\n\n\nclass demoset(Dataset):\n def __init__(self, length=100, transform=None):\n self.len = length\n self.x = torch.ones(length, 2)\n self.y = torch.zeros(length, 1)\n self.transform = transform\n\n # getter\n def __getitem__(self, index):\n sample = self.x[index], self.y[index]\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n # len property\n\n def __len__(self):\n return self.len\n\n\nmy_demoset = demoset()\n\nprint(my_demoset)\nprint(my_demoset[3])\n\n\ndataset = dsets.MNIST(root='~', train=True, download=True,\n transform=transforms.ToTensor())\n" }, { "alpha_fraction": 0.6273062825202942, "alphanum_fraction": 0.6420664191246033, "avg_line_length": 17.066667556762695, "blob_id": "f79b9fcf21f7d65295da6ae0ef0dce29f9666943", "content_id": "a15e3052805318a57a6051c0e83d6aec083af8f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 51, "num_lines": 45, "path": "/p4_deep_nn_pytorch/05_01_linear_classifiers.py", "repo_name": "xr71/ibm-ai-engineering", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\n\nsig = nn.Sigmoid()\nprint(sig)\n\nz = torch.arange(-10, 10, 0.1).view(-1, 1)\nyhat = sig(z)\n# print(yhat)\n\n\nprint(z.size())\nprint(z.shape)\n\n\nprint()\nprint(\"Using nn Sequential...\")\nmodel = nn.Sequential(\n nn.Linear(1,1),\n nn.Sigmoid()\n )\n\nprint(model)\nprint(list(model.parameters()))\n\nprint()\nprint(\"Using nn Module class...\")\n\nclass logistic_regression(nn.Module):\n def __init__(self, in_size, out_size):\n super(logistic_regression, self).__init__()\n self.linear = nn.Linear(in_size, out_size)\n\n def forward(self, x):\n x = torch.sigmoid(self.linear(x))\n return x\n\nlr = logistic_regression(1, 1)\nprint(lr)\n\n\n# binary cross entropy loss\ncriterion = nn.BCELoss()\nprint(criterion)\n" } ]
18
ckmachens/spikes
https://github.com/ckmachens/spikes
1893a009dfe59810bc32e55ea8bb4726d182f415
5091269e00fc9761e9373d3a23a81bde52f22034
6583d948daa861d037c0a1474328a6fee32947c4
refs/heads/master
2021-01-11T19:18:23.198913
2021-01-08T16:42:46
2021-01-08T16:42:46
79,347,322
0
0
null
2017-01-18T14:23:28
2017-01-18T14:23:28
2017-01-18T14:33:48
null
[ { "alpha_fraction": 0.6940298676490784, "alphanum_fraction": 0.7481343150138855, "avg_line_length": 47.727272033691406, "blob_id": "17a55619d55eaeebd72493d86abd3b31587a71d4", "content_id": "0442900a443cf81ff71c9efe3e16c4f50d09004b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 536, "license_type": "no_license", "max_line_length": 141, "num_lines": 11, "path": "/README.md", "repo_name": "ckmachens/spikes", "src_encoding": "UTF-8", "text": "# spikes\n\nHere we provide code for various simulations of spike coding networks.\nMatlab or Python Code for the following papers is available:\n\n* OptimalCompensation: Barrett DGT, Deneve S, Machens CK (2016). Optimal compensation for neuron\nloss. eLife Dec 9;5 pii:e12454\n\n* UnsupervisedLearning: Brendel W, Bourdoukan R, Vertechi P, Machens CK, Deneve S (2020). Learning to represent signals spike by spike. PLOS CB, 16(3):e1007692.\n\n* ConvexOptimization: Mancoo A, Keemink SW, Machens CK (2020). Understanding spiking networks through convex optimization. NeurIPS, in press.\n" }, { "alpha_fraction": 0.7398794293403625, "alphanum_fraction": 0.7596899271011353, "avg_line_length": 33.06060791015625, "blob_id": "a8db8f0cdcb0b3f3b668fa3edbb57f5d7e11794a", "content_id": "7de6491b323f312322623e569357c9266ab8a53d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1161, "license_type": "no_license", "max_line_length": 70, "num_lines": 33, "path": "/UnsupervisedLearning/Figure5/readme.txt", "repo_name": "ckmachens/spikes", "src_encoding": "UTF-8", "text": "The program \"overnight\" runs the learning algorithm on the speech\r\ndataset, and plots fig 5. If you launch it, everything should run\r\nsmoothly, and it should take around 6 to 10 hours with a typical\r\ncomputer.\r\n\r\nThe speech data set (38MB) can be downloaded via\r\n\r\nhttps://www.dropbox.com/s/1i5po9dlt7m9n1k/speech.mat?dl=0\r\n\r\nIf you prefer to use the already learnt weights (Icurrent) and just\r\nretrain the network on the new stimulus, set the variable 'learnanew' \r\nto 1, then run fig5Anew. (takes 5 to 10 minutes)\r\n\r\nIf you prefer to use the already retrained weights, set 'learnanew'\r\nto 0, then run fig5Anew.\r\n\r\nIndividual programs:\r\n\r\nnetall runs the learning algorithm on a random speech segment.\r\n\r\nnetrun runs the network on a random speech segment without\r\nplasticity\r\n\r\nclampall clamps all the neurons one by one (i.e. they are prevented\r\nfrom firing) and measure their E and I currents.\r\n\r\nnetruncl runs the clamped network on any input stimulus.\r\n\r\nnetadapt re-train the network on arbitrary input signals. 
\r\n\r\nfig5Anew plots the same results as fig 5 (since a different\r\nrandom seed is used each time, this will never be exactly like fig\r\n5).\r\n\r\n\r\n" }, { "alpha_fraction": 0.5546391606330872, "alphanum_fraction": 0.5653608441352844, "avg_line_length": 30.506492614746094, "blob_id": "16649c532576171b5e4f645953506462ae954df3", "content_id": "e0ca76b28831f18153da48901a4bdae5f02e7686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2425, "license_type": "no_license", "max_line_length": 85, "num_lines": 77, "path": "/ConvexOptimization/plotting.py", "repo_name": "ckmachens/spikes", "src_encoding": "UTF-8", "text": "import holoviews as hv\nimport numpy as np\n\n# get colors\ncolors = hv.core.options.Cycle.default_cycles['default_colors']\nNcolors = len(colors)\n\ndef spike_plot(times, spikes, base_offset, offset):\n    \"\"\"Plots a set of neurons' spikes, given a 2d array of 0's and 1's.\n\n    Parameters\n    ----------\n    times : array\n        array of times\n    spikes : array\n        2D-array of 0's and 1's (1's being spikes),\n        of size (n_cells, n_timepoints)\n    base_offset : float\n        y-axis offset of all spikes\n    offset : float\n        y-axis offset between each row of spikes\n\n    Returns\n    -------\n    Holoviews Overlay\n        An overlay with all the spikes shown\n    \"\"\"\n    # make spike plot animation\n    out = hv.Overlay()\n    for i in range(spikes.shape[0]):\n        spiketimes = times[np.where(spikes[i, :]==1)[0]]\n        if len(spiketimes)>0:\n            opts = hv.opts.Scatter(color=colors[i%len(colors)])\n            out *= hv.Scatter(\n                zip(spiketimes, np.ones(len(spiketimes))*offset*i+base_offset),\n                kdims='Time (s)',\n                vdims='Neuron', group='spikes').opts(opts)\n        else:\n            opts = hv.opts.Scatter(color='w', alpha=0)\n            out *= hv.Scatter([],\n                kdims='Time (s)',\n                vdims='Neuron', group='spikes').opts(opts)\n    return out\n\ndef plot_spikes_single(times, spikes, color, alpha=1, s=10, offset=0, base_offset=0):\n    \"\"\"Plots a single neuron's spikes.\n\n    Parameters\n    ----------\n    times : array\n        array of times\n    spikes : array\n        1D-array of 0's and 1's (1's being spikes) for a single neuron,\n        of size (n_timepoints,)\n    color : string\n        the color of the plotted spikes\n    alpha : float\n        the alpha of the plotted spikes (between 0 and 1)\n    s : int\n        size of the plotted spikes\n    offset : float\n        y-axis offset between each row of spikes\n    base_offset : float\n        y-axis offset of all spikes\n\n    Returns\n    -------\n    Holoviews Overlay\n        An overlay with all the spikes shown\n    \"\"\"\n    spiketimes = times[np.where(spikes==1)[0]]\n    opts = hv.opts.Scatter(color=color, s=s, alpha=alpha)\n    out = hv.Scatter(\n        zip(spiketimes, np.ones(len(spiketimes))*offset+base_offset),\n        kdims='Time (s)',\n        vdims='Neuron', group='spikes2').opts(opts)\n    return out" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 7, "blob_id": "cb4d21aa64fc78fcd710bc8972291b3385397fa6", "content_id": "a809f9fa927b536cefe418198174b42e17402c69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 56, "license_type": "no_license", "max_line_length": 10, "num_lines": 7, "path": "/ConvexOptimization/requirements.txt", "repo_name": "ckmachens/spikes", "src_encoding": "UTF-8", "text": "numpy\nmatplotlib\nnumba\nholoviews\njupyter\nscipy\ncolorcet\n" }, { "alpha_fraction": 0.778372585773468, "alphanum_fraction": 0.7880085706710815, "avg_line_length": 39.60869598388672, "blob_id": "f7c05a34a5905b6c879cf26a315f508625f19c87", "content_id": "37325a95e82ed0cf8fa8c0eecbd6119192d3e6db", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 934, "license_type": "no_license", "max_line_length": 173, "num_lines": 23, "path": "/ConvexOptimization/README.md", "repo_name": "ckmachens/spikes", "src_encoding": "UTF-8", "text": "# Understanding spiking networks through convex optimization\n\nThis folder contains the Python source code to generate Figures 3 and 4 in the paper.\n\n## Requirements\n\nTo install requirements:\n\n```setup\npip install -r requirements.txt\n```\n\n\n## Using the code\nThe code is provided as Jupyter notebooks. Figure 3 can be reproduced with `Convex_SNNs_Figure3.ipynb`, and Figure 4 with `Convex_SNNs_Figure4.ipynb`.\n\n`plotting.py` contains some plotting functions for Figure 4, and `snn_cvx.py` contains the simulation functions used for both figures.\n\nTo just peruse the code and resulting outputs without the need for installing any requirements, see the HTML files `Convex_SNNs_Figure3.html` and `Convex_SNNs_Figure3.html`.\n\nFor a general tutorial on using jupyter notebooks see: https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook.\n\nFor tutorials on how to use the plotting toolbox we used see: https://holoviews.org/.\n" } ]
5
suzhaoen/Customer-specified-predictive-models
https://github.com/suzhaoen/Customer-specified-predictive-models
a8513b5a2d64c03b6995692419241474cb30fb8d
43386e23d092a17c37ea89d5d3bd2cde45d6da4b
6fb13c0a0cfedb78d43b4f66951760ae5f2f296a
refs/heads/master
2018-04-20T03:20:35.221593
2017-05-21T15:12:25
2017-05-21T15:12:25
90,692,038
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.648353636264801, "alphanum_fraction": 0.6532654166221619, "avg_line_length": 41.97600173950195, "blob_id": "e4b12d0eac555c239a516c3b57114dd489bec59f", "content_id": "a4ece2e60e2b66d438e6d7af7d0e8af58072c3f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5497, "license_type": "no_license", "max_line_length": 153, "num_lines": 125, "path": "/random_forest.py", "repo_name": "suzhaoen/Customer-specified-predictive-models", "src_encoding": "UTF-8", "text": "\"\"\"Weighted model based on random forest classification\"\"\"\r\n\r\n# Author: Zhaoen Su <[email protected]>\r\n\r\n# Not converted into a class yet.\r\n# However, with the same data processing function in the k-NN classification file\r\n# the functions below implement my modified random forest method\r\n# Read the report for more details\r\n\r\ndef user_predict(rfclassifier, Xtrain, Ytrain, xtest, weight, bin_horizontal, bin_vertical):\r\n \"\"\"predict the bin of the dropoff location for a single user\r\n Parameters\r\n ===========\r\n Xtrain : pd dataframe, shape (n_test_samples, n_features)\r\n The training feature data\r\n Ytrain : pd dataframe, shape (n_test_samples, n_targets+1)\r\n The training target data\r\n Xtest : pd dataframe, shape (n_test_samples, n_features)\r\n The test features data\r\n weight : float\r\n The weight parameter of the model\r\n lat_min : float\r\n The minium latitude of the bound\r\n lng_min : float\r\n The minium longitude of the bound\r\n width : float\r\n The latitude range of the bound\r\n length : float\r\n The longtitude range of the bound\r\n bin_horizontal : int\r\n The number of bins in horizontal direction\r\n bin_vertical : int\r\n The number of bins in vertical direction\r\n Returns\r\n ========\r\n pred : pd dataframe\r\n The predictive bin of the user\r\n The dataframe has a single column: pred_coor_val\r\n pred_coor_val is the number of the preditive bin\r\n probabilities : list of floats\r\n The probabilities distribtution of the discretized bins\r\n It is ready to be returned.\r\n \"\"\"\r\n #initialize the probabilities for dropoff at each bin as uniformly\r\n proba_outcomes_user = np.array([1/(bin_horizontal*bin_vertical)]*(int(bin_horizontal*bin_vertical)))\r\n\r\n if Xtrain.loc[Xtrain.uid == xtest.uid].shape[0] > 0:\r\n Xtrain_user = Xtrain.loc[Xtrain.uid == xtest.uid, ['begintrip_lat', 'begintrip_lng', 'is_weekend', 'time']]\r\n Ytrain_user = Ytrain.loc[Ytrain.uid == xtest.uid, ['coor_val']]\r\n rfc_user = rfclassifier.fit(Xtrain_user, Ytrain_user.coor_val.ravel())\r\n xtest_user = xtest.drop('uid', axis=0)\r\n xtest_user = xtest_user.reshape(1, -1)\r\n possible_outcomes_user = np.unique(np.array(Ytrain_user.coor_val))\r\n probabilities_user = rfc_user.predict_proba(xtest_user)\r\n \r\n for i in range(len(possible_outcomes_user)):\r\n proba_outcomes_user[int(possible_outcomes_user[i])] += probabilities_user[0][i]\r\n\r\n proba_outcomes_user = proba_outcomes_user**(weight)\r\n \r\n # sampling to speed up\r\n itrain, itest = train_test_split(range(Ytrain.shape[0]), train_size=0.2)\r\n mask=np.ones(Ytrain.shape[0], dtype='int')\r\n mask[itrain]=1\r\n mask[itest]=0\r\n mask = (mask==1)\r\n Xtrain, Ytrain = Xtrain[mask], Ytrain[mask]\r\n \r\n Xtrain_all = Xtrain.loc[:, ['begintrip_lat', 'begintrip_lng', 'is_weekend', 'time']]\r\n Ytrain_all = Ytrain.loc[:, ['coor_val']]\r\n rfc_all = rfclassifier.fit(Xtrain_all, Ytrain_all.coor_val.ravel())\r\n xtest_all = xtest.drop('uid', axis=0)\r\n xtest_all = 
xtest_all.reshape(1, -1)\r\n    possible_outcomes_all = np.unique(np.array(Ytrain_all.coor_val))\r\n    probabilities_all = rfc_all.predict_proba(xtest_all)\r\n    \r\n    # initialize the probabilities of dropoff at each bin uniformly\r\n    proba_outcomes_all = np.array([1/(bin_horizontal*bin_vertical)]*(int(bin_horizontal*bin_vertical)))\r\n    \r\n    for i in range(len(possible_outcomes_all)):\r\n        proba_outcomes_all[int(possible_outcomes_all[i])] += probabilities_all[0][i]\r\n    \r\n    probabilities = np.multiply(proba_outcomes_user, proba_outcomes_all)\r\n    most_likely_coor_val = np.argmax(probabilities)\r\n    \r\n    # normalize the probabilities distribution\r\n    probabilities /= sum(probabilities)\r\n    return pd.Series({'pred_coor_val': most_likely_coor_val})\r\n\r\ndef rf_predict(Xtrain, Ytrain, Xtest, weight, lat_min, lng_min, width, length, bin_horizontal, bin_vertical):\r\n    \"\"\"predict the dropoff location for test samples\r\n    Implement the modified rf method\r\n    Parameters\r\n    ===========\r\n    Xtrain : pd dataframe, shape (n_test_samples, n_features)\r\n        The training feature data\r\n    Ytrain : pd dataframe, shape (n_test_samples, n_targets+1)\r\n        The training target data\r\n    Xtest : pd dataframe, shape (n_test_samples, n_features)\r\n        The test features data\r\n    weight : float\r\n        The weight parameter of the model\r\n    lat_min : float\r\n        The minimum latitude of the bound\r\n    lng_min : float\r\n        The minimum longitude of the bound\r\n    width : float\r\n        The latitude range of the bound\r\n    length : float\r\n        The longitude range of the bound\r\n    bin_horizontal : int\r\n        The number of bins in horizontal direction\r\n    bin_vertical : int\r\n        The number of bins in vertical direction\r\n    Returns\r\n    ========\r\n    pred : pd dataframe\r\n        The prediction of the Xtest\r\n        The dataframe includes pred_lat, pred_lng, pred_coor_val columns\r\n        pred_coor_val is the number of the predictive bin\r\n    \"\"\"\r\n    rfclassifier = RandomForestClassifier(n_estimators=20, n_jobs=-1)\r\n    pred = Xtest.apply(lambda row: user_predict(rfclassifier, Xtrain, Ytrain, row, weight, bin_horizontal, bin_vertical), axis=1)\r\n    pred = pred.join(pred.apply(lambda row: val2coordinates(row.pred_coor_val, lat_min, lng_min, width, length, bin_horizontal, bin_vertical), axis=1))\r\n    return pred\r\n" }, { "alpha_fraction": 0.5723928213119507, "alphanum_fraction": 0.5795032978057861, "avg_line_length": 36.8937873840332, "blob_id": "de09055e001736218534cba244ae411ab6f5923b", "content_id": "a41d0f323a76ab84db84fedaabe5fb4dd3c923", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19408, "license_type": "no_license", "max_line_length": 215, "num_lines": 499, "path": "/api.py", "repo_name": "suzhaoen/Customer-specified-predictive-models", "src_encoding": "UTF-8", "text": "#!/bin/python3\r\n\"\"\"Weighted model based on k-nearest neighbors classification\"\"\"\r\n\r\n# Author: Zhaoen Su <[email protected]>\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math\r\nimport time\r\nimport datetime\r\nfrom sklearn.neighbors import KDTree\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\r\ndef check_data_names(rides):\r\n    \"\"\"Check if the dataframe has the right columns\r\n    Parameters\r\n    ===========\r\n    rides : pd dataframe\r\n        The data\r\n    Returns\r\n    ========\r\n    Raise ValueError unless the columns are exactly\r\n        'begintrip_at', \r\n        'begintrip_lat',\r\n        'begintrip_lng',\r\n        'dropoff_lat',\r\n        'dropoff_lng',\r\n        'uid'\r\n        Return None given 
correct names\r\n \"\"\"\r\n correct_names = ['begintrip_at',\r\n 'begintrip_lat',\r\n 'begintrip_lng',\r\n 'dropoff_lat',\r\n 'dropoff_lng',\r\n 'uid']\r\n names = rides.columns.tolist()\r\n if rides.shape[1] != len(correct_names) or correct_names.sort() != names.sort():\r\n raise ValueError(\"The columns should have and only have begintrip_at, begintrip_lat, begintrip_lng, dropoff_lat, dropoff_lng, uid, is_weekend, time.\")\r\n \r\n \r\ndef check_X_names(Xtest):\r\n \"\"\"Check if the dataframe of the feature data has the right columns\r\n Parameters\r\n ===========\r\n Xtest : pd dataframe\r\n The feature data\r\n Returns\r\n ========\r\n Raise ValueError if the columns are\r\n 'begintrip_at', \r\n 'begintrip_lat',\r\n 'begintrip_lng',\r\n 'uid'\r\n Return None given correct names\r\n \"\"\"\r\n correct_names = ['begintrip_lat',\r\n 'begintrip_lng',\r\n 'uid', \r\n 'is_weekend', \r\n 'time']\r\n names = Xtest.columns.tolist()\r\n if Xtest.shape[1] != len(correct_names) or correct_names.sort() != names.sort():\r\n raise ValueError(\"The columns should have and only have begintrip_lat, begintrip_lng, uid, is_weekend, time.\")\r\n \r\n\r\ndef check_Y_names(Ytest):\r\n \"\"\"Check if the dataframe of target data has the right columns\r\n Parameters\r\n ===========\r\n Ytest : pd dataframe\r\n The target data\r\n Returns\r\n ========\r\n Raise ValueError if the columns are\r\n 'dropoff_lat',\r\n 'dropoff_lng',\r\n 'uid'\r\n Return None given correct names\r\n \"\"\"\r\n correct_names = ['dropoff_lat',\r\n 'dropoff_lng',\r\n 'uid']\r\n names = Ytest.columns.tolist()\r\n if Ytest.shape[1] != len(correct_names) or correct_names.sort() != names.sort():\r\n raise ValueError(\"The columns should have and only have dropoff_lat, dropoff_lng and uid.\")\r\n \r\n\r\ndef geo_bound(rides):\r\n \"\"\"Get the latitude and longtitude bounds of the samples\r\n Parameters\r\n ===========\r\n rides : pd dataframe\r\n The data\r\n Returns\r\n ========\r\n (lat_min, lat_max, lng_min, lng_max): tuple of floats\r\n The minimum latitude, the maximum latitude, \r\n the minimum longitude, the maximum longitude in order.\r\n The four values are floats\r\n \"\"\"\r\n lat_min = min(rides.begintrip_lat.min(), rides.dropoff_lat.min())\r\n lat_max = max(rides.begintrip_lat.max(), rides.dropoff_lat.max())\r\n lng_min = min(rides.begintrip_lng.min(), rides.dropoff_lng.min())\r\n lng_max = max(rides.begintrip_lng.max(), rides.dropoff_lng.max())\r\n return (lat_min, lat_max, lng_min, lng_max)\r\n\r\ndef set_bin_number(boundary, bin_number):\r\n \"\"\"Get the numbers of discretized squared bins in the horizontal and vertical directions\r\n Parameters\r\n ===========\r\n boundary : tuple\r\n The boundary of the regime defined by (lat_min, lat_max, lng_min, lng_max)\r\n bin_number: int\r\n The number of discretized squared bins in the horizontal direction\r\n Returns : tuple of ints\r\n ========\r\n The numbers of discretized bins in the horizontal and vertical directions\r\n \"\"\"\r\n lat_min, lat_max, lng_min, lng_max = boundary\r\n horizontal_vertical_ratio = math.cos((lat_max+lat_min)/2/180*(math.pi))\r\n bin_horizontal = bin_number\r\n bin_vertical = bin_number * ((lng_max - lng_min) // (((lat_max-lat_min)*horizontal_vertical_ratio)))\r\n return (bin_horizontal, bin_vertical)\r\n\r\ndef date_time_parser(date_time):\r\n \"\"\"Parse a time-date stamp and get time and date quantitative features\r\n Parameters\r\n ===========\r\n data_time : string\r\n The time-data stamp. 
The 'begintrip_at' value.\r\n An example is 2015-02-28_20:27:09\r\n Returns\r\n ========\r\n is_weekend : int\r\n 0 if the date is a weekend\r\n 1 if the date is not a weekend\r\n time : float\r\n The time in hour. The minute and second values are converted into hour. \r\n Return None if date_time has the wrong format.\r\n \"\"\"\r\n date_time = date_time.split('_')\r\n if len(date_time) != 2: return\r\n \r\n date = [int(d) for d in date_time[0].split('-')]\r\n if len(date) != 3: return\r\n \r\n time = [int(t) for t in date_time[1].split(':')]\r\n if len(time) != 3: return\r\n \r\n date_object = datetime.date(date[0], date[1], date[2])\r\n # {1,2,...,7} represent {Monday,Tuesday ..., Sunday} in order\r\n day_of_week = date_object.isoweekday()\r\n day_of_week_cos = math.cos(day_of_week / 7 * 2 * math.pi)\r\n day_of_week_sin = math.sin(day_of_week / 7 * 2 * math.pi)\r\n is_weekend = 0\r\n if day_of_week in (6, 7):\r\n is_weekend = 1\r\n \r\n time = time[0] + time[1] / 60 + time[2] / 3600\r\n time_cos = math.cos(time / 24 * 2 * math.pi)\r\n time_sin = math.sin(time / 24 * 2 * math.pi)\r\n \r\n return (is_weekend, time)\r\n\r\n\r\ndef data_clearning(data_frame, boundary):\r\n \"\"\"Check if the latitude and longitude values of the data in the dataframe of are within the boundary \r\n Parameters\r\n ===========\r\n data_frame : pd dataframe\r\n The data\r\n boundary: tuples of floats\r\n The boundary defined by four lagitude and longtitude values: (lat_min, lat_max, lng_min, lng_max)\r\n Returns\r\n ========\r\n data_frame : pd dataframe\r\n The samples whose spatial features are outside the boundary is removed.\r\n \"\"\"\r\n lat_min, lat_max, lng_min, lng_max = boundary\r\n return data_frame[(data_frame.is_weekend != None)\\\r\n & (data_frame.begintrip_lat >= lat_min)\\\r\n & (data_frame.begintrip_lat <= lat_max)\\\r\n & (data_frame.begintrip_lng >= lng_min)\\\r\n & (data_frame.begintrip_lng <= lng_max)\\\r\n & (data_frame.dropoff_lat >= lat_min)\\\r\n & (data_frame.dropoff_lat <= lat_max)\\\r\n & (data_frame.dropoff_lng >= lng_min)\\\r\n & (data_frame.dropoff_lng <= lng_max)]\r\n\r\ndef coordinates2val(lat_min, lng_min, width, length, x, y, bin_horizontal, bin_vertical):\r\n \"\"\"Convert a (lat, lng) pair into a number\r\n The spatial regime is discretized into Parse a bin_horizontal * bin_vertical squared bins\r\n Starting from bottom-left corner, the val of the bin is 0;\r\n Go to the right, bin number is increased by 1 for each bin;\r\n After a row, go up to the next row and the val is increased by 1.\r\n Finally, the top-right corner bin has a value of bin_horizontal * bin_vertical - 1\r\n Parameters\r\n ===========\r\n lat_min : float\r\n The minium latitude of the bound\r\n lng_min : float\r\n The minium longitude of the bound\r\n width : float\r\n The latitude range of the bound\r\n length : float\r\n The longtitude range of the bound\r\n x : float\r\n The latitude of the sample\r\n y : float\r\n The longitude of the sample\r\n bin_horizontal : int\r\n The number of bins in horizontal direction\r\n bin_vertical : int\r\n The number of bins in vertical direction\r\n Returns\r\n ========\r\n val : int\r\n The val of the bin where the latitude and longitude of the sample locate\r\n \"\"\"\r\n i = (x-lat_min) // (width / bin_horizontal)\r\n j = (y-lng_min) // (length / bin_vertical)\r\n\r\n if i == bin_horizontal: i -= 1\r\n if j == bin_vertical: j -= 1\r\n return j*bin_horizontal + i\r\n\r\ndef val2coordinates(val, lat_min, lng_min, width, length, bin_horizontal, bin_vertical):\r\n 
\"\"\"Convert a the value of the bin back to the (lat, lng)\r\n The spatial regime is discretized into Parse a bin_horizontal * bin_vertical squared bins\r\n Starting from bottom-left corner, the val of the bin is 0;\r\n Go to the right, bin number is increased by 1 for each bin;\r\n After a row, go up to the next row and the val is increased by 1.\r\n Finally, the top-right corner bin has a value of bin_horizontal * bin_vertical - 1\r\n Parameters\r\n ===========\r\n val : int\r\n The val of the bin where the latitude and longitude of the sample locate\r\n lat_min : float\r\n The minium latitude of the bound\r\n lng_min : float\r\n The minium longitude of the bound\r\n width : float\r\n The latitude range of the bound\r\n length : float\r\n The longtitude range of the bound\r\n bin_horizontal : int\r\n The number of bins in horizontal direction\r\n bin_vertical : int\r\n The number of bins in vertical direction\r\n Returns\r\n ========\r\n (x, y) : tuple of floats\r\n x is the latitude of the sample\r\n y is the longitude of the sample\r\n \"\"\"\r\n x = lat_min + (0.5 + val % bin_horizontal) * (width / bin_horizontal)\r\n y = lng_min + (0.5 + val // bin_horizontal) * (length / bin_vertical)\r\n return (x, y)\r\n\r\ndef float_convert(strs):\r\n return float(strs)\r\n \r\n\r\ndef data_preprocess(rides, bin_number):\r\n \"\"\"Preprocess the data and return feature and target data sets.\r\n The following processes are performed:\r\n 0 insure correct data type\r\n 1 get the bound of the regime\r\n 2 set descretization bin numbers in horizontal and vertical directions\r\n 3 parse and add is_weekend and time features\r\n 4 data clearning\r\n 5 split DataFrame into feature and lable sets\r\n 6 normalize the features, except the 'uid' column\r\n Parameters\r\n ===========\r\n rides : pd dataframe\r\n The data\r\n bin_number: int\r\n The number of bins in horizontal direction\r\n Returns\r\n ========\r\n X : pd dataframe\r\n The feature data\r\n Y : pd dataframe\r\n The target data\r\n \"\"\"\r\n check_data_names(rides)\r\n rides['dropoff_lat'] = pd.to_numeric(rides['dropoff_lat'], errors='coerce')\r\n rides['dropoff_lng'] = pd.to_numeric(rides['dropoff_lng'], errors='coerce')\r\n rides['begintrip_lng'] = pd.to_numeric(rides['begintrip_lng'], errors='coerce')\r\n rides['begintrip_lng'] = pd.to_numeric(rides['begintrip_lng'], errors='coerce')\r\n boundary = geo_bound(rides)\r\n bin_numbers = set_bin_number(boundary, bin_number) \r\n \r\n date_time = rides.begintrip_at.apply(date_time_parser).apply(pd.Series)\r\n date_time.columns = ['is_weekend','time']\r\n rides['is_weekend'] = date_time.is_weekend\r\n rides['time'] = date_time.time \r\n \r\n rides = data_clearning(rides, boundary) \r\n X = rides.drop(['begintrip_at', 'dropoff_lat', 'dropoff_lng'], axis=1)\r\n Y = rides.loc[:,['dropoff_lat', 'dropoff_lng', 'uid']]\r\n \r\n cols_to_norm = ['begintrip_lat', 'begintrip_lng', 'is_weekend', 'time']\r\n X[cols_to_norm] = X[cols_to_norm].apply(lambda x: (x - x.mean()) / (x.std()))\r\n return (X, Y)\r\n \r\ndef split_data(X, Y, ratio):\r\n \"\"\"Split the feature and target data into two parts\r\n The order of them is reserved.\r\n Parameters\r\n ===========\r\n X : pd dataframe\r\n The feature data\r\n Y : pd dataframe\r\n The target data\r\n Returns\r\n ========\r\n Xtrain : pd dataframe\r\n The feature data for training\r\n Ytrain : pd dataframe\r\n The target data for training\r\n Xtest : pd dataframe\r\n The feature data for validation\r\n Ytest : pd dataframe\r\n The target data for validation\r\n 
\"\"\"\r\n itrain, itest = train_test_split(range(X.shape[0]), train_size=ratio)\r\n mask=np.ones(X.shape[0], dtype='int')\r\n mask[itrain]=1\r\n mask[itest]=0\r\n mask = (mask==1)\r\n\r\n Xtrain, Xtest, Ytrain, Ytest = X[mask], X[~mask], Y[mask], Y[~mask]\r\n n_samples = Xtrain.shape[0]\r\n n_features = Xtrain.shape[1]\r\n \r\n return (Xtrain, Ytrain, Xtest, Ytest)\r\n\r\nclass k_nearest_neighbors_modified():\r\n \"\"\"Classifier implementing the k-nearest neighbors weighted vote.\r\n Read more in the report\r\n Parameters\r\n ----------\r\n boundary : tuple of list\r\n The latitude and longitude boundary, defined by (lat_min, lat_max, lng_min, lng_max)\r\n k_neighbors : int, optional (default = 100)\r\n The number of neighbors considered to vote\r\n weight_k_ratio : float, optional (default = 1)\r\n The weight parameter of the importance of the data of this user.\r\n Read more in the report.\r\n bin_number : int, optional (defulat = 10)\r\n The number of bins in horizontal direction\r\n \"\"\"\r\n \r\n def __init__(self,boundary, k_neighbors = 100, weight_k_ratio = 1, bin_number = 10): \r\n self.k_neighbors = k_neighbors\r\n self.weight_k_ratio = weight_k_ratio\r\n self.bin_number = bin_number\r\n self.lat_min, self.lat_max, self.lng_min, self.lng_max = boundary\r\n self.width = self.lat_max - self.lat_min\r\n self.length = self.lng_max - self.lng_min\r\n self.bin_horizontal, self.bin_vertical = set_bin_number(boundary, self.bin_number)\r\n self.y_pred = None\r\n \r\n def fit(self, Xtrain, Ytrain):\r\n \"\"\"Fit the training data and add a 'coor_val' column to Ytrain\r\n 'coor_val' is the value of the bin where the latitude and longitude of the sample locate\r\n Parameters\r\n ----------\r\n Xtrain : pd dataframe, shape (n_samples, n_features)\r\n The traning feature data\r\n Ytrain : pd dataframe, shape (n_samples, n_targets + 1)\r\n The traning target data. The 'uid' column is included.\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n self.Xtrain = Xtrain\r\n self.Ytrain = Ytrain\r\n self.Ytrain['coor_val'] = self.Ytrain.apply(lambda row: coordinates2val(self.lat_min, self.lng_min, self.width, self.length, row.dropoff_lat, row.dropoff_lng, self.bin_horizontal, self.bin_vertical), axis=1)\r\n self.Ytrain = self.Ytrain.loc[:, ['uid', 'coor_val']]\r\n \r\n def user_predict_coor(self, row):\r\n \"\"\"Fit the training data and add a 'coor_val' column to Ytrain\r\n 'coor_val' is the value of the bin where the latitude and longitude of the sample locate\r\n Parameters\r\n ----------\r\n Xtrain : pd dataframe, shape (n_samples, n_features)\r\n The traning feature data\r\n Ytrain : pd dataframe, shape (n_samples, n_targets + 1)\r\n The traning target data. 
The 'uid' column is included.\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n Xtrain = self.Xtrain.drop('uid', axis=1)\r\n xtest = row.drop('uid', axis=0)\r\n xtest = xtest.reshape(1, -1)\r\n tree = KDTree(Xtrain)\r\n ind = tree.query(xtest, k = self.k_neighbors, return_distance = False)\r\n # ind elements are the indices of the nearest neighbors\r\n cnt_ind = [1]*len(ind[0])# ind is a list of list\r\n vote_cnt = {}\r\n for i in range(len(ind[0])): \r\n if self.Ytrain.loc[ind[0][i]]['uid'] == row['uid']:\r\n cnt_ind[i] += self.k_neighbors * self.weight_k_ratio\r\n label = self.Ytrain.loc[ind[0][i]]['coor_val']\r\n if label in vote_cnt:\r\n vote_cnt[label] += cnt_ind[i]\r\n else:\r\n vote_cnt[label] = cnt_ind[i]\r\n \r\n max_cnt = 0\r\n max_likely_val = 100\r\n for key in vote_cnt:\r\n if vote_cnt[key] > max_cnt:\r\n max_cnt = vote_cnt[key]\r\n max_likely_val = key\r\n\r\n lat, lng = val2coordinates(max_likely_val, self.lat_min, self.lng_min, self.width, self.length, self.bin_horizontal, self.bin_vertical)\r\n return pd.Series({'pred_lat': lat, 'pred_lng': lng, 'pred_coor_val':max_likely_val})\r\n \r\n def predict(self, Xtest):\r\n \"\"\"Use the model to predict the dropped off latitude and longitude\r\n Parameters\r\n ----------\r\n Xtest : pd dataframe, shape (n_test_samples, n_features)\r\n The test feature data\r\n Returns\r\n -------\r\n y_pred : pd dataframe, shape (n_test_samples, n_targets)\r\n The latitude and longtitude of the center of the predictive dropped off bin\r\n \"\"\"\r\n check_X_names(Xtest)\r\n self.Xtrain = self.Xtrain.reset_index(drop=True)\r\n self.Ytrain = self.Ytrain.reset_index(drop=True)\r\n self.y_pred = Xtest.apply(lambda row: self.user_predict_coor(row), axis = 1)\r\n return self.y_pred.loc[:, ['pred_lat', 'pred_lng']]\r\n \r\n def residue_measure(self, Ytest):\r\n \"\"\"Get the residue between the true latitude and longitude and the predictive values\r\n Parameters\r\n ----------\r\n Ytest : pd dataframe, shape (n_test_samples, n_targets)\r\n The test target data\r\n Returns\r\n -------\r\n residue : float\r\n The residue between predictive and true locations\r\n \"\"\"\r\n check_Y_names(Ytest)\r\n Ytest = Ytest.loc[:, ['dropoff_lat', 'dropoff_lng']]\r\n return np.sqrt(mean_squared_error(Ytest, self.y_pred.loc[:, ['pred_lat', 'pred_lng']]))\r\n \r\n def accuracy_measure(self, Ytest):\r\n \"\"\"Get the accuracy that the true latitude and longitude locate in the predictive bin\r\n Parameters\r\n ----------\r\n Ytest : pd dataframe, shape (n_test_samples, n_targets)\r\n The test target data\r\n Returns\r\n ===========\r\n accuracy : float\r\n The accuracy of predicting the correct bins\r\n \"\"\"\r\n check_Y_names(Ytest)\r\n Ytest['coor_val'] = Ytest.apply(lambda row: coordinates2val(self.lat_min, self.lng_min, self.width, self.length, row.dropoff_lat, row.dropoff_lng, self.bin_horizontal, self.bin_vertical), axis=1)\r\n return np.mean(Ytest.coor_val == self.y_pred.pred_coor_val)\r\n \r\n\r\nif __name__ == \"__main__\":\r\n bin_number = 10\r\n k = 100\r\n weight = 10\r\n \r\n rides = pd.read_csv('data/hw1_train.csv')\r\n Xtrain, Ytrain = data_preprocess(rides, bin_number)\r\n boundary = geo_bound(rides)\r\n rides_test = pd.read_csv('data/hw1_test.csv')\r\n Xtest, Ytest = data_preprocess(rides_test, bin_number)\r\n \r\n # split data for validation, if necessary\r\n #Xtrain, Ytrain, Xtest, Ytest = split_data(X, Y, 0.4)\r\n \r\n knnm = k_nearest_neighbors_modified(boundary, k, weight, bin_number)\r\n knnm.fit(Xtrain, Ytrain)\r\n \r\n # use partial number for 
test\r\n    test_num = 200\r\n    Xtest = Xtest[0:test_num]\r\n    Ytest = Ytest[0:test_num]\r\n    yf = knnm.predict(Xtest)\r\n    \r\n    print(knnm.residue_measure(Ytest))\r\n    print(knnm.accuracy_measure(Ytest))\r\n" }, { "alpha_fraction": 0.8006725907325745, "alphanum_fraction": 0.804646909236908, "avg_line_length": 147.59091186523438, "blob_id": "03028d027a651fd0ad543e895527bdd0c871cbf4", "content_id": "1209de987deb427955883bdf4ecdfd4c6e4af35e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3281, "license_type": "no_license", "max_line_length": 727, "num_lines": 22, "path": "/README.md", "repo_name": "suzhaoen/Customer-specified-predictive-models", "src_encoding": "UTF-8", "text": "# Customer-specified-predictive-models\n\n### Abstract\nBuilt models to predict an individual user's multi-output choice (location). It integrated the few (~8 records per user on average) but informative records of the given user and abundant but less relevant records of other users based on modified random forest and k-NN classifiers, and reached an accuracy that is 2 times higher than that of standard algorithms. \n\n### Introduction\nDevelop approaches to predict where an Uber or Lyft user would ask to be dropped off, given the user's ID, the time, date, and location of the pickup, based on the trip records of all users in the same city. \n\nThe problem has two challenges. \n\n•\tThe predictive model is user specific. The trip records of the given user are the most informative. However, the average number of trip records per user is just 8, which is far from sufficient. \n•\tThe location to be predicted consists of two values, latitude and longitude. Thus the problem is a non-decomposable multi-output machine learning problem. If the dependence between latitude and longitude is not preserved, the model would lose performance. \n\n### Methods\nWe first preprocess the data by cleaning it with Python libraries (pandas and numpy) and visualizing it with matplotlib and seaborn. We then explore various methods to engineer the features, time, date, and locations. The final choices are made based on the performance when the full models are implemented and tested. It is found that (1) a mapping to a 2D circle is helpful to engineer the time feature, (2) using a binary value to indicate whether a date is a weekday is sufficient, and (3) the locations should be inside boundaries such that the model predicts well. After the feature engineering, I further process the data by zeroing the center, normalizing or stretching each feature according to its importance.\n\n•\tTo overcome the problem that each user has insufficient records, I make the most of the data by building weighted models that integrate the scarce but informative data of the given user with the abundant but less relevant data of all other users. \n\n•\tTo solve the multi-output machine learning problem while maintaining the important dependence between latitude and longitude of locations, I adapt K-nearest neighbors and random forest algorithms that naturally support multi-output problems. \nTo realize the weighted model, I modify and develop classification and regression algorithms based on K-nearest neighbors and random forest. For example, I use the KD-tree data structure in the scikit-learn library for the k-NN algorithm such that the time complexity is reduced from O(N*N) to O(N lg N). \n\nTo evaluate and compare the methods, two measures, accuracy and residue, are used. 
After hyperparameter optimization and testing, I find that accuracy is a more informative measure than residue. Relatedly, the classification methods outperform the regression methods. After optimization, both modified classification methods yield an accuracy of 1/5 in predicting the true discretized bin among the 300 bins of the whole regime. Visualization of the results shows that they are also 2 times better than those obtained with standard machine learning algorithms. \n\n" } ]
3
wunderkid/oreilly-dl
https://github.com/wunderkid/oreilly-dl
157d8d92b571f1818b24e581887fbeb54ee02346
c936ded4598065d87a89ba37d91f3ec8f0247ae9
4e29fc8b389f4642f98abb41fd582460cbc23b22
refs/heads/master
2022-01-27T08:03:11.591653
2019-07-26T16:44:39
2019-07-26T16:44:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6456310749053955, "alphanum_fraction": 0.6504854559898376, "avg_line_length": 19.600000381469727, "blob_id": "026641b84835b8140b11db9144095ef1889bdf0d", "content_id": "eab03d7cd0b223509d4f4086fb126275f36b0c10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/epub_bundle.py", "repo_name": "wunderkid/oreilly-dl", "src_encoding": "UTF-8", "text": "import os\nfrom shutil import make_archive\nimport sys\n\nfor line in sys.stdin:\n a = line.split(\"\\n\")[0]\n break\nmake_archive(a,\"zip\")\nos.rename(a + \".zip\", os.path.join(a + \".epub\"))\nprint(\"generated!\")\n" }, { "alpha_fraction": 0.6311525106430054, "alphanum_fraction": 0.6400648355484009, "avg_line_length": 39.46721267700195, "blob_id": "80f8418a56066b87aaa490bf9c5638202344ae7e", "content_id": "bf7f9b410b5d56ed34331d42b7c42f80cbdfa1e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4942, "license_type": "no_license", "max_line_length": 354, "num_lines": 122, "path": "/README.md", "repo_name": "wunderkid/oreilly-dl", "src_encoding": "UTF-8", "text": "# OReilly-DL\nThis is a fork from lorenzodifuccia's [repo](https://github.com/lorenzodifuccia/safaribooks).\n\nDownload and generate *EPUB* of your favourite book from [*O'Reilly Books Online*](https://learning.oreilly.com/). \nI'm not responsible for the use of this programme, which is for *personal* and *educational* purposes only. \nNote that I've merged many PRs that work like magic but also performed some tweaks without thoughtful considerations, thus it may present buggy issues and few pesticide could mitigate.\n\n## Overview:\n * [EPUB Format](#epub-format)\n - META-INT\n - mimetype\n - content.opf\n * [Usage](#usage)\n - [Programme options](#programme-options)\n## EPUB FORMAT:\n\nThe EPUB® format provides a means of representing, packaging and encoding structured and semantically enhanced Web content — including HTML, CSS, SVG and other resources — for distribution in a single-file container.\n * META-INF _(container.xml)_\n ```\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">\n <rootfiles>\n <rootfile full-path=\"content.opf\" media-type=\"application/oebps-package+xml\"/>\n </rootfiles>\n </container>\n ```\n * mimetype\n ```\n application/epub+zip\n ```\n * content.opf\n ```\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <package xmlns=\"http://www.idpf.org/2007/opf\"\n xmlns:redirect=\"http://xml.apache.org/xalan/redirect\"\n version=\"3.1\"\n unique-identifier=\"bookid\">\n <metadata xmlns:opf=\"http://www.idpf.org/2007/opf\"\n xmlns:dc=\"http://purl.org/dc/elements/1.1/\">\n <dc:identifier id=\"bookid\">XXXXXXXXXXXXX</dc:identifier>\n ...\n <meta name=\"cover\" content=\"cover-image\"/>\n ...\n </metadata>\n <manifest>\n <item id=\"css\" href=\"../css\" media-type=\"text/css\"/>\n <item id=\"cover\" href=\"cover.xhtml\" media-type=\"application/xhtml+xml\"/>\n <item id=\"cover-image\"\n href=\"images/cover.jpg\"\n media-type=\"image/jpeg\"\n properties=\"cover-image\"/>\n ...\n </manifest>\n <spine toc=\"ncx\">\n <itemref idref=\"f_0077\" />\n ...\n </spine>\n <guide>\n <reference type=\"cover\" title=\"Cover\" href=\"cover.xhtml\"/>\n </guide>\n </package>\n ```\nAnd EPUB is basically a compressed combination of all files above plus other meta-data collections, 
however here comes another problem: python's `shutil.make_archive` (zip) output works in Apple's Books whilst unix's zip system's doesn't. <s>(There has to be some connection between these two; very peculiar, and I'm curious to dig deeper).</s>\n\nFor more info on EPUB, please check [here](http://www.idpf.org/epub3/latest/packages).\n\n## Usage:\n```\n$ git clone https://github.com/lorenzodifuccia/safaribooks.git or\n$ git clone https://github.com/leignshanie/oreilly-dl.git\nCloning into 'oreilly-dl'...\n\n$ cd oreilly-dl\n$ pip3 install -r requirements.txt\n\n```\n#### Programme options:\n```\n$ python3 oreilly-dl.py --help\nusage: oreilly-dl.py [--cred <EMAIL:PASS>] [--no-cookies] [--no-kindle]\n                     [--preserve-log] [--help]\n                     <BOOK ID>\n\nDownload and generate EPUB of your favourite books from O'Reilly Books.\n\npositional arguments:\n  <BOOK ID>            Book digits ID that you want to download.\n                       You can find it in the URL (X-es):\n                       `https://learning.oreilly.com/library/view/book-\n                       name/XXXXXXXXXXXXX/`\n\noptional arguments:\n  --cred <EMAIL:PASS>  Credentials used to perform the auth login on Safari\n                       Books Online.\n                       Es. ` --cred \"[email protected]:password01\" `.\n  --no-cookies         Prevent your session data to be saved into\n                       `cookies.json` file.\n  --no-kindle          Remove some CSS rules that block overflow on `table`\n                       and `pre` elements. Use this option if you're not going\n                       to export the EPUB to E-Readers like Amazon Kindle.\n  --preserve-log       Leave the `info_XXXXXXXXXXXXX.log` file even if there\n                       isn't any error.\n  --help               Show this help message.\n```\n\nFor first-time users, you'll have to specify your O'Reilly Books Online account credentials, which is in the format of \n```\n$ python3 oreilly-dl.py --cred \"[email protected]:password\" XXXXXXXXXXXXX\n```\n * Xs indicate the 13-digit ISBN number, which is available in the Book url, e.g.\n `https://learning.oreilly.com/library/view/how_to_build_a_harem/6666666666666/`\n *Notice* Sometimes the ISBN in the book description page doesn't correspond to the url, so always trust the latter.\n * Replace `email:password` with your own.\n *Notice* Use a combination of alphanumeric characters.\n\nLater, you're free to omit the --cred inputs using:\n```\n$ python3 oreilly-dl.py XXXXXXXXXXXXX\n```\n--- \n\n## Cheers!\n" } ]
2
DependableSystemsLab/LLFI
https://github.com/DependableSystemsLab/LLFI
3af987ce13c2d5de53e488ee31219306a28f0d81
8cc8a18b2ec9dcc25913b2ceeb8f8936c072e4d3
3b45200e66f4a92f4eb7d28ea3ee68ba044305af
refs/heads/master
2022-07-25T19:39:15.473938
2022-07-07T01:13:16
2022-07-07T01:13:16
7,228,380
58
43
NOASSERTION
2012-12-18T18:39:45
2022-11-01T13:38:02
2022-07-07T01:13:16
Java
[ { "alpha_fraction": 0.5982996821403503, "alphanum_fraction": 0.6206163763999939, "avg_line_length": 20.88372039794922, "blob_id": "dffbb72e6d599e32df6870c9525dff13f68ec78e", "content_id": "61aa13628dc78f26cd2c33d9bdec867a26e452db", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 941, "license_type": "permissive", "max_line_length": 75, "num_lines": 43, "path": "/test_suite/PROGRAMS/deadlock/deadlock.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <pthread.h>\n\n\nvoid * PrintHello(void*ptr);\nvoid * PrintBye(void*ptr);\npthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;\n\n\nint main()\n{ \n pthread_t thread1, thread2;\n const char *message1= \"Thread1\";\n const char *message2= \"Thread2\";\n int retval1 = pthread_create(&thread1,NULL, PrintHello,(void*) message1);\n \n int retval2 = pthread_create(&thread2,NULL, PrintBye,(void*) message2) ;\n \n printf(\"Thread 1 returns: %d\\n\",retval1);\n printf(\"Thread 2 returns: %d\\n\",retval2);\n pthread_mutex_lock( &mutex1 ); \n pthread_join(thread1,NULL); \n \n pthread_join(thread2,NULL);\n exit(0);\n}\n\n\nvoid * PrintHello(void*ptr)\n{\n char*message;\n message= (char*) ptr;\n printf(\"Hello World! I am , %s\\n\",message);\n}\n\nvoid * PrintBye(void*ptr)\n{\n char*message;\n message= (char*) ptr;\n printf(\"Bye World! I am , %s\\n\",message);\n\n}\n" }, { "alpha_fraction": 0.6335078477859497, "alphanum_fraction": 0.636416494846344, "avg_line_length": 28.152542114257812, "blob_id": "f6557b49e6d0ee00985352ccf1e72a77571ed802", "content_id": "fa8dd159328041e50f1b47101b4068517b82c671", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1719, "license_type": "permissive", "max_line_length": 137, "num_lines": 59, "path": "/web-app/views/src/js/components/mainWindow/mainPannel.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar Reflux = require(\"reflux\");\nvar Tutorial = require(\"./mainPannel/tutorial\");\nvar fileUploadStore = require(\"./../../stores/fileUploadStore\");\nvar fileUploadActions = require(\"./../../actions/fileUploadActions\");\n\nvar MainPannel = React.createClass({\n\tmixins: [Reflux.connect(fileUploadStore,\"fileList\")],\n\tgetInitialState: function() {\n\t\treturn {\n\t\t\tfileList: [],\n\t\t\tfileDisplayIndex: 0\n\t\t};\n\t},\n\trender: function() {\n\t\tvar fileContent = this.state.fileList[this.state.fileDisplayIndex] ? 
this.state.fileList[this.state.fileDisplayIndex].fileContent : \"\";\n\t\tvar fileList = this.state.fileList.map(function(file, index) {\n\t\t\tif (index == 0) {\n\t\t\t\treturn (\n\t\t\t\t\t<li className=\"active\" key={index}><a onClick={this.onFileChange.bind(this, index)}>{file.fileName}</a></li>\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\treturn (\n\t\t\t\t\t<li key={index}><a onClick={this.onFileChange.bind(this, index)}>{file.fileName}</a></li>\n\t\t\t\t);\n\t\t\t}\n\t\t}.bind(this));\n\n\t\treturn (\n\t\t\t<div className=\"tabbable tabs-left mainPannelContainer\">\n\t\t\t\t<ul className=\"nav nav-tabs left-pannel\">\n\t\t\t\t\t<li className=\"fileListLabel\">List Of Files</li>\n\t\t\t\t\t<li className=\"divider\"></li>\n\t\t\t\t\t{fileList}\n\t\t\t\t</ul>\n\t\t\t\t<div className=\"tab-content file-context\">\n\t\t\t\t\t{fileContent.split(/\\r\\n?|\\n|\\u21B5/g).map(function(item, index) {\n\t\t\t\t\t\treturn (\n\t\t\t\t\t\t\t<span key={index}>\n\t\t\t\t\t\t\t\t{item}\n\t\t\t\t\t\t\t\t<br/>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t);\n\t\t\t\t\t})}\n\t\t\t\t</div>\n\t\t\t\t<Tutorial></Tutorial>\n\t\t\t</div>\n\t\t);\n\t},\n\tonFileChange: function(index, event) {\n\t\tthis.setState({\n\t\t\tfileDisplayIndex: index\n\t\t});\n\t\t$(event.currentTarget).parent().siblings().removeClass(\"active\");\n\t\t$(event.currentTarget).parent().addClass(\"active\");\n\t}\n});\n\nmodule.exports = MainPannel;" }, { "alpha_fraction": 0.7932692170143127, "alphanum_fraction": 0.7932692170143127, "avg_line_length": 31.076923370361328, "blob_id": "099aef04bdbde90ea2014c79fee34592e187a55f", "content_id": "3a03c21be263c7c81fdbb3d548e486042350cf87", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 416, "license_type": "permissive", "max_line_length": 90, "num_lines": 13, "path": "/web-app/views/src/js/stores/selectedTraceRunNumberStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar selectedTraceRunNumberActions = require(\"./../actions/selectedTraceRunNumberActions\");\nvar selectedRunNumber = [];\nvar selectedTraceRunNumberStore = Reflux.createStore({\n\tlistenables: [selectedTraceRunNumberActions],\n\n\tonUpdateSelectedRunNumber: function(data) {\n\t\tselectedRunNumber = data;\n\t\tthis.trigger(selectedRunNumber);\n\t},\n});\n\nmodule.exports = selectedTraceRunNumberStore;" }, { "alpha_fraction": 0.7137404680252075, "alphanum_fraction": 0.7213740348815918, "avg_line_length": 19.076923370361328, "blob_id": "ad13677290191d5e097a8fc83549ee2451328250", "content_id": "2fbb9286af702bf7bb588bb254199d5277dc084c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 262, "license_type": "permissive", "max_line_length": 37, "num_lines": 13, "path": "/config/CMakeLists.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\n\ninclude(../config/copy_utils.cmake)\n\nproject(config)\n\ncopy(__init__.py __init__.py)\ncopy(llvm_paths.py llvm_paths.py)\ncopy(llvm_paths.make llvm_paths.make)\nif(NOT NO_GUI)\n\tcopy(java_paths.py java_paths.py)\nendif()\ngenCopy()\n\n" }, { "alpha_fraction": 0.7507987022399902, "alphanum_fraction": 0.7523961663246155, "avg_line_length": 20.586206436157227, "blob_id": "2e5d195d473fcab39d57c32f6dd2160e464e33f0", "content_id": 
"831d047fbdb4b4f277b7ab6cf78666d6231c9095", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 626, "license_type": "permissive", "max_line_length": 84, "num_lines": 29, "path": "/llvm_passes/core/ProfilingPass.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "//This pass is run after the transform pass for inserting hooks for fault injection \n#ifndef PROFILING_PASS_H\n#define PROFILING_PASS_H\n\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Module.h\"\n\n#include <iostream>\n\nusing namespace llvm;\nnamespace llfi {\nclass ProfilingPass: public ModulePass {\n public:\n ProfilingPass() : ModulePass(ID) {}\n\tvirtual bool runOnModule(Module &M);\n\tstatic char ID;\n\n private: \n void addEndProfilingFuncCall(Module &M);\n private:\n Constant *getLLFILibProfilingFunc(Module &M);\n Constant *getLLFILibEndProfilingFunc(Module &M);\n};\n\nchar ProfilingPass::ID=0;\n}\n\n#endif\n" }, { "alpha_fraction": 0.5770171284675598, "alphanum_fraction": 0.5770171284675598, "avg_line_length": 14.148148536682129, "blob_id": "e8b5b443cd3bc75fc99374e120b25282ce619ff7", "content_id": "42bae9c690baf6aefd6f3ed7b81fa53d556d2d69", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 409, "license_type": "permissive", "max_line_length": 40, "num_lines": 27, "path": "/test_suite/PROGRAMS/mcf/Makefile", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "## target\nTARGET=MCF\n\n## llvm root and clang\ninclude ../Makefile.common\n\nSRC_FILES = $(wildcard *.c)\nOBJECTS = $(SRC_FILES:.c=.bc)\nLINKED = $(TARGET).bc\nLL_FILE = $(TARGET).ll\n\n## other choice\ndefault: all\n\nall: $(LL_FILE)\n\n$(LL_FILE): $(LINKED)\n\t$(LLVMDIS) $(LINKED) -o $@\n\n$(LINKED): $(OBJECTS)\n\t$(LLVMLD) $(OBJECTS) -o $@\n\n%.bc:%.c\n\t$(LLVMGCC) $(COMPILE_FLAGS) $< -c -o $@\n\nclean:\n\t$(RM) -f *.bc *.ll *.bc\n" }, { "alpha_fraction": 0.716312050819397, "alphanum_fraction": 0.716312050819397, "avg_line_length": 19.285715103149414, "blob_id": "c1e5a65613d775ce744f3382719a8e083eab945c", "content_id": "9e1ff677133e42367b888f891372f334ccd925ac", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 141, "license_type": "permissive", "max_line_length": 44, "num_lines": 7, "path": "/web-app/views/src/js/actions/errorLogActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar errorLogActions = Reflux.createActions([\n 'updateErrorLog'\n ]);\n\nmodule.exports = errorLogActions;" }, { "alpha_fraction": 0.7056410312652588, "alphanum_fraction": 0.7061538696289062, "avg_line_length": 28.545454025268555, "blob_id": "088136a2aaa6707066a4951706c231f830cda1d6", "content_id": "c7b90f80b34ac108c2218fbc6b3c45cbd8ae08f8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1950, "license_type": "permissive", "max_line_length": 78, "num_lines": 66, "path": "/llvm_passes/core/FIInstSelector.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FI_INST_SELECTOR_H\n#define FI_INST_SELECTOR_H\n#include \"llvm/IR/Module.h\"\n#include 
\"llvm/IR/Instruction.h\"\n\n#include <set>\n\nusing namespace llvm;\n\nnamespace llfi {\nclass FIInstSelector {\n public:\n FIInstSelector(): includebackwardtrace(false), includeforwardtrace(false) {}\n\n public:\n void getFIInsts(Module &M, std::set<Instruction*> *fiinsts);\n virtual void getCompileTimeInfo(std::map<std::string, std::string>& info);\n\n virtual std::string getInstSelectorClass(){\n return std::string(\"Unknown\");\n }\n\n public:\n inline void setIncludeBackwardTrace(bool includebt) {\n includebackwardtrace = includebt;\n }\n inline void setIncludeForwardTrace(bool includeft) {\n includeforwardtrace = includeft;\n }\n \n private:\n // get the initial fault injection instruction without backtrace or forward\n // trace, selection from source code may need to rewrite this function\n virtual void getInitFIInsts(Module &M, std::set<Instruction*> *fiinsts);\n\n virtual bool isInstFITarget(Instruction* inst) = 0;\n\n protected:\n // only get the \"instructions\" that are the backward/forward trace of inst\n void getBackwardTraceofInsts(const std::set<Instruction*> *fiinsts,\n std::set<Instruction*> *bs);\n void getForwardTraceofInsts(const std::set<Instruction*> *fiinsts,\n std::set<Instruction*> *fs);\n void getBackwardTraceofInst(Instruction *inst, \n std::set<Instruction*> *bs);\n void getForwardTraceofInst(Instruction *inst,\n std::set<Instruction*> *fs);\n protected:\n bool includebackwardtrace;\n bool includeforwardtrace;\n}; \n\nclass SoftwareFIInstSelector: public FIInstSelector{\n virtual std::string getInstSelectorClass(){\n return std::string(\"SoftwareFault\");\n }\n};\n\nclass HardwareFIInstSelector: public FIInstSelector{\n virtual std::string getInstSelectorClass(){\n return std::string(\"HardwareFault\");\n }\n};\n}\n\n#endif\n" }, { "alpha_fraction": 0.6805506348609924, "alphanum_fraction": 0.6819626092910767, "avg_line_length": 40.421051025390625, "blob_id": "ce1bcf06587d61b58bb3d03a74015174eb42aedc", "content_id": "68f424d81edbc8909840f2b361ab63749bb796db", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 14165, "license_type": "permissive", "max_line_length": 197, "num_lines": 342, "path": "/web-app/views/src/js/components/mainWindow/instrumentModal.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require('react');\nvar ReactDOM = require('react-dom');\nvar Reflux = require(\"reflux\");\nvar targetFileNameStore = require(\"./../../stores/targetFileNameStore\");\nvar Modal = require('react-bootstrap').Modal;\nvar FormGroup = require('react-bootstrap').FormGroup;\nvar FormControl = require('react-bootstrap').FormControl;\nvar Checkbox = require('react-bootstrap').Checkbox;\nvar Button = require('react-bootstrap').Button;\nvar ControlLabel = require('react-bootstrap').ControlLabel;\nvar FilteredMultiSelect = require('react-filtered-multiselect');\nvar injectionModeActions = require(\"./../../actions/injectionModeActions\");\nvar injectionTypeConfig = require(\"./../../config/config\").injectionType;\n\nvar softwareInjectionTypeOptions = injectionTypeConfig.softwareInjectionTypeOptions;\nvar hardwareInjectionTypeOptions = injectionTypeConfig.hardwareInjectionTypeOptions;\nvar fileUploadActions = require(\"./../../actions/fileUploadActions\");\nvar consoleLogActions = require(\"./../../actions/consoleLogActions\");\nvar errorLogActions = 
require(\"./../../actions/errorLogActions\");\n\n\nArray.prototype.diff = function(a) {\n\treturn this.filter(function(i) {return a.indexOf(i) < 0;});\n};\n\nvar InstrumentModal = React.createClass({\n\tmixins: [Reflux.connect(targetFileNameStore,\"fileName\")],\n\tgetInitialState() {\n\t\treturn {\n\t\t\tshow: false,\n\t\t\tfileName: '',\n\t\t\tselectedInjectionType: [],\n\t\t\tinjectionMode: \"hardware\",\n\t\t\tinjectionOptions: hardwareInjectionTypeOptions,\n\t\t\ttraceMode: \"fullTrace\"\n\t\t};\n\t},\n\n\tcomponentDidMount () {\n\t\t// Initial status of the UI elements\n\t},\n\tclose() {\n\t\tthis.setState({ show: false });\n\t},\n\n\topen() {\n\t\tvar data = {};\n\t\tdata.fileName = this.state.fileName;\n\t\t$.ajax({\n\t\t\turl: '/preInstrument',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tvar softwareTypes = [];\n\t\t\t\tfor (var i = 0; i< data.length; i++) {\n\t\t\t\t\tvar type = {value: data[i], text: data[i]};\n\t\t\t\t\tsoftwareTypes.push(type);\n\t\t\t\t}\n\t\t\t\tsoftwareInjectionTypeOptions = softwareTypes;\n\t\t\t},\n\t\t\terror: function (error) {\n\t\t\t\tif (error.responseJSON.error) {\n\t\t\t\t\terrorLogActions.updateErrorLog(error.responseJSON.error.cmd);\n\t\t\t\t}\n\t\t\t\tconsole.log(error);\n\t\t\t\twindow.alert(\"An error has occured in preInstrument injection type auto scan process, please refresh the page.\");\n\t\t\t}\n\t\t});\n\t\tthis.setState({ show: true });\n\t},\n\n\trender: function() {\n\t\tvar selectedInjectionType = this.state.selectedInjectionType;\n\t\tvar unselectedInjectionType = this.state.injectionOptions.diff(selectedInjectionType);\n\t\treturn (\n\t\t\t<div class=\"modal-container\" id=\"InstrumentModalID\" onClick={this.open}>\n\t\t\t\t<Modal {...this.props} bsSize=\"large\" aria-labelledby=\"contained-modal-title-lg\" onClick={this.open} show={this.state.show} onHide={this.close}>\n\t\t\t\t\t<Modal.Header closeButton>\n\t\t\t\t\t\t<Modal.Title id=\"contained-modal-title-lg\">Instrument</Modal.Title>\n\t\t\t\t\t</Modal.Header>\n\t\t\t\t\t<Modal.Body>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t<p class=\"boldFont leftFloat\">LLFI Configuration Parameters</p>\n\t\t\t\t\t\t\t<button class=\"rightFloat\">Select Profile...</button>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t<p class=\"boldFont leftFloat\">Injection Type : </p>\n\t\t\t\t\t\t\t<div class=\"radio leftFloat flexDisplay\" data-toggle=\"buttons\">\n\t\t\t\t\t\t\t\t<label class=\"spaceLeft\"><input type=\"radio\" name=\"injectionMode\" value=\"hardware\" defaultChecked={true} onChange={this.injectionModeHandler}/>Hardware Injection</label>\n\t\t\t\t\t\t\t\t<label class=\"spaceLeft\"><input type=\"radio\" name=\"injectionMode\" value=\"software\" onChange={this.injectionModeHandler}/>Software Injection</label>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<button class=\"rightFloat\" disabled>Reset Profile</button>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div><p class=\"boldFont\">Compile Option: </p></div>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t<p class=\"spaceLeft leftFloat\">Instruction Selection Method </p>\n\t\t\t\t\t\t\t<div id=\"instructionSelectionContainer\" class=\"radio radio leftFloat flexDisplay\" data-toggle=\"buttons\">\n\t\t\t\t\t\t\t\t<label class=\"spaceLeft\"><input type=\"radio\" name=\"instructionSelection\" value=\"defaultInstructionType\" defaultChecked={true} 
onChange={this.intructionTypeHandler}/>Instruction Type</label>\n\t\t\t\t\t\t\t\t<label class=\"spaceLeft\"><input type=\"radio\" name=\"instructionSelection\" value=\"customInstructionType\" onChange={this.intructionTypeHandler}/>Custom Instruction Selector</label>\n\t\t\t\t\t\t\t</div>\t\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t<FormGroup controlId=\"customSelector\" class=\"rightFloat\">\n\t\t\t\t\t\t\t\t<FormControl componentClass=\"select\" disabled>\n\t\t\t\t\t\t\t\t\t<option value=\"notSpecified\">---Select---</option>\n\t\t\t\t\t\t\t\t</FormControl>\n\t\t\t\t\t\t\t</FormGroup>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t\t<Checkbox id=\"selectAllInstructionType\" class=\"rightFloat\" onClick={this.onClickSelectAllInstructions}>\n\t\t\t\t\t\t\t\t\tInclude All\n\t\t\t\t\t\t\t\t</Checkbox>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"multipleSelectContainer\">\n\t\t\t\t\t\t\t\t<FilteredMultiSelect\n\t\t\t\t\t\t\t\t\tid = \"instructionTypeUnselected\"\n\t\t\t\t\t\t\t\t\tbuttonText = {\"Add\"}\n\t\t\t\t\t\t\t\t\tclassName = \"multipleSelect\"\n\t\t\t\t\t\t\t\t\tsize = {6}\n\t\t\t\t\t\t\t\t\toptions = {this.state.injectionOptions}\n\t\t\t\t\t\t\t\t\tonChange = {this.instructionTypeAddHandler}\n\t\t\t\t\t\t\t\t\tselectedOptions = {selectedInjectionType}\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t<FilteredMultiSelect\n\t\t\t\t\t\t\t\t\tid = \"instructionTypeSelected\"\n\t\t\t\t\t\t\t\t\tbuttonText = {\"Remove\"}\n\t\t\t\t\t\t\t\t\tclassName = \"multipleSelect\"\n\t\t\t\t\t\t\t\t\tsize = {6}\n\t\t\t\t\t\t\t\t\toptions = {this.state.injectionOptions}\n\t\t\t\t\t\t\t\t\tonChange = {this.instructionTypeRemoveHandler}\n\t\t\t\t\t\t\t\t\tselectedOptions = {unselectedInjectionType}\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<hr class=\"boldHr\"/>\n\t\t\t\t\t\t<div id=\"registerSelectOptions\" class=\"rowContainer\">\n\t\t\t\t\t\t\t<p class=\"spaceLeft leftFloat\">Register Selection Method</p>\n\t\t\t\t\t\t\t<div class=\"radio rightFloat flexDisplay\" data-toggle=\"buttons\">\n\t\t\t\t\t\t\t\t<label class=\"spaceLeft\"><input type=\"radio\" name=\"registerLocation\" value=\"defaultRegisterLocation\" defaultChecked={true} onClick={this.registerLocationHandler}/>Register Location</label>\n\t\t\t\t\t\t\t\t<label class=\"spaceLeft\"><input type=\"radio\" name=\"registerLocation\" value=\"customRegisterLocation\" onClick={this.registerLocationHandler}/>Custom Register Selector</label>\n\t\t\t\t\t\t\t</div>\t\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div id=\"registerSelector\" class=\"rowContainer\">\n\t\t\t\t\t\t\t<div class=\"rightFloat\">\n\t\t\t\t\t\t\t\t<FormGroup controlId=\"customRegister\">\n\t\t\t\t\t\t\t\t\t<FormControl componentClass=\"select\" placeholder=\"select\" disabled>\n\t\t\t\t\t\t\t\t\t<option value=\"notSpecified\">---Select---</option>\n\t\t\t\t\t\t\t\t\t</FormControl>\n\t\t\t\t\t\t\t\t</FormGroup>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"rightFloat registerSelectSpace\">\n\t\t\t\t\t\t\t\t<FormGroup controlId=\"defaultRegister\">\n\t\t\t\t\t\t\t\t\t<FormControl ref={\"registerLocation\"} componentClass=\"select\" placeholder=\"select\">\n\t\t\t\t\t\t\t\t\t<option value=\"dstreg\">dstreg-(DESTINATION_REGISTER)</option>\n\t\t\t\t\t\t\t\t\t<option value=\"allsrcreg\">allsrcreg-(ALL_SOURCE_REGISTERS)</option>\n\t\t\t\t\t\t\t\t\t<option value=\"srcreg1\">srcreg1-(SOURCE_REGISTER_1)</option>\n\t\t\t\t\t\t\t\t\t<option 
value=\"srcreg2\">srcreg2-(SOURCE_REGISTER_2)</option>\n\t\t\t\t\t\t\t\t\t<option value=\"srcreg3\">srcreg3-(SOURCE_REGISTER_3)</option>\n\t\t\t\t\t\t\t\t\t<option value=\"srcreg4\">srcreg4-(SOURCE_REGISTER_4)</option>\n\t\t\t\t\t\t\t\t\t</FormControl>\n\t\t\t\t\t\t\t\t</FormGroup>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<hr class=\"boldHr\"/>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t<p class=\"spaceLeft leftFloat\">Inject Trace</p>\n\t\t\t\t\t\t\t<div class=\"radio rightFloat flexDisplay\" data-toggle=\"buttons\">\n\t\t\t\t\t\t\t\t<label class=\"traceOptions\"><input type=\"radio\" value=\"noTrace\" name=\"traceType\" onChange={this.traceOptionHandler}/>No Trace</label>\n\t\t\t\t\t\t\t\t<label class=\"traceOptions\"><input type=\"radio\" value=\"fullTrace\" name=\"traceType\" defaultChecked={true} onChange={this.traceOptionHandler}/>Full Trace</label>\n\t\t\t\t\t\t\t\t<label class=\"traceOptions\"><input type=\"radio\" value=\"limitedTrace\" name=\"traceType\" onChange={this.traceOptionHandler}/>Limited Trace</label>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"rowContainer\">\n\t\t\t\t\t\t\t<div class=\"rightFloat flexDisplay\">\n\t\t\t\t\t\t\t\t<label>Max Trace Count</label>\n\t\t\t\t\t\t\t\t<input type=\"number\" id=\"maxTraceCount\" class=\"maxTraceCount\" disabled onChange={this.maxTraceCountHandler} min={1}></input>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"rightFloat fullTraceOptions\" data-toggle=\"buttons\">\n\t\t\t\t\t\t\t\t<Checkbox id=\"backwardTrace\" defaultChecked={true}>Backward</Checkbox>\n\t\t\t\t\t\t\t\t<Checkbox id=\"forwardTrace\" defaultChecked={true}>Forward</Checkbox>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</Modal.Body>\n\t\t\t\t\t<Modal.Footer>\n\t\t\t\t\t\t<Button onClick={this.onClickInstrument}>Instrument</Button>\n\t\t\t\t\t</Modal.Footer>\n\t\t\t\t</Modal>\n\t\t\t</div>\n\t\t);\n\t},\n\tinjectionModeHandler : function (event) {\n\t\tif (event.target.value === \"software\") {\n\t\t\tthis.enbaleSoftwareInjectionElements();\n\t\t\t// set the states, clear the selected injection type\n\t\t\tthis.setState({\n\t\t\t\tinjectionMode: \"software\",\n\t\t\t\tinjectionOptions: softwareInjectionTypeOptions,\n\t\t\t\tselectedInjectionType: []\n\t\t\t});\n\t\t} else if (event.target.value === \"hardware\") {\n\t\t\tthis.enbaleHardwareInjectionElements();\n\t\t\t// set the states, clear the selected injection type\n\t\t\tthis.setState({\n\t\t\t\tinjectionMode: \"hardware\",\n\t\t\t\tinjectionOptions: hardwareInjectionTypeOptions,\n\t\t\t\tselectedInjectionType: []\n\t\t\t});\n\t\t}\n\t},\n\tenbaleSoftwareInjectionElements : function () {\n\t\t$(\"#selectAllInstructionType\").prop(\"disabled\", true);\n\t\t$(\"#instructionSelectionContainer\").hide();\n\t\t$(\"#customSelector\").hide();\n\t\t$(\"#registerSelectOptions\").hide();\n\t\t$(\"#registerSelector\").hide();\n\t},\n\tenbaleHardwareInjectionElements: function () {\n\t\t$(\"#selectAllInstructionType\").prop(\"disabled\", false);\n\t\t$(\"#instructionSelectionContainer\").show();\n\t\t$(\"#customSelector\").show();\n\t\t$(\"#registerSelectOptions\").show();\n\t\t$(\"#registerSelector\").show();\n\t},\n\ttraceOptionHandler: function (event) {\n\t\tif (event.target.value === \"noTrace\") {\n\t\t\t$(\"#maxTraceCount\").prop(\"disabled\", true);\n\t\t\t$(\"#backwardTrace\").prop(\"disabled\", true);\n\t\t\t$(\"#forwardTrace\").prop(\"disabled\", true);\n\t\t\tthis.setState({ traceMode: \"noTrace\"});\n\t\t} else if (event.target.value === 
\"fullTrace\") {\n\t\t\t$(\"#maxTraceCount\").prop(\"disabled\", true);\n\t\t\t$(\"#backwardTrace\").prop(\"disabled\", false);\n\t\t\t$(\"#forwardTrace\").prop(\"disabled\", false);\n\t\t\tthis.setState({ traceMode: \"fullTrace\"});\n\t\t} else if (event.target.value === \"limitedTrace\") {\n\t\t\t$(\"#maxTraceCount\").prop(\"disabled\", false);\n\t\t\t$(\"#backwardTrace\").prop(\"disabled\", false);\n\t\t\t$(\"#forwardTrace\").prop(\"disabled\", false);\n\t\t\tthis.setState({ traceMode: \"limitedTrace\"});\n\t\t}\n\t},\n\tintructionTypeHandler: function (event) {\n\t\tif (event.target.value === \"customInstructionType\") {\n\t\t\t$(\"#selectAllInstructionType\").prop(\"disabled\", true);\n\t\t\t$(\"#instructionTypeUnselected\").prop(\"disabled\", true);\n\t\t\t$(\"#instructionTypeSelected\").prop(\"disabled\", true);\n\t\t\t$(\"#customSelector\").prop(\"disabled\", false);\n\t\t} else if (event.target.value === \"defaultInstructionType\") {\n\t\t\t$(\"#selectAllInstructionType\").prop(\"disabled\", false);\n\t\t\t$(\"#instructionTypeUnselected\").prop(\"disabled\", false);\n\t\t\t$(\"#instructionTypeSelected\").prop(\"disabled\", false);\n\t\t\t$(\"#customSelector\").prop(\"disabled\", true);\n\t\t}\n\t},\n\tregisterLocationHandler: function (event) {\n\t\tif (event.target.value === \"defaultRegisterLocation\") {\n\t\t\t$(\"#customRegister\").prop(\"disabled\", true);\n\t\t\t$(\"#defaultRegister\").prop(\"disabled\", false);\n\t\t} else if (event.target.value === \"customRegisterLocation\") {\n\t\t\t$(\"#customRegister\").prop(\"disabled\", false);\n\t\t\t$(\"#defaultRegister\").prop(\"disabled\", true);\n\t\t}\n\t},\n\tonClickInstrument: function (event) {\n\t\tvar me = this;\n\t\tvar data = {};\n\t\tdata.fileName = this.state.fileName;\n\t\tdata.injectionMode = this.state.injectionMode;\n\t\tdata.injectionType = [];\n\t\tfor (var i = 0; i < this.state.selectedInjectionType.length; i++) {\n\t\t\tdata.injectionType.push(this.state.selectedInjectionType[i].value);\n\t\t}\n\t\tdata.traceMode = this.state.traceMode;\n\n\t\t// Pass trace type\n\t\tif (data.traceMode == \"fullTrace\") {\n\t\t\tdata.backwardTrace = $(\"#backwardTrace\").prop(\"checked\");\n\t\t\tdata.forwardTrace = $(\"#forwardTrace\").prop(\"checked\");\n\t\t} else if (data.traceMode == \"limitedTrace\") {\n\t\t\tdata.maxTraceCount = $(\"#maxTraceCount\").val();\n\t\t}\n\n\t\t// Pass register location for hardware mode\n\t\tif (data.injectionMode == \"hardware\") {\n\t\t\tdata.registerLocation = ReactDOM.findDOMNode(this.refs.registerLocation).value;\n\t\t}\n\t\tvar isBatchMode = data.injectionMode == \"software\" && data.injectionType.length > 1 ? 
true : false;\n\t\tinjectionModeActions.injectionModeChange({isBatchMode: isBatchMode, injectionMode: data.injectionMode, injectionType: data.injectionType});\n\t\t$.ajax({\n\t\t\turl: '/instrument',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tvar consoleLog = data.consoleLog;\n\t\t\t\tvar files = data.files;\n\t\t\t\tconsoleLogActions.updateConsoleLog(consoleLog);\n\t\t\t\tfileUploadActions.addFiles(files);\n\t\t\t\tconsole.log(\"instrument success\");\n\t\t\t\tme.close();\n\t\t\t\twindow.alert(\"Instrument Successful\");\n\t\t\t},\n\t\t\terror: function (error) {\n\t\t\t\tme.close();\n\t\t\t\tif (error.responseJSON.error) {\n\t\t\t\t\terrorLogActions.updateErrorLog(error.responseJSON.error.cmd);\n\t\t\t\t}\n\t\t\t\tconsole.log(error);\n\t\t\t\twindow.alert(\"An error has occured in Instrument, please refresh the page.\");\n\t\t\t}\n\t\t});\n\t},\n\tinstructionTypeAddHandler: function (selectedOptions) {\n\t\t// In software injection mode, only allow one type to be selected\n\t\tvar validOptions = selectedOptions;\n\t\tif (this.state.injectionMode == \"software\") {\n\t\t\tvalidOptions = [selectedOptions[0]];\n\t\t}\n\t\tthis.setState({ selectedInjectionType: validOptions});\n\t},\n\tinstructionTypeRemoveHandler: function (removedOptions) {\n\t\tvar selectedOptions = this.state.injectionOptions.diff(removedOptions);\n\t\tthis.setState({ selectedInjectionType: selectedOptions });\n\t},\n\tonClickSelectAllInstructions: function (e) {\n\t\tif (e.target.checked) {\n\t\t\tthis.setState({ selectedInjectionType: this.state.injectionOptions });\n\t\t} else {\n\t\t\tthis.setState({ selectedInjectionType: []});\n\t\t}\n\t},\n\tmaxTraceCountHandler: function (e) {\n\t\t$(\"#maxTraceCount\").val(e.target.value);\n\t}\n});\n\nmodule.exports = InstrumentModal;" }, { "alpha_fraction": 0.8218085169792175, "alphanum_fraction": 0.8271276354789734, "avg_line_length": 21.058822631835938, "blob_id": "fcfe56ef55b075d8c6a5be9aa2f1c1191be8c1b5", "content_id": "2564f58786413e574709492b0dae346035898f19", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 376, "license_type": "permissive", "max_line_length": 46, "num_lines": 17, "path": "/test_suite/CMakeLists.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\n\ninclude(../config/copy_utils.cmake)\n\nproject(test_suite)\n\ncopydir(HardwareFaults HardwareFaults)\ncopydir(SoftwareFaults SoftwareFaults)\ncopydir(PROGRAMS PROGRAMS)\ncopydir(Traces Traces)\ncopydir(BatchMode BatchMode)\ncopydir(MakefileGeneration MakefileGeneration)\ncopy(test_suite.yaml test_suite.yaml)\n\nadd_subdirectory(SCRIPTS)\n\ngenCopy()\n\n" }, { "alpha_fraction": 0.7798960208892822, "alphanum_fraction": 0.8024263381958008, "avg_line_length": 576, "blob_id": "2482322049ca608a2297d2f4a5d62eab9310165c", "content_id": "100e18acce72be58b19618fa0e3017ee63d92762", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 577, "license_type": "permissive", "max_line_length": 576, "num_lines": 1, "path": "/test_suite/HardwareFaults/multiple-bit-flips-in-multiple-words-controlled/ReadMe.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "Using this yaml file, the user could 
perform multiple bit-flip injections starting from a specific location. The first bit-flip is injected into the 15th bit of a source register of the instruction that has a fi_cycle of 684347 and a fi_index of 417. After this bit-flip, another 2 bits can still be flipped, since fi_max_multiple is equal to 3. Note that if any of these bit-flips results in a program crash, no more bits will be flipped. The distance between each of these bit-flips is controlled by window_len_multiple_startindex and window_len_multiple_endindex. " }, { "alpha_fraction": 0.7378048896789551, "alphanum_fraction": 0.7378048896789551, "avg_line_length": 24.30769157409668, "blob_id": "bd85a23102babf7b22cefde4f6d60bc22d6ae0f3", "content_id": "f3fef78dde6e081aa0836f1db2059a63a62daee7", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 328, "license_type": "permissive", "max_line_length": 66, "num_lines": 13, "path": "/web-app/views/src/js/stores/runOptionsStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar runOptionsActions = require(\"./../actions/runOptionsActions\");\nvar runOptions = [];\nvar runOptionsStore = Reflux.createStore({\n\tlistenables: [runOptionsActions],\n\n\tonUpdateRunOptions: function(data) {\n\t\trunOptions = data;\n\t\tthis.trigger(runOptions);\n\t},\n});\n\nmodule.exports = runOptionsStore;" }, { "alpha_fraction": 0.4575645625591278, "alphanum_fraction": 0.509225070476532, "avg_line_length": 23.636363983154297, "blob_id": "4a299132d754e14555a3a844b2ae501f9551dae2", "content_id": "2b06e5728b700bf2b9f627b3e06516018b68bd8d", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1355, "license_type": "permissive", "max_line_length": 75, "num_lines": 55, "path": "/test_suite/PROGRAMS/mcf/pflowup.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/**************************************************************************\nPFLOWUP.C of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. 
\nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:22:23 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: pflowup.c,v 1.10 2005/02/17 19:42:32 bzfloebe Exp $ */\n\n\n\n#include \"pflowup.h\"\n\n\n\n\n#ifdef _PROTO_\nvoid primal_update_flow( \n node_t *iplus,\n node_t *jplus,\n node_t *w\n )\n#else\nvoid primal_update_flow( iplus, jplus, w )\n node_t *iplus, *jplus;\n node_t *w; \n#endif\n{\n for( ; iplus != w; iplus = iplus->pred )\n {\n if( iplus->orientation )\n iplus->flow = (flow_t)0;\n else\n iplus->flow = (flow_t)1;\n }\n\n for( ; jplus != w; jplus = jplus->pred )\n {\n if( jplus->orientation )\n jplus->flow = (flow_t)1;\n else\n jplus->flow = (flow_t)0;\n }\n}\n" }, { "alpha_fraction": 0.6502260565757751, "alphanum_fraction": 0.6531031727790833, "avg_line_length": 28.31325340270996, "blob_id": "2c9aa8e960cfd44f9e8b17612531a37ec2e71dc4", "content_id": "e6fa7770f91e11f5e7f01ef1cb4e2edf1db6c4db", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2433, "license_type": "permissive", "max_line_length": 91, "num_lines": 83, "path": "/llvm_passes/core/FIInstSelectorManager.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Instructions.h\"\n\n#include \"FIInstSelectorManager.h\"\n\nnamespace llfi {\n\nvoid FIInstSelectorManager::getFIInsts(Module &M,\n std::set<Instruction*> *fiinsts) {\n // Create a set for each selector and print compiletime info\n std::vector<std::set<Instruction*>*> allInsts;\n for(it = selectors.begin(); it != selectors.end(); ++it) {\n allInsts.push_back(new std::set<Instruction*>);\n std::map<std::string, std::string> info;\n (*it)->getCompileTimeInfo(info);\n printCompileTimeInfo(info);\n (*it)->getFIInsts(M, allInsts.back());\n }\n\n // Merge allInsts into fiinsts\n std::set<Instruction*> merge = *(allInsts[0]);\n for(size_t i = 1; i < allInsts.size(); ++i) {\n std::set<Instruction*> tmp;\n tmp.swap(merge);\n std::set_intersection(tmp.begin(), tmp.end(),\n allInsts[i]->begin(), allInsts[i]->end(),\n std::inserter(merge, merge.begin()));\n }\n\n fiinsts->swap(merge);\n\n for(size_t i = 0; i < allInsts.size(); ++i) {\n delete allInsts[i];\n }\n}\n\nint FIInstSelectorManager::printCompileTimeInfo(std::map<std::string, std::string>& info) {\n // print compiletime info returned from inst selector, called by getFIInsts()\n std::ofstream compiletimeinfo_file(\"llfi.config.compiletime.txt\");\n if(compiletimeinfo_file.is_open() == false){\n std::cerr<<\"ERROR: can not open llfi.config.compiletime.txt\\n\";\n compiletimeinfo_file.close();\n return -1;\n }\n compiletimeinfo_file<<\"failure_class=\"<<info[\"failure_class\"]<<\"\\n\";\n compiletimeinfo_file<<\"failure_mode=\"<<info[\"failure_mode\"]<<\"\\n\";\n compiletimeinfo_file<<\"targets=\"<<info[\"targets\"]<<\"\\n\";\n compiletimeinfo_file<<\"injector=\"<<info[\"injector\"]<<\"\\n\";\n compiletimeinfo_file.close();\n return 0;\n}\n\nvoid FIInstSelectorManager::addSelector(FIInstSelector *s)\n{\n selectors.push_back(s);\n}\n\nvoid FIInstSelectorManager::setIncludeBackwardTrace(bool includebt)\n{\n for(size_t i = 0; i < selectors.size(); ++i) {\n selectors[i]->setIncludeBackwardTrace(includebt);\n }\n\n}\nvoid FIInstSelectorManager::setIncludeForwardTrace(bool includeft)\n{\n for(size_t i = 0; i < selectors.size(); ++i) {\n 
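// editor's note: fan the forward-trace flag out to every registered child selector\n 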
selectors[i]->setIncludeForwardTrace(includeft);\n }\n}\n\nFIInstSelectorManager::FIInstSelectorManager()\n{\n\n}\n\nFIInstSelectorManager::~FIInstSelectorManager()\n{\n for(it = selectors.begin(); it != selectors.end(); ++it) {\n delete *it;\n }\n}\n\n}\n" }, { "alpha_fraction": 0.7536231875419617, "alphanum_fraction": 0.7536231875419617, "avg_line_length": 20.5625, "blob_id": "7beb25c3ffdc2a13baf7a9784eabd60cda0d8493", "content_id": "6c438d97999718a4caf173161f17ded2c3f56fdc", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 690, "license_type": "permissive", "max_line_length": 69, "num_lines": 32, "path": "/llvm_passes/core/FIInstSelectorManager.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FI_INST_SELECTOR_MANAGER_H\n#define FI_INST_SELECTOR_MANAGER_H\n#include <vector>\n#include <string>\n#include <fstream>\n#include <iostream>\n\n#include \"FIInstSelector.h\"\n\nusing namespace llvm;\nnamespace llfi {\n\nclass FIInstSelectorManager {\n public:\n FIInstSelectorManager();\n ~FIInstSelectorManager();\n void addSelector(FIInstSelector *s);\n void getFIInsts(Module &M, std::set<Instruction*> *fiinsts);\n\n void setIncludeBackwardTrace(bool includebt);\n void setIncludeForwardTrace(bool includeft);\n\n private:\n std::vector<FIInstSelector*> selectors;\n std::vector<FIInstSelector*>::iterator it;\n\n int printCompileTimeInfo(std::map<std::string, std::string>& info);\n};\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7385621070861816, "alphanum_fraction": 0.7385621070861816, "avg_line_length": 21, "blob_id": "5cf27aa427ed09f845283f5203fc7d97653b8dc6", "content_id": "bea21cd2a0d3ad8e61fb5127f66bca158c79e0f4", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 153, "license_type": "permissive", "max_line_length": 50, "num_lines": 7, "path": "/web-app/views/src/js/actions/targetFileNameActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar targetFileNameActions = Reflux.createActions([\n 'changeFileName'\n ]);\n\nmodule.exports = targetFileNameActions;" }, { "alpha_fraction": 0.773858904838562, "alphanum_fraction": 0.773858904838562, "avg_line_length": 21.952381134033203, "blob_id": "3543af3bad972fef8d87d3d5f8d8959a81bd0dfa", "content_id": "fd75d3e0c4994f093c258d417fdfabddd2cec9cb", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 482, "license_type": "permissive", "max_line_length": 75, "num_lines": 21, "path": "/llvm_passes/core/RegLocBasedFIRegSelector.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef REGLOC_BASED_FI_REG_SELECTOR_H\n#define REGLOC_BASED_FI_REG_SELECTOR_H\n\n#include \"Controller.h\"\n#include \"FIRegSelector.h\"\nnamespace llfi {\nclass RegLocBasedFIRegSelector: public HardwareFIRegSelector {\n public:\n RegLocBasedFIRegSelector(FIRegLoc filoc): firegloc(filoc) {}\n\n private:\n virtual bool isRegofInstFITarget(Value *reg, Instruction *inst);\n virtual bool isRegofInstFITarget(Value *reg, Instruction *inst, int pos);\n private:\n FIRegLoc firegloc;\n};\n}\n\n\n\n#endif\n" }, { "alpha_fraction": 0.7132557034492493, "alphanum_fraction": 0.7165834903717041, "avg_line_length": 20.987804412841797, 
"blob_id": "36a52705febc2c6e453b6da2d476f0b5e0b05b3d", "content_id": "adf5618f1c7f51a38bd4646040bb3674a790f3fe", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1803, "license_type": "permissive", "max_line_length": 72, "num_lines": 82, "path": "/llvm_passes/core/Controller.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef CONFIG_H\n#define CONFIG_H\n#define LLVM_ON_UNIX 1\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/FileSystem.h\"\n\n#include <set>\n#include <map>\n#include <list>\n#include <string>\n\n#define DST_REG_POS -1\n\nusing namespace llvm;\n\nnamespace llfi {\nclass FIInstSelectorManager;\nclass FIRegSelector;\n\n\nenum FIInstSelMethod {\n insttype, funcname, sourcecode, custominstselector\n};\n\nenum FIRegSelMethod {\n regloc, customregselector\n};\n\nenum FIRegLoc {\n dstreg, allsrcreg, srcreg1, srcreg2, srcreg3, srcreg4\n};\n\nclass Controller {\n typedef std::map<std::string, unsigned> NameOpcodeMap;\n public:\n static Controller *getInstance(Module &M);\n ~Controller(); \n\n public:\n void getFIInstRegsMap(\n std::map<Instruction*, std::list<int>* > **fiinstreg) {\n *fiinstreg = &fi_inst_regs_map;\n }\n void dump() const;\n\n private:\n Controller() {}\n Controller(Module &M) {\n init(M);\n }\n void init(Module &M);\n void processCmdArgs();\n void processInstSelArgs();\n void processRegSelArgs();\n\n void getOpcodeListofFIInsts(std::set<unsigned> *fi_opcode_set);\n void getFuncList(std::set<std::string> *fi_func_set);\n void getModuleFuncs(Module &M);\n\n // output of the controller\n private:\n // a map of target instructions and a list of inject loc\n // Assumption: changes on instructions do not have temporal relations\n // That's why we can use unordered map instead of list\n // TODO: replace tree-based map to hashtable-based map for performance\n std::map<Instruction*, std::list< int >* > fi_inst_regs_map; \n\n private:\n FIInstSelectorManager *fiinstselector;\n FIRegSelector *firegselector;\n\n // set of functions present in module\n std::set<std::string> func_set;\n\n private:\n static Controller *ctrl;\n};\n\n}\n#endif\n" }, { "alpha_fraction": 0.5162738561630249, "alphanum_fraction": 0.590347945690155, "avg_line_length": 24.457143783569336, "blob_id": "b1f2ecd63338b37b6dd55ce7985dda8fda979f8e", "content_id": "602efbcd90a87ab6de91a431e15dac13caeab7ee", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 891, "license_type": "permissive", "max_line_length": 75, "num_lines": 35, "path": "/test_suite/PROGRAMS/mcf/readmin.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/**************************************************************************\nREADMIN.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. 
\nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:23:07 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: readmin.h,v 1.11 2005/02/17 19:42:21 bzfloebe Exp $ */\n\n\n\n#ifndef _READMIN_H\n#define _READMIN_H\n\n\n#include \"defines.h\"\n#include \"mcfutil.h\"\n#include \"mcflimit.h\"\n\n\nextern long read_min _PROTO_(( network_t * ));\n\n\n#endif\n" }, { "alpha_fraction": 0.6434316635131836, "alphanum_fraction": 0.6434316635131836, "avg_line_length": 28.84000015258789, "blob_id": "5d39f5b049eb4e8ef7a770178beddb301c440d0c", "content_id": "1884bc21acc54fc3ea2164a881a0cfc0eeceb2bf", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 746, "license_type": "permissive", "max_line_length": 75, "num_lines": 25, "path": "/config/llfigui.cmake", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "set(DEFAULT_TARGETS \"\")\n\n#set CMAKE_JAVA_INCLUDE_PATH\ninclude(../config/java_paths.cmake)\n\nmacro(add_java_src SRCCODE)\n STRING(REGEX REPLACE \"[.]java\" \".class\" CLASSFILE \"${SRCCODE}\")\n set(DEFAULT_TARGETS ${DEFAULT_TARGETS} ${CLASSFILE})\n MESSAGE(STATUS \"Output .class file is: ${CLASSFILE}\")\n add_custom_command(\n OUTPUT ${CLASSFILE}\n COMMAND ${JAVAC_EXECUTABLE}\n -classpath \"${CMAKE_JAVA_INCLUDE_PATH}:${CMAKE_CURRENT_SOURCE_DIR}\"\n -d ${CMAKE_CURRENT_BINARY_DIR}\n ${CMAKE_CURRENT_SOURCE_DIR}/${SRCCODE}\n DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${SRCCODE}\n )\nendmacro()\n\nmacro(compileGUI)\n add_custom_target(\n ${PROJECT_NAME} ALL\n DEPENDS ${DEFAULT_TARGETS}\n )\nendmacro()\n" }, { "alpha_fraction": 0.709694504737854, "alphanum_fraction": 0.7227407693862915, "avg_line_length": 49.417110443115234, "blob_id": "05c5897466e5a442919cc90a107c3f1eaf65531a", "content_id": "898faf0e8cca6acf18e7a3301c785378b84fc332", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9428, "license_type": "permissive", "max_line_length": 518, "num_lines": 187, "path": "/README.md", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "LLFI\n====\n\n**NOTE: We're in the process of updating LLFI to use the latest LLVM version, and also support Machine Learning (ML) operations. Please see LLTFI (https://github.com/DependableSystemsLab/LLTFI) for further details. From now on, LLFI development will be restricted to bug fixes and security updates, and any new features will be added only to LLTFI. However, LLTFI doesn't yet support software failure modes.**\n\nLLFI is an LLVM based fault injection tool, that injects faults into the LLVM IR of the application source code. The faults can be injected into specific program points, and the effect can be easily tracked back to the source code. LLFI is typically used to map fault characteristics back to source code, and hence understand source level or program characteristics for various kinds of fault outcomes. Detailed documentation about LLFI can be found at: https://github.com/DependableSystemsLab/LLFI/wiki \n\nPlease join the following Google Groups for asking questions about LLFI that are not answered in the documentation: [email protected]\n\nAuto-Installer\n--------------\nThis is the recommended method for building the LLFI. 
If you wish to build the LLFI via the auto-installer, you *do not need* to clone the LLFI git repository. Simply download the installer script by itself, and it will fetch the latest version of the git repository for you. The LLFI auto-installer takes the form of a single Python script (installer/InstallLLFI.py). To run the script, simply copy it into the directory where you would like to build the LLFI and, from the command line, run `python3 InstallLLFI.py`.\n \nDependencies:\n 1. 64 Bit Machine\n 2. 64 Bit Linux or OS X\n 3. CMake (minimum v2.8)\n 4. Python 3 and above\n 5. tcsh (for GUI)\n 6. GraphViz package (for visualizing error propagation)\n 7. Internet Connection\n\nGUI Dependencies:\n 1. JDK7/JDK8 with JavaFX\n 2. tcsh shell\n\nUsage:\nRun \"python3 InstallLLFI.py -h\" to see all running options and guidelines.\n 1. Copy the InstallLLFI.py script to where you want to build the LLFI\n 2. Make sure you are _not_ logged in as root\n 3. Run \"python3 InstallLLFI.py\"\n 4. Wait for compilation to finish\n 5. Run the GUI by executing \"./llfi-gui\" under the bin/ folder\n\nAbout tcsh:\n\nThe LLFI-GUI uses tcsh to read environment variables describing the location of the LLFI build. The installer will automatically add those environment variables to your ~/.tcshrc file. You do not need to actively use tcsh as your primary shell; simply having it installed is enough.\n\nManual Install\n---------------\nThis method is also available, and may be more suitable if you want more control over the location of the LLVM build that the LLFI requires (i.e., you already have LLVM built and wish to use that build).\n\nDependencies:\n \n 1. 64-bit machine with 64-bit Linux or OS X\n 2. CMake (minimum v2.8)\n 3. Python 3 and above\n 4. Python YAML library (PyYAML)\n 5. Clang v3.4\n 6. LLVM v3.4, built with CMake\n * Build llvm-3.4 **WITH CMAKE** using flag `-DLLVM_REQUIRES_RTTI=1`. [Instructions](http://llvm.org/docs/CMake.html)\n * Remember to run `make` in the llvm build directory after running `cmake`.\n 7. GraphViz package (for visualizing error propagation)\n\nGUI Dependencies:\n 1. JDK7/JDK8 with JavaFX\n 2. tcsh shell\n\nBuilding:\n \n Run `./setup --help` for build instructions.\n```\n $ ./setup --help\n\n Usage: setup OPTIONS\n List of options:\n -LLVM_DST_ROOT <LLVM CMake build root dir>:\n Make sure you build LLVM with CMake and pass build root directory here\n -LLVM_SRC_ROOT <LLVM source root dir>\n -LLFI_BUILD_ROOT <path where you want to build LLFI>\n -LLVM_GXX_BIN_DIR <llvm-gcc/g++'s parent dir> (optional):\n You don't need to set it if it is in system path\n -JAVA_HOME_DIR <java home directory for oracle jdk 7 or higher> (optional):\n You don't need to set it if your default jdk is oracle jdk 7 or higher and in system path\n\n\n --help(-h): show help information\n --no_gui: Add this option if you do not want GUI.\n --runTests: Add this option if you want to run all regression tests after building LLFI.\n```\n\n Here is a sample build command if `clang` and `javac` are already in $PATH:\n```\n ./setup -LLFI_BUILD_ROOT $BUILD/LLFI -LLVM_SRC_ROOT $SRC/llvm-3.4 -LLVM_DST_ROOT $BUILD/llvm-3.4\n```\n\nBuild without GUI:\nTo build LLFI without the GUI, just add the option `--no_gui` to the setup command line, for example:\n```\n./setup -LLFI_BUILD_ROOT $BUILD/LLFI -LLVM_SRC_ROOT $SRC/llvm-3.4 -LLVM_DST_ROOT $BUILD/llvm-3.4 --no_gui\n```\n\nRunning tests:\nRunning all regression tests after installation is highly recommended. Note that you may encounter some error messages during the fault injection stage. 
This is normal. Once all tests have completed and they have all passed, LLFI is correctly installed.\n\nVirtualBox Image\n-----------------\n\nIf you want to quickly try out LLFI, an Ubuntu image with LLFI and its dependencies pre-installed \nis available [here](https://drive.google.com/file/d/0B5inNk8m9EfeM096ejdfX2pTTUU/view?usp=sharing) (2.60GB). This image is built with VirtualBox v4.3.26, with Ubuntu 14.04.2 LTS, LLVM v3.4, CMake v3.4 and the current master branch version of LLFI (as of Sep 16th, 2015).\n\nuser: `llfi` \npassword: `root`\n\n`<LLFI_SRC_ROOT>` is located under `~/Desktop/llfisrc/`. \n`<LLFI_BUILD_ROOT>` is located under `~/Desktop/llfi/`. \n`<LLVM_SRC_ROOT>` is located under `~/Desktop/llvmsrc/`. \n`<LLVM_DST_ROOT>` is located under `~/Desktop/llvm/`. \n`<LLVM_GXX_BIN_DIR>` is located under `~/Desktop/llvm/bin/`. \n\nSample tests can be found under `~/Desktop/test/`.\n\nTo run it, open VirtualBox, select `File->Import Appliance...` and navigate to the `.ova` file.\n\nRunning\n-------\nYou can use test programs in the directory `sample_programs/` or `test_suite/PROGRAMS/` to test LLFI. Programs in the `sample_programs` directory already contain a valid `input.yaml` file.\n#### Command line\nExample program: `factorial`\n 1. Copy the `sample_programs/factorial/` directory to your project directory. \n 2. Change to your `factorial` directory and build a single IR file with the LLFI tool `GenerateMakefile`:\n ```\n <LLFI_BUILD_ROOT>/tools/GenerateMakefile --readable --all -o factorial.ll\n ```\n Alternatively, you can build your own IR file with `clang`.\n 3. Instrument factorial with calls to LLFI libraries and create executables under the *llfi* directory:\n ```\n <LLFI_BUILD_ROOT>/bin/instrument --readable factorial.ll\n ```\n 4. Run the factorial executable with profiling functions instrumented:\n ```\n <LLFI_BUILD_ROOT>/bin/profile ./llfi/factorial-profiling.exe 6\n ```\n In the file *llfi/baseline/golden_std_output*, you should be able to see 720.\n 5. Run the `factorial` executable with fault injection functions instrumented:\n ```\n <LLFI_BUILD_ROOT>/bin/injectfault ./llfi/factorial-faultinjection.exe 6\n ```\n\n For a complete test of LLFI, please use the LLFI test suite and refer to the wiki page: [Test suite for regression test](https://github.com/DependableSystemsLab/LLFI/wiki/Test-Suite-for-Regression-Test) for details.\n\n#### GUI\nIf you have used `./setup` to install LLFI, you need to set new environment variables for the tcsh shell before running the GUI for the first time. Open `~/.tcshrc` using your favourite text editor and add `setenv llfibuild <LLFI_BUILD_ROOT>/` and `setenv zgrviewer <LLFI_BUILD_ROOT>/tools/zgrviewer/` to it. [OPTIONAL] Create an environment variable \"COMPARE\" with the path of the SDC check script.\n\nExecute `<LLFI_BUILD_ROOT>/bin/llfi-gui` to start the **GUI**. 
The outputs will be saved in the directory where you have executed the command.\n\n#### Web GUI Development Environment Setup\nDependencies:\n 1. Node.js\n 2. webpack\n\nSteps to set up the development environment: \n1: Download this project from Git \n2: Download Node.js \n3: Install libraries: Go to the web-app directory and run \"npm install\" \n4: Install Webpack: In the same directory as step 3, run \"sudo npm install -g webpack\" \n5: Configure the LLFI root path for the server: \nBy default, the program uses the environment variable $llfibuild as the path of the LLFI build directory. \nYou can set the environment variable llfibuild on your system to point to the LLFI build directory on your local machine. \n\nStart the server: \nGo to the /web-app/server folder and run \"node server.js\" \n\nStart the front-end dev tool: \nGo to the web-app directory and run \"webpack\" or \"webpack -w\" \n\nResults\n-------\nAfter fault injection, output from LLFI and the tested application can be found\nin the *llfi* directory.\n\n| Directory | Contents |\n| ----------------------| ---------------------------------------------- |\n| *std_output* | Piped STDOUT from the tested application |\n| *llfi_stat_output* | Fault injection statistics |\n| *error_output* | Failure reports (program crashes, hangs, etc.) |\n| *trace_report_output* | Fault propagation report files and graphs |\n\n\nReferences\n----------\n* [LLFI Paper](http://blogs.ubc.ca/karthik/2013/02/15/llfi-an-intermediate-code-level-fault-injector-for-soft-computing-applications/)\n* [FIDL Paper](http://blogs.ubc.ca/karthik/2016/05/05/fidl-a-fault-injection-description-language-for-compiler-based-sfi-tools/)\n* [LLFI Wiki](https://github.com/DependableSystemsLab/LLFI/wiki)\n\n---\nRead *caveats.txt* for caveats and known problems.\n" }, { "alpha_fraction": 0.7536231875419617, "alphanum_fraction": 0.7536231875419617, "avg_line_length": 26.026315689086914, "blob_id": "4fc0a6889092f2bc61b9345952ebd2dd1542a0d9", "content_id": "dd662209e375a42f78e5a3f84f53279cbee6fbb0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1030, "license_type": "permissive", "max_line_length": 88, "num_lines": 38, "path": "/llvm_passes/software_failures/_SoftwareFaultRegSelectors.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Value.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"FIInstSelector.h\"\n#include \"FIRegSelector.h\"\n#include \"FICustomSelectorManager.h\"\n\n#include \"llvm/IR/IntrinsicInst.h\"\n#include <fstream>\n#include <iostream>\n#include <sstream>\n\nusing namespace std;\nnamespace llfi {\n\tclass FuncArgRegSelector: public SoftwareFIRegSelector {\n\tpublic:\n\t\tFuncArgRegSelector(int target_arg) : pos_argument(target_arg), specified_arg(true) {};\n\t\tFuncArgRegSelector():pos_argument(0), specified_arg(false) {};\n\tprivate:\n\t\tint pos_argument;\n\t\tbool specified_arg;\n\t\tvirtual bool isRegofInstFITarget(Value *reg, Instruction *inst);\n\t\tvirtual bool isRegofInstFITarget(Value* reg, Instruction* inst, int pos);\n\t};\n\t\n\tclass FuncDestRegSelector: public SoftwareFIRegSelector {\n\tprivate:\n\t\tvirtual bool isRegofInstFITarget(Value *reg, Instruction 
*inst);\n\t\t\n\t};\n\n}\n\n \n" }, { "alpha_fraction": 0.7003642916679382, "alphanum_fraction": 0.7012750506401062, "avg_line_length": 27.894737243652344, "blob_id": "b76c0a96df665057fbeb164638471021b4433fb2", "content_id": "b25b174072c3efcdad657e222a03b1830ef3fa5b", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1098, "license_type": "permissive", "max_line_length": 76, "num_lines": 38, "path": "/llvm_passes/hardware_failures/LLFIIndexFIInstSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Instructions.h\"\n#include \"llvm/Support/CommandLine.h\"\n\n#include \"FIInstSelector.h\"\n#include \"FICustomSelectorManager.h\"\n#include \"Utils.h\"\n\nusing namespace llvm;\n\nnamespace llfi {\n\nstatic cl::list< std::string > injecttoindex(\"injecttoindex\", \n cl::desc(\"Inject into the specified LLFI index\"), \n cl::ZeroOrMore);\n\n/**\n * LLFI Index instruction selector selects instruction of certain indices\n */\nclass LLFIIndexFIInstSelector: public HardwareFIInstSelector {\n private:\n virtual bool isInstFITarget(Instruction *inst) {\n long llfiindex = getLLFIIndexofInst(inst);\n for (unsigned i = 0; i != injecttoindex.size(); ++i)\n if (atol(injecttoindex[i].c_str()) == llfiindex)\n return true;\n return false;\n }\n public:\n virtual void getCompileTimeInfo(std::map<std::string, std::string>& info){\n info[\"failure_class\"] = \"HardwareFault\";\n info[\"failure_mode\"] = \"SpecifiedLLFIIndex\";\n info[\"targets\"] = \"<include list in yaml>\";\n info[\"injector\"] = \"<fi_type>\";\n }\n};\n\nstatic RegisterFIInstSelector X(\"llfiindex\", new LLFIIndexFIInstSelector());\n}\n" }, { "alpha_fraction": 0.5871912240982056, "alphanum_fraction": 0.596595823764801, "avg_line_length": 29.13572883605957, "blob_id": "95ff45a642860f48375c93b6697bafb21f99a8a2", "content_id": "deca9d81bc8281277a0ed24a6e2974a1d94194b6", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15099, "license_type": "permissive", "max_line_length": 114, "num_lines": 501, "path": "/tools/tracetools.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n\n#traceTools.py\n#Author: Sam Coulter\n#This file contains library functions and classes for the llfi tracing and\n#tracing analyses scripts\n\nimport sys\nimport os\nimport glob\nimport itertools\nimport difflib\n\n\ndebugFlag = 0\n\ndef debug(text, level=5):\n global debugFlag\n if debugFlag == level:\n print(text)\n\ngoldenRemovedCount = []\nfaultyRemovedCount = []\n\nclass diffBlock:\n def __init__(self, lines):\n\n debug(\"\\n\\tCreating a diffBlock\")\n\n origHeader, newHeader = lines[0].replace('@',' ').replace('+',' ').replace('-',' ').split()\n origsplit = origHeader.split(',')\n newsplit = newHeader.split(',')\n self.origStart = int(origsplit[0])\n self.newStart = int(newsplit[0])\n\n self.preDiff = None\n self.postDiff = None\n\n self.origLines = []\n self.newLines = []\n\n if \"+\" not in lines[1] and \"-\" not in lines[1]:\n if \"S\" in lines[1]: #See ugly hack in the diffReport init\n lines[1] = lines[1][2:]\n self.preDiff = lines.pop(1)\n self.origStart += 1\n self.newStart += 1\n\n if \"+\" not in lines[-1] and \"-\" not in lines[-1]:\n self.postDiff = lines.pop(len(lines)-1)\n\n for line in lines[1:]:\n if \"-\" in line:\n 
self.origLines.append(line)\n if \"+\" in line:\n self.newLines.append(line)\n\n debug(\"\\tDiffblock lines\")\n debug(\"\\t\" + \"\\n\\t\".join(lines))\n\n self.origLength = len(self.origLines)\n self.newLength = len(self.newLines)\n\n debug(\"\\t\" + \"\\n\\t\".join(self.origLines))\n debug(\"\\t\" + \"\\n\\t\".join(self.newLines))\n\n #print some info for debugging\n def printdebug(self):\n print(self.origStart, self.newStart)\n print('\\n'.join(self.origLines))\n print('\\n'.join(self.newLines))\n\n #print the block analysis summary\n def getSummary(self, adj=0):\n origStart = self.origStart + adj\n newStart = self.newStart + adj\n DataDiffs = []\n CtrlDiffs = []\n instanceList = []\n\n izip = itertools.zip_longest(self.origLines, self.newLines)\n\n instance = diffInstance(0,0,0,0)\n for i, (g, f) in enumerate(izip):\n g = diffLine(g)\n f = diffLine(f)\n if g and f:\n if g.ID == f.ID:\n if instance.type != 1:\n if (instance.summary != None):\n instanceList.append(instance.summary())\n instance = diffInstance(1, findAdjustedPosition(origStart, goldenRemovedCount), \\\n findAdjustedPosition(newStart, faultyRemovedCount), i)\n instance.add(\"Data Diff: ID: \" + str(g.ID) + \" OPCode: \" + str(g.OPCode) + \\\n \" Value: \" + str(g.Value) + \" \\\\ \" + str(f.Value))\n instance.incOrigLength()\n instance.incNewLength()\n if (instance.summary != None):\n instanceList.append(instance.summary())\n return instanceList[1]\n\nclass ctrlDiffBlock(diffBlock):\n def getRange(self):\n debug(\"Printing ctrlDiffBlock Range\")\n debug(str(self.origStart) + \" \" + str(self.origLength) + \" \" + str(self.newStart) + \" \" + str(self.newLength))\n return self.origStart, self.origLength, \\\n self.newStart, self.newLength\n\n def getSummary(self, adj=0):\n origStart = self.origStart + adj\n newStart = self.newStart + adj\n DataDiffs = []\n CtrlDiffs = []\n instanceList = []\n\n debug(\"ctrlDiffBlock getSummaryCall: \" + str(adj))\n\n izip = itertools.zip_longest(self.origLines, self.newLines)\n\n instance = diffInstance(0,0,0,0)\n for i, (g, f) in enumerate(izip):\n if g and f:\n if instance.type != 2:\n if (instance.summary != None):\n instanceList.append(instance.summary(self.preDiff, self.postDiff))\n instance = diffInstance(2, origStart, newStart, i)\n instance.add(\"Ctrl Diff: ID: \" + str(g[1:]) + \" \\\\ \" + str(f[1:]))\n instance.incOrigLength()\n instance.incNewLength()\n if g and not f:\n if instance.type != 2:\n if (instance.summary != None):\n instanceList.append(instance.summary(self.preDiff, self.postDiff))\n instance = diffInstance(2, origStart, newStart, i)\n instance.add(\"Ctrl Diff: ID: \" + str(g[1:]) + \" \\\\ None\")\n instance.incOrigLength()\n if f and not g:\n if instance.type != 2:\n if (instance.summary != None):\n instanceList.append(instance.summary(self.preDiff, self.postDiff))\n instance = diffInstance(2, origStart, newStart, i)\n instance.add(\"Ctrl Diff: ID: \" + \"None \\\\ \" + str(f[1:]))\n instance.incNewLength()\n if (instance.summary != None):\n instanceList.append(instance.summary(self.preDiff, self.postDiff))\n return instanceList[1]\n\ndef removeRangeFromLines(goldenLines, faultyLines, xxx_todo_changeme, adj = 0):\n (gStart, gLength, fStart, fLength) = xxx_todo_changeme\n global goldenRemovedCount\n global faultyRemovedCount\n\n debug(\"\\n\\nRemovingRangeFromLines()\")\n\n i = 0\n debug(\"GLen \"+ str(gLength))\n debug(\"GStart \" + str(gStart))\n while (i < gLength):\n goldenLines[gStart+i-1] = \"\"\n i += 1\n debug(str(i))\n goldenRemovedCount.append((gStart + 
adj, gLength))\n i = 0\n debug(\"FLen \" +str(fLength))\n debug(\"FStart \" + str(fStart))\n debug(\"FLines\")\n debug(\"\\n\".join(faultyLines))\n while (i < fLength):\n faultyLines[fStart+i-1] = \"\"\n i += 1\n debug(str(i))\n faultyRemovedCount.append((fStart + adj, fLength))\n debug(\"\\nGolden after removal::::\")\n debug('\\n'.join(goldenLines))\n debug(\"\\nFaulty After removal::::\")\n debug('\\n'.join(faultyLines))\n return goldenLines, faultyLines\n\ndef findAdjustedPosition(position, remArray):\n i = 0\n while i < len(remArray):\n location, count = remArray[i]\n if position >= location:\n position = position + count\n else:\n return position\n i += 1\n return position\n\n\nclass diffInstance:\n def __init__(self, insttype, origstart, newstart, adj):\n debug(\"\\t\\tCreating a diffInstance\")\n self.origStart = origstart + adj\n self.origLength = 0\n self.origEnd = 0\n self.newStart = newstart + adj\n self.newLength = 0\n self.newEnd = 0\n self.type = insttype\n self.lines = []\n\n def add(self, line):\n self.lines.append(line)\n\n def summary(self, preDiff=None, postDiff=None):\n if len(self.lines) > 0:\n self.origEnd = self.origStart + self.origLength\n self.newEnd = self.newStart + self.newLength\n header = \"\\nDiff@ inst # \" + str(self.origStart) + \"\\\\\" + str(self.newStart) \\\n + \" -> inst # \" + str(self.origEnd) + \"\\\\\" + str(self.newEnd) + \"\\n\"\n if preDiff != None:\n header += \"Pre Diff: ID: \" + str(preDiff) + \"\\n\"\n if postDiff != None:\n final = header + '\\n'.join(self.lines) + \"\\nPost Diff: ID:\" + postDiff\n else:\n final = header + '\\n'.join(self.lines)\n return final\n else:\n return None\n\n def incOrigLength(self):\n self.origLength += 1\n\n def incNewLength(self):\n self.newLength += 1\n\n\nclass diffReport:\n def __init__(self, goldenLines, faultyLines, startPoint, injectedID):\n self.injectedID = injectedID\n debug(\"Starting a diffReport, startpoint = \" + str(startPoint))\n self.startPoint = startPoint\n self.blocks = []\n\n #perform ctrl diff analysis\n goldenIDs = goldenLines[:]\n faultyIDs = faultyLines[:]\n goldenIDs = trimLinesToCtrlIDs(goldenIDs)\n faultyIDs = trimLinesToCtrlIDs(faultyIDs)\n\n #This ugly hack forces the difflib routine to prioritize certain ctrl flow matches.\n# TODO: the fundamental problem here is unix diff is not greedy\n# so we might need to come up with a comprehensive fix\n# The hack might not always work. 
Jiesheng\n\n i = 0\n while i < len(goldenIDs):\n if i < len(faultyIDs) and goldenIDs[i] == faultyIDs[i]:\n goldenIDs[i] = \"S\" + goldenIDs[i]\n faultyIDs[i] = \"S\" + faultyIDs[i]\n else:\n break\n i += 1\n\n ctrldiff = list(difflib.unified_diff(goldenIDs[:], faultyIDs[:], n=1, lineterm=''))\n\n if ctrldiff:\n ctrldiff.pop(0)\n ctrldiff.pop(0)\n\n debug(\"\\n\".join(ctrldiff))\n debug(\"Length = \" + str(len(ctrldiff)))\n\n i = 0\n length = 1\n start = None\n\n while (i < len(ctrldiff)):\n if \"@@ \" in ctrldiff[i]:\n if start != None:\n debug(\"Calling ctrlDiffBlock constructor \" + str(start) + \" \" + str(length))\n debug(\"\\n\".join(ctrldiff[start:start+length]))\n cblock = ctrlDiffBlock(ctrldiff[start:start+length])\n self.blocks.append(cblock)\n length = 1\n start = i\n else:\n length += 1\n i += 1\n #Dont forget the last block in the diff!\n if start != None:\n debug(\"Calling ctrlDiffBlock constructor \" + str(start) + \" \" + str(length))\n debug(\"\\n\".join(ctrldiff[start:start+length]))\n cblock = ctrlDiffBlock(ctrldiff[start:start+length])\n self.blocks.append(cblock)\n\n debug(\"Golden Lines:\\n\" + \"\\n\".join(goldenLines))\n debug(\"Faulty Lines:\\n\" + \"\\n\".join(faultyLines))\n\n debug(\"Removing ctrldiff ranges from lines\")\n for block in self.blocks:\n goldenLines, faultyLines = removeRangeFromLines(goldenLines, faultyLines, \\\n block.getRange(), self.startPoint)\n debug(\"Golden Lines:\\n\" + \"\\n\".join(goldenLines))\n debug(\"Faulty Lines:\\n\" + \"\\n\".join(faultyLines))\n\n goldenLines = [_f for _f in goldenLines if _f]\n faultyLines = [_f for _f in faultyLines if _f]\n\n\n datadiff = list(difflib.unified_diff(goldenLines, faultyLines, n=0, lineterm=''))\n\n if datadiff:\n datadiff.pop(0)\n datadiff.pop(0)\n\n #perform data diff analysis\n i = 0\n length = 1\n start = None\n\n while (i < len(datadiff)):\n if \"@@ \" in datadiff[i]:\n if start != None:\n self.blocks.append(diffBlock(datadiff[start:start+length]))\n length = 1\n start = i\n else:\n length += 1\n i += 1\n #Dont forget the last block in the diff!\n if start != None:\n self.blocks.append(diffBlock(datadiff[start:start+length]))\n\n\n def printSummary(self):\n #Sort the list of blocks by their starting point (wrt the golden trace)\n self.blocks.sort(key = lambda x: (x.getSummary(self.startPoint)).split(\"\\n\")[1].replace('\\\\',' ').split()[3])\n\n for block in self.blocks:\n if block.preDiff == None:\n block.preDiff = self.injectedID\n print(block.getSummary(self.startPoint))\n\ndef trimLinesToCtrlIDs(lines):\n i = 0\n while (i < len(lines)):\n words = lines[i].split()\n lines[i] = words[1]\n i += 1\n return lines\n\n\n\nclass diffLine:\n def __init__(self, rawLine):\n self.raw = rawLine\n elements = str(rawLine).split()\n #ID: 14\\tOPCode: sub\\tValue: 1336d337\n debug(\"RAWLINE: \" + str(rawLine), 3)\n assert (elements[0] in [\"ID:\",\"-ID:\",\"+ID:\"] and elements[2] == \"OPCode:\" and \\\n elements[4] == \"Value:\"), \"DiffLine constructor called incorrectly\"\n self.ID = int(elements[1])\n self.OPCode = str(elements[3])\n self.Value = 0\n if (len(elements) > 5):\n \tself.Value = int(elements[5],16)\n\n def _print(self):\n print(\"ID:\",self.ID, \"OPCode\", self.OPCode, \"Value:\", self.Value)\n\n def __str__(self):\n return self.raw\n\nclass faultReport:\n def __init__(self, lines):\n self.instNumber = -1\n self.faultCount = -1\n self.faultID = -1\n self.faultOPCode = ''\n self.goldValue = -1\n self.faultValues = []\n self.diffs = []\n\n if lines[0] == \"#FaultReport\\n\":\n header = 
lines[1].split()\n self.faultCount = int(header[0])\n self.instNumber = header[2]\n\n fault = lines[2].split()\n self.faultID = int(fault[1])\n self.faultOPCode = fault[3]\n self.goldValue = fault[5]\n for i in range(self.faultCount):\n self.faultValues.append(fault[7 + i])\n\n i = 3\n while (i < len(lines)):\n if \"Diff\" not in lines[i]:\n break\n else:\n string = str(lines[i])\n if \"@\" in lines[i]:\n string = \"\\n\" + string\n self.diffs.append(string)\n i += 1\n\n else:\n print(\"ERROR: Not a properly formed faultReport\")\n\n def union(self, other):\n if self.faultID == other.faultID:\n self.faultCount += other.faultCount\n self.diffs.extend(other.diffs)\n self.faultValues.extend(other.faultValues)\n\n def report(self):\n lines = []\n lines.append(\"#FaultReport\\n\")\n header = str(self.faultCount) + \" @ \" + str(self.instNumber) + \"\\n\"\n lines.append(header)\n faultline = \"ID: \" + str(self.faultID) + \" OPCode: \" + str(self.faultOPCode)\n faultline += \" Value: \" + str(self.goldValue) + \" / \"\n for val in self.faultValues:\n faultline += \" \" + str(val)\n faultline += '\\n'\n lines.append(faultline)\n lines.extend(self.diffs)\n return ''.join(lines)\n\n def getAffectedSet(self):\n affectedInsts = set()\n for diff in self.diffs:\n if \"@\" in diff:\n continue\n else:\n split = diff.split()\n if \"Data\" in diff:\n affectedInsts.add(int(split[3]))\n# if \"Ctrl\" in diff: #Commenting out to remove ctrl diff\n# if split[5] != \"None\": #affected instructions from being \n# affectedInsts.add(int(split[5])) #coloured on the graph\n if (int(self.faultID) in affectedInsts):\n affectedInsts.remove(int(self.faultID))\n return affectedInsts\n\n def getAffectedEdgesSet(self):\n affectedEdges = set()\n\n i = 0\n while i+1 < len(self.diffs):\n if \"Diff@\" in self.diffs[i] and \"Pre Diff\" in self.diffs[i+1]:\n csplit = self.diffs[i+2].split()\n edgeStart = int(self.diffs[i+1].split()[3])\n edgeEnd = None\n if csplit[5] != \"None\":\n edgeEnd = int(csplit[5])\n if (i+3 < len(self.diffs)):\n affectedEdges.add((edgeEnd, int(self.diffs[i+3].split()[5])))\n else:\n d = i + 2 #Adjusting so we dont check the find the pre diff of the diff@ instance we\n #are currently on.\n while d < len(self.diffs):\n if \"Post Diff\" in self.diffs[d]:\n edgeEnd = self.diffs[d].split()[3]\n d = len(self.diffs)\n elif \"Pre Diff\" in self.diffs[d]:\n d = len(self.diffs) #If we found a new ctrl diff block before finding a post diff, \n #exit the loop early\n d += 1\n affectedEdges.add((edgeStart, edgeEnd))\n i += 1\n\n return affectedEdges\n\ndef parseFaultReportsfromFile(target):\n reports = []\n reportFile = open(target, 'r')\n fileLines = reportFile.readlines()\n\n #Remove blank lines from list\n i = 0\n length = len(fileLines)\n while i < length:\n if not fileLines[i].strip():\n fileLines.pop(i)\n length -= 1\n i += 1\n\n #Parse the faultReports\n i = 0\n fileLineCount = len(fileLines)\n\n while (i < fileLineCount):\n if \"#FaultReport\" in fileLines[i]:\n temp = []\n temp.append(fileLines[i])\n i += 1\n while (\"#FaultReport\" not in fileLines[i]):\n temp.append(fileLines[i])\n i += 1\n if i >= fileLineCount:\n break\n reports.append(faultReport(temp))\n else:\n i += 1\n if i >= fileLineCount:\n break\n\n return reports\n\n" }, { "alpha_fraction": 0.7650063633918762, "alphanum_fraction": 0.7650063633918762, "avg_line_length": 25.100000381469727, "blob_id": "3be68e182a884ddea0c8cd3511bacfd8fab8aa7b", "content_id": "45fbc4a889be185d892549bafaed3c674990703e", "detected_licenses": [ 
"LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 783, "license_type": "permissive", "max_line_length": 86, "num_lines": 30, "path": "/gui/application/CompileToIrController.java", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "package application;\n\nimport java.net.URL;\nimport java.util.ResourceBundle;\n\nimport javafx.event.ActionEvent;\nimport javafx.fxml.FXML;\nimport javafx.fxml.Initializable;\nimport javafx.scene.Node;\nimport javafx.scene.control.Label;\nimport javafx.scene.input.KeyEvent;\nimport javafx.stage.Stage;\nimport application.Controller;\n\npublic class CompileToIrController implements Initializable{\n\t@FXML\n\tprivate Label compileFileText;\n\t@FXML\n\tprivate void onClickOkHandler(ActionEvent event){\n\t\tNode source = (Node) event.getSource(); \n\t Stage stage = (Stage) source.getScene().getWindow();\n\t stage.close();\n\t}\n\t@Override\n\tpublic void initialize(URL url, ResourceBundle rb) {\n\t\tcompileFileText.setText(Controller.currentProgramFolder+\".ll has been created !!!\");\n\t // TODO\n\t\t\n\t}\n}\n" }, { "alpha_fraction": 0.6565040946006775, "alphanum_fraction": 0.6585366129875183, "avg_line_length": 22.428571701049805, "blob_id": "b5c5c2c68f74399ce0b786008e54f2d4f45dfba3", "content_id": "61494013d4694c7e3165105bdd0e9af927dc4439", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 984, "license_type": "permissive", "max_line_length": 76, "num_lines": 42, "path": "/llvm_passes/hardware_failures/FuncNameFIInstSelector.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FUNC_NAME_FI_INST_SELECTOR_H\n#define FUNC_NAME_FI_INST_SELECTOR_H\n#include <set>\n#include <string>\n\n#include \"FIInstSelector.h\"\n\nusing namespace llvm;\nnamespace llfi {\n\nclass FuncNameFIInstSelector: public HardwareFIInstSelector {\n public:\n FuncNameFIInstSelector(std::set<std::string> *funclist) {\n this->funclist = funclist;\n }\n FuncNameFIInstSelector() {\n delete funclist;\n }\n virtual void getCompileTimeInfo(std::map<std::string, std::string>& info){\n info[\"failure_class\"] = \"HardwareFault\";\n info[\"failure_mode\"] = \"SpecifiedFunctions\";\n for(std::set<std::string>::iterator SI = funclist->begin();\n SI != funclist->end(); SI++){\n info[\"targets\"] += *SI + \"()/\";\n }\n //remove the '/' at the end\n info[\"targets\"] = info[\"targets\"].substr(0, info[\"targets\"].length()-1);\n info[\"injector\"] = \"<fi_type>\";\n }\n\n private:\n virtual bool isInstFITarget(Instruction* inst);\n\n private:\n std::set<std::string> *funclist;\n};\n\n}\n\n\n\n#endif\n" }, { "alpha_fraction": 0.5550888776779175, "alphanum_fraction": 0.5554119348526001, "avg_line_length": 34.988372802734375, "blob_id": "5bbc010428971fb7322f19eaa62e6ad4e320fc02", "content_id": "796a04831f486660b00ac3599a4ec079cd77c5ec", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3095, "license_type": "permissive", "max_line_length": 110, "num_lines": 86, "path": "/llvm_passes/software_failures/_Timing_HighFrequentEventSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Instructions.h\"\n#include 
\"llvm/Support/raw_ostream.h\"\n#include \"llvm/ADT/Statistic.h\"\n#include \"llvm/Support/CFG.h\"\n#include \"llvm/ADT/DepthFirstIterator.h\"\n#include \"llvm/ADT/GraphTraits.h\"\n\n#include \"FIInstSelector.h\"\n#include \"FICustomSelectorManager.h\"\n#include \"_SoftwareFaultRegSelectors.h\"\n\n#include <fstream>\n#include <iostream>\n#include <set>\n#include <string>\n\nusing namespace llvm;\nnamespace llfi {\n class _Timing_HighFrequentEventInstSelector : public SoftwareFIInstSelector {\n public:\n _Timing_HighFrequentEventInstSelector() {\n if (funcNames.size() == 0) {\n funcNames.insert(std::string(\"fread\"));\n funcNames.insert(std::string(\"fopen\"));\n funcNames.insert(std::string(\"fwrite\"));\n }\n }\n \n virtual void getCompileTimeInfo(std::map<std::string, std::string>& info){\n info[\"failure_class\"] = \"Timing\";\n info[\"failure_mode\"] = \"HighFrequentEvent\";\n for(std::set<std::string>::iterator SI = funcNames.begin(); SI != funcNames.end(); SI++) {\n info[\"targets\"] += *SI + \"()/\";\n }\n info[\"targets\"] += \"return\";\n info[\"injector\"] = \"SleepInjector\";\n }\n\n private:\n static std::set<std::string> funcNames;\n\n virtual bool isInstFITarget(Instruction* inst) {\n if (isa<CallInst>(inst)) {\n CallInst* CI = dyn_cast<CallInst>(inst);\n Function* called_func = CI->getCalledFunction();\n if (called_func == NULL) {\n return false;\n }\n std::string func_name = std::string(called_func->getName());\n if (funcNames.find(func_name) != funcNames.end()) {\n return true;\n } else {\n return false;\n }\n } else {\n return isa<ReturnInst>(inst);\n }\n }\n };\n \n std::set<std::string> _Timing_HighFrequentEventInstSelector::funcNames;\n\n class _Timing_HighFrequentEventRegSelector : public SoftwareFIRegSelector {\n private:\n virtual bool isRegofInstFITarget(Value *reg, Instruction *inst) {\n if (isa<CallInst>(inst)) {\n CallInst* CI = dyn_cast<CallInst>(inst);\n Function* called_func = CI->getCalledFunction();\n if (called_func == NULL) {\n return false;\n }\n return reg == CI; // selects dst register\n } else if (isa<ReturnInst>(inst)) {\n ReturnInst* RI = dyn_cast<ReturnInst>(inst);\n return reg == RI->getReturnValue();\n } else {\n return false;\n }\n }\n };\n \n static RegisterFIInstSelector A(\"HighFrequentEvent(Timing)\", new _Timing_HighFrequentEventInstSelector());\n static RegisterFIRegSelector B(\"HighFrequentEvent(Timing)\", new _Timing_HighFrequentEventRegSelector());\n}\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 17.736841201782227, "blob_id": "ea6a5b65eded7705c5c10620c71f4d85f1b15f6d", "content_id": "9ec1f8c4323a1ffedfc0836cc3c0c7f3019fb205", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 355, "license_type": "permissive", "max_line_length": 56, "num_lines": 19, "path": "/web-app/views/src/js/components/body.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar Tabs = require('./tabs');\nvar Tools = require('./tools');\nvar WindowLayout = require('./mainWindow/windowLayout');\n\nvar Body = React.createClass({\n\trender: function() {\n\t\treturn (\n\t\t\t<div className = \"body\">\n\t\t\t\t<Tabs></Tabs>\n\t\t\t\t<Tools></Tools>\n\t\t\t\t<hr/>\n\t\t\t\t<WindowLayout/>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = Body;" }, { "alpha_fraction": 0.7435897588729858, "alphanum_fraction": 0.7435897588729858, 
"avg_line_length": 21.428571701049805, "blob_id": "2084130a3f37efcde220979a422ba26da903787b", "content_id": "887301202f99d53efd343aa37bca06dfcc7f1825", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 156, "license_type": "permissive", "max_line_length": 49, "num_lines": 7, "path": "/web-app/views/src/js/actions/injectionModeActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar injectionModeActions = Reflux.createActions([\n 'injectionModeChange'\n ]);\n\nmodule.exports = injectionModeActions;" }, { "alpha_fraction": 0.5344827771186829, "alphanum_fraction": 0.5344827771186829, "avg_line_length": 15.619047164916992, "blob_id": "8c9727c14654f572c40d138cf3dc9dd55d25d0cf", "content_id": "f6c54efa1503dda4ad207423e7d7c68d774de7e6", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 348, "license_type": "permissive", "max_line_length": 33, "num_lines": 21, "path": "/web-app/views/src/js/components/tabs.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\n\n\nvar Tabs = React.createClass({\n\trender: function() {\n\t\treturn (\n\t\t\t<div className = \"tabs\">\n\t\t\t\t<ul className=\"nav nav-tabs\">\n\t\t\t\t\t<li className=\"active\">\n\t\t\t\t\t\t<a href=\"#\">Standard</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li className=\"disabled\">\n\t\t\t\t\t\t<a>Advanced</a>\n\t\t\t\t\t</li>\n\t\t\t\t</ul>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = Tabs;" }, { "alpha_fraction": 0.6416058540344238, "alphanum_fraction": 0.6518248319625854, "avg_line_length": 28.782608032226562, "blob_id": "eb3f22889f001607d00004cbce5d2855f4469dbb", "content_id": "7d388ebca612d2ef369e6e680328052b02d710cd", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1370, "license_type": "permissive", "max_line_length": 79, "num_lines": 46, "path": "/runtime_lib/ProfilingLib.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <assert.h>\n\n#include \"Utils.h\"\n\nstatic long long opcodecount[OPCODE_CYCLE_ARRAY_LEN] = {0};\n\nvoid doProfiling(int opcode) {\n assert(opcodecount[opcode] >= 0 && \n \"dynamic instruction number too large to be handled by llfi\");\n opcodecount[opcode]++;\n}\n\nvoid endProfiling() {\n FILE *profileFile;\n char profilefilename[80] = \"llfi.stat.prof.txt\";\n profileFile = fopen(profilefilename, \"w\");\n if (profileFile == NULL) {\n fprintf(stderr, \"ERROR: Unable to open profiling result file %s\\n\", \n profilefilename);\n exit(1);\t\n }\n\n int opcode_cycle_arr[OPCODE_CYCLE_ARRAY_LEN];\n getOpcodeExecCycleArray(OPCODE_CYCLE_ARRAY_LEN, opcode_cycle_arr);\n\n unsigned i = 0;\n long long total_cycle = 0;\n for (i = 0; i < 100; ++i) {\n assert(total_cycle >= 0 && \n \"total dynamic instruction cycle too large to be handled by llfi\");\n if (opcodecount[i] > 0) {\t\n assert(opcode_cycle_arr[i] >= 0 && \n \"opcode does not exist, need to update instructions.def\");\n total_cycle += opcodecount[i] * opcode_cycle_arr[i];\n }\n }\n\n fprintf(profileFile, \"# do not edit\\n\");\n fprintf(profileFile, \n \"# cycle considered the execution cycle of 
each instruction type\\n\");\n fprintf(profileFile, \"total_cycle=%lld\\n\", total_cycle);\n\tfclose(profileFile); \n}\n" }, { "alpha_fraction": 0.7537091970443726, "alphanum_fraction": 0.7537091970443726, "avg_line_length": 27.16666603088379, "blob_id": "4e2f223d7a61aef757c723fa47ad0f2294f677ea", "content_id": "fe17d531ddaf3eba7828184b81f050ac3d2961bc", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 337, "license_type": "permissive", "max_line_length": 74, "num_lines": 12, "path": "/web-app/views/src/js/stores/targetFileNameStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar targetFileNameActions = require(\"./../actions/targetFileNameActions\");\nvar fileName ='';\nvar targetFileNameStore = Reflux.createStore({\n\tlistenables: [targetFileNameActions],\n\tonChangeFileName: function(Name) {\n\t\tfileName = Name;\n\t\tthis.trigger(fileName);\n\t}\n});\n\nmodule.exports = targetFileNameStore;" }, { "alpha_fraction": 0.6514523029327393, "alphanum_fraction": 0.652282178401947, "avg_line_length": 23.079999923706055, "blob_id": "f95c7796330267abbb23ff358a984838d198b076", "content_id": "d98f6df3d4a40b10ad652d121f683391e3b41dc0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1205, "license_type": "permissive", "max_line_length": 76, "num_lines": 50, "path": "/llvm_passes/core/GenLLFIIndexPass.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/Support/InstIterator.h\"\n#include \"llvm/IR/Instruction.h\"\n#include <cstdio>\n\n#include \"Utils.h\"\n\nusing namespace llvm;\nnamespace llfi {\nclass GenLLFIIndexPass: public ModulePass {\n public:\n GenLLFIIndexPass() : ModulePass(ID) {}\n\tvirtual bool runOnModule(Module &M);\n\tstatic char ID;\n};\n\nchar GenLLFIIndexPass::ID = 0;\nstatic RegisterPass<GenLLFIIndexPass> X(\n \"genllfiindexpass\", \"Generate a unique LLFI index for each instruction\",\n false, false);\n\nbool GenLLFIIndexPass::runOnModule(Module &M) {\n Instruction *currinst;\n\n for (Module::iterator m_it = M.begin(); m_it != M.end(); ++m_it) {\n if (!m_it->isDeclaration()) {\n //m_it is a function \n for (inst_iterator f_it = inst_begin(m_it); f_it != inst_end(m_it);\n ++f_it) {\n currinst = &(*f_it);\n setLLFIIndexofInst(currinst);\n }\n }\n }\n \n if (currinst) {\n long totalindex = getLLFIIndexofInst(currinst);\n FILE *outputFile = fopen(\"llfi.stat.totalindex.txt\", \"w\");\n if (outputFile)\n fprintf(outputFile, \"totalindex=%ld\\n\", totalindex);\n\n fclose(outputFile);\n }\n\n return true;\n}\n\n}\n\n" }, { "alpha_fraction": 0.6225225329399109, "alphanum_fraction": 0.6225225329399109, "avg_line_length": 25.452381134033203, "blob_id": "0f2aa4cfeb8df504e3c7f6354516709b8fc3277a", "content_id": "a96093c19909254e32d2b79f9b0873657b77ad15", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1110, "license_type": "permissive", "max_line_length": 80, "num_lines": 42, "path": "/web-app/views/src/js/components/mainWindow/bottomPannel/outputTabs/profilingStatus.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": 
"var React = require(\"react\");\nvar profilingStatusStore = require(\"./../../../../stores/profilingStatusStore\");\nvar Reflux = require(\"reflux\");\n\nvar ProfilingStatus = React.createClass({\n\tmixins: [Reflux.connect(profilingStatusStore,\"profilingStatus\")],\n\tgetInitialState: function() {\n\t\treturn {\n\t\t\tprofilingStatus: []\n\t\t};\n\t},\n\trender: function() {\n\t\tvar className = \"profilingStatus\" + (this.props.shouldDisplay ? \"\" : \" hide\");\n\t\tvar profilingRows = this.state.profilingStatus.map(function(data, index) {\n\t\t\treturn (\n\t\t\t\t<tr key={index}>\n\t\t\t\t\t<td class=\"failureType\">{data.type}</td>\n\t\t\t\t\t<td class=\"lastIndex\">{data.lastIndex}</td>\n\t\t\t\t\t<td class=\"lastCycle\">{data.lastCycle}</td>\n\t\t\t\t</tr>\n\t\t\t);\n\t\t});\n\t\treturn (\n\t\t\t<div class={className}>\n\t\t\t\t<table class=\"table table-hover profilingStatusTable\">\n\t\t\t\t\t<thead>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th class=\"failureType\">Failure Type</th>\n\t\t\t\t\t\t\t<th class=\"lastIndex\">Last Index</th>\n\t\t\t\t\t\t\t<th class=\"lastCycle\">Last Cycle</th>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t</thead>\n\t\t\t\t\t<tbody>\n\t\t\t\t\t\t{profilingRows}\n\t\t\t\t\t</tbody>\n\t\t\t\t</table>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = ProfilingStatus;" }, { "alpha_fraction": 0.578820526599884, "alphanum_fraction": 0.5813722610473633, "avg_line_length": 29.669565200805664, "blob_id": "a1d421ce6903d6cc56d07abb87a14a32e990e6be", "content_id": "3206dbfea244b488752aa4af216e367cac4b9102", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7054, "license_type": "permissive", "max_line_length": 134, "num_lines": 230, "path": "/llvm_passes/core/LLFIDotGraphPass.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <vector>\n#include <cmath>\n#include <iostream>\n#include <fstream>\n#include <string>\n\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/GlobalValue.h\"\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/Support/Debug.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/InstIterator.h\"\n#include \"llvm/Support/CommandLine.h\"\n#include \"llvm/IR/DataLayout.h\"\n#include \"llvm/DebugInfo.h\"\n#include \"Utils.h\"\n\n#define DATADEPCOLOUR \"blue\"\n\nusing namespace llvm;\n\nnamespace llfi {\n\nstruct instNode {\n std::string name, label; \n Instruction *raw;\n std::string dotNode();\n instNode(Instruction *target);\n};\n\ninstNode::instNode(Instruction *target) {\n raw = target;\n\n long llfiID = llfi::getLLFIIndexofInst(target);\n name = \"llfiID_\" + longToString(llfiID);\n FILE *outputFile = fopen(\"llfi.index.map.txt\", \"a\");\n\n label = std::string(\" [shape=record,label=\\\"\") + longToString(llfiID);\n label += std::string(\"\\\\n\") + target->getOpcodeName() + \"\\\\n\";\n if (target->getDebugLoc().getLine()) {\n label += \"(Line #: \" + intToString(target->getDebugLoc().getLine()) + \")\\\\n\";\n if (MDNode *N= target->getMetadata(\"dbg\")){\n label += \"(In File: \" + DILocation (N).getFilename().str().substr(DILocation (N).getFilename().str().find_last_of(\"/\\\\\")+1)+\")\";\n }\n if (outputFile)\n fprintf(outputFile, \"%s line_%s\\n\", name.c_str(),intToString(target->getDebugLoc().getLine()).c_str());\n }\n 
else{\n if (outputFile)\n fprintf(outputFile, \"%s line_N/A\\n\", name.c_str());\n }\n label += \"\\\"]\";\n}\n\nstd::string instNode::dotNode() {\n return name + label;\n}\n\nstruct bBlockGraph {\n BasicBlock* raw;\n std::string name;\n std::string funcName;\n std::vector<instNode> instNodes;\n Instruction* entryInst;\n Instruction* exitInst;\n bBlockGraph(BasicBlock *target);\n bool addInstruction(Instruction* inst);\n bool writeToStream(std::ofstream &target);\n};\n\nbBlockGraph::bBlockGraph(BasicBlock *BB) {\n raw = BB;\n name = BB->getName().str();\n funcName = BB->getParent()->getName().str();\n BasicBlock::iterator lastInst;\n for (BasicBlock::iterator instIterator = BB->begin(),\n lastInst = BB->end();\n instIterator != lastInst;\n ++instIterator) {\n\n Instruction *inst = instIterator;\n\n addInstruction(inst);\n }\n entryInst = &(BB->front());\n exitInst = &(BB->back());\n}\nbool bBlockGraph::addInstruction(Instruction* inst) {\n instNodes.push_back(instNode(inst));\n\n return true;\n}\n\nbool bBlockGraph::writeToStream(std::ofstream &target) {\n target << \"subgraph cluster_\" << funcName << \"_\" << name << \" {\\n\";\n target << \"label = \\\"\" << funcName << \"_\" << name << \"\\\";\\n\";\n for (unsigned int i = 0; i < instNodes.size(); i++) {\n target << instNodes.at(i).dotNode() << \";\\n\";\n }\n target << \"}\\n\";\n for (unsigned int i = 1; i < instNodes.size(); i++) {\n target << instNodes.at(i-1).name << \" -> \" << instNodes.at(i).name << \";\\n\";\n }\n return true;\n}\n\nstruct llfiDotGraph : public FunctionPass {\n static char ID;\n std::ofstream outfs;\n llfiDotGraph() : FunctionPass(ID) {}\n\n virtual bool doInitialization(Module &M) {\n outfs.open(\"llfi.stat.graph.dot\", std::ios::trunc);\n outfs << \"digraph \\\"LLFI Program Graph\\\" {\\n\";\n\n return false;\n }\n\n virtual bool doFinalization(Module &M) {\n outfs << \"{ rank = sink;\"\n \"Legend [shape=none, margin=0, label=<\"\n \"<TABLE BORDER=\\\"0\\\" CELLBORDER=\\\"1\\\" CELLSPACING=\\\"0\\\" CELLPADDING=\\\"4\\\">\"\n \" <TR>\"\n \" <TD COLSPAN=\\\"2\\\"><B>Legend</B></TD>\"\n \" </TR>\"\n \" <TR>\"\n \" <TD>Correct Control Flow</TD>\"\n \" <TD><FONT COLOR=\\\"black\\\"> solid arrow </FONT></TD>\"\n \" </TR>\"\n \" <TR>\"\n \" <TD>Data Dependency</TD>\"\n \" <TD><FONT COLOR=\\\"blue\\\"> solid arrow </FONT></TD>\"\n \" </TR>\"\n \" <TR>\"\n \" <TD>Error Propagation Flow</TD>\"\n \" <TD><FONT COLOR=\\\"red\\\">solid arrow </FONT></TD>\"\n \" </TR>\"\n \" <TR>\"\n \" <TD>The Instruction(s) Affected by Fault Injection </TD>\"\n \" <TD BGCOLOR=\\\"YELLOW\\\"></TD>\"\n \" </TR>\"\n \" <TR>\"\n \" <TD>The Instruction(s) LLFI Injects Faults to</TD>\"\n \" <TD BGCOLOR=\\\"red\\\"></TD>\"\n \" </TR>\"\n \"</TABLE>\"\n \">];\"\n \"}\";\n outfs << \"}\\n\";\n outfs.close();\n return false;\n }\n\n virtual bool runOnFunction(Function &F) {\n //Create handles to the functions parent module and context\n LLVMContext &context = F.getContext();\n\n std::vector<bBlockGraph> blocks;\n\n Function::iterator lastBlock;\n //iterate through each basicblock of the function\n for (Function::iterator blockIterator = F.begin(), lastBlock = F.end();\n blockIterator != lastBlock; ++blockIterator) {\n\n BasicBlock* block = blockIterator;\n\n bBlockGraph b(block);\n blocks.push_back(b);\n }\n for (unsigned int i = 0; i < blocks.size(); i++) {\n bBlockGraph currBlock = blocks.at(i);\n for (unsigned int i = 0; i < currBlock.instNodes.size(); i++) {\n Instruction *inst = currBlock.instNodes.at(i).raw;\n std::string nodeName = 
currBlock.instNodes.at(i).name;\n instNode node = currBlock.instNodes.at(i);\n if (!inst->use_empty()) {\n // TODO: optimize the algorithm below later\n for (value_use_iterator<User> useIter = inst->use_begin();\n useIter != inst->use_end(); useIter++) {\n Value* userValue = *useIter;\n for (unsigned int f = 0; f < blocks.size(); f++) {\n bBlockGraph searchBlock = blocks.at(f);\n for (unsigned int d = 0; d < searchBlock.instNodes.size(); d++) {\n Instruction* targetInst = searchBlock.instNodes.at(d).raw;\n if (userValue == targetInst) {\n instNode targetNode = searchBlock.instNodes.at(d);\n outfs << nodeName << \" -> \" << targetNode.name;\n outfs << \" [color=\\\"\" << DATADEPCOLOUR << \"\\\"];\\n\";\n }\n }\n }\n }\n }\n }\n }\n\n for (unsigned int i = 0; i < blocks.size(); i++) {\n bBlockGraph block = blocks.at(i);\n block.writeToStream(outfs);\n if (block.exitInst->getOpcode() == Instruction::Br) {\n BranchInst* exitInst = (BranchInst*)block.exitInst;\n for (unsigned int s = 0; s < exitInst->getNumSuccessors(); s++) {\n BasicBlock* succ = exitInst->getSuccessor(s);\n for (unsigned int d = 0; d < blocks.size(); d++) {\n if (blocks.at(d).raw == succ) {\n std::string from = block.instNodes.back().name;\n std::string to = blocks.at(d).instNodes.front().name;\n outfs << from << \" -> \" << to << \";\\n\";\n }\n }\n }\n }\n }\n\n return false;\n }\n\n};\n\n//Register the pass with the llvm\nchar llfiDotGraph::ID = 0;\nstatic RegisterPass<llfiDotGraph> X(\"dotgraphpass\", \n \"Outputs a dot graph of instruction execution at runtime\", false, false);\n\n}\n" }, { "alpha_fraction": 0.7360000014305115, "alphanum_fraction": 0.7440000176429749, "avg_line_length": 27.923076629638672, "blob_id": "876dbdd69f3fb4e16f7f3ad9910dd528aa2e4c09", "content_id": "cfa45013c7b9e73456619f8429302c7ec5792072", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 375, "license_type": "permissive", "max_line_length": 70, "num_lines": 13, "path": "/web-app/views/src/js/stores/faultSummaryStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar faultSummaryActions = require(\"./../actions/faultSummaryActions\");\nvar faultSummary = {SDC: 0, Hanged: 0, Crashed: 0};\nvar faultSummaryStore = Reflux.createStore({\n\tlistenables: [faultSummaryActions],\n\n\tonUpdateFaultSummary: function(data) {\n\t\tfaultSummary = data;\n\t\tthis.trigger(faultSummary);\n\t},\n});\n\nmodule.exports = faultSummaryStore;" }, { "alpha_fraction": 0.7250000238418579, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 40, "blob_id": "36884e1226fd303ff591e60c5cd5fa9822af9a71", "content_id": "8775de27c8f76d0b88bc90f3fe00bc29dfe25ba7", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 40, "license_type": "permissive", "max_line_length": 40, "num_lines": 1, "path": "/web-app/server/utils/config.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "exports.LLFI_BUILD_ROOT = \"$llfibuild/\";" }, { "alpha_fraction": 0.5785340070724487, "alphanum_fraction": 0.5785340070724487, "avg_line_length": 13.692307472229004, "blob_id": "10e595d3f169ec6e811cb6fea585022317d8768f", "content_id": "dee151ec73c089139eafb3603a4192a38ce61091", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": 
false, "is_vendor": false, "language": "Makefile", "length_bytes": 382, "license_type": "permissive", "max_line_length": 40, "num_lines": 26, "path": "/test_suite/PROGRAMS/mpi/Makefile", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "## target\nCLIENT=echoClient\nSERVER=echoServer\n\n## llvm root and clang\ninclude ../Makefile.common\n\nLINKED = $(CLIENT).bc\nLL_FILE = $(CLIENT).ll\nEXE_FILE = $(SERVER).exe\n## other choice\ndefault: all\n\nall: $(LL_FILE) $(EXE_FILE)\n\n%.exe: %.c\n\t$(LLVMGCC) $< -o $@\n\n%.ll: %.bc\n\t$(LLVMDIS) $< -o $@\n\n%.bc:%.c\n\t$(LLVMGCC) $(COMPILE_FLAGS) $< -c -o $@\n\nclean:\n\t$(RM) -f *.bc *.ll *.bc *.exe\n" }, { "alpha_fraction": 0.6985530257225037, "alphanum_fraction": 0.6996248364448547, "avg_line_length": 31.172412872314453, "blob_id": "813110c394dae4c59d59f6062f83ffa56a8f7e71", "content_id": "9aa32ea55e981f47ff2a2b6ed925eb277c55a320", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3732, "license_type": "permissive", "max_line_length": 78, "num_lines": 116, "path": "/llvm_passes/core/FICustomSelectorManager.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/Support/raw_ostream.h\"\n\n#include \"FIInstSelector.h\"\n#include \"FIRegSelector.h\"\n#include \"FICustomSelectorManager.h\"\n\nnamespace llfi {\n\n// fault injection instruction selector manager\nFICustomInstSelectorManager\n *FICustomInstSelectorManager::getCustomInstSelectorManager() {\n static FICustomInstSelectorManager instsel_manager;\n return &instsel_manager; \n}\n\nvoid FICustomInstSelectorManager::addCustomInstSelector(\n const std::string &name, FIInstSelector *instselector) {\n if (optionname_instselector.find(name) == optionname_instselector.end()) {\n optionname_instselector[name] = instselector;\n } else {\n errs() << \"ERROR: Duplicate custom fault injection instruction selector: \"\n << name << \"\\n\";\n exit(1);\n }\n}\n\nvoid FICustomInstSelectorManager::getAllSoftwareSelectors(\n std::set<std::string>& all_software_failure_names){\n for(std::map<const std::string, FIInstSelector* >::iterator it = \n optionname_instselector.begin(); it != optionname_instselector.end(); \n ++it) {\n if(it->second->getInstSelectorClass() == std::string(\"SoftwareFault\")){\n all_software_failure_names.insert(it->first);\n }\n }\n return;\n}\n\nvoid FICustomInstSelectorManager::getAllHardwareSelectors(\n std::set<std::string>& all_hardware_failure_names){\n for(std::map<const std::string, FIInstSelector* >::iterator it = \n optionname_instselector.begin(); it != optionname_instselector.end(); \n ++it) {\n if(it->second->getInstSelectorClass() == std::string(\"HardwareFault\")){\n all_hardware_failure_names.insert(it->first);\n }\n }\n return;\n}\n\nFIInstSelector *FICustomInstSelectorManager::getCustomInstSelector(\n const std::string &name) {\n if (optionname_instselector.find(name) != optionname_instselector.end()) {\n return optionname_instselector[name];\n } else {\n errs() << \"ERROR: Unknown custom fault injection instruction selector: \"\n << name << \"\\n\";\n exit(1);\n }\n}\n\n\n// fault injection register selector manager\nFICustomRegSelectorManager\n *FICustomRegSelectorManager::getCustomRegSelectorManager() {\n static FICustomRegSelectorManager regsel_manager;\n return &regsel_manager;\n}\n\nvoid FICustomRegSelectorManager::addCustomRegSelector(\n const std::string &name, FIRegSelector *regselector) 
{\n if (optionname_regselector.find(name) == optionname_regselector.end()) {\n optionname_regselector[name] = regselector;\n } else {\n errs() << \"ERROR: Duplicate custom fault injection register selector: \"\n << name << \"\\n\";\n exit(1);\n }\n}\n\nFIRegSelector *FICustomRegSelectorManager::getCustomRegSelector(\n const std::string &name) {\n if (optionname_regselector.find(name) != optionname_regselector.end()) {\n return optionname_regselector[name];\n } else {\n errs() << \"ERROR: Unknown custom fault injection register selector: \" <<\n name << \"\\n\";\n exit(1);\n }\n}\n\nvoid FICustomRegSelectorManager::getAllSoftwareSelectors(\n std::set<std::string>& all_software_failure_names){\n for(std::map<const std::string, FIRegSelector* >::iterator it = \n optionname_regselector.begin(); it != optionname_regselector.end(); \n ++it) {\n if(it->second->getRegSelectorClass() == std::string(\"SoftwareFault\")){\n all_software_failure_names.insert(it->first);\n }\n }\n return;\n}\n\nvoid FICustomRegSelectorManager::getAllHardwareSelectors(\n std::set<std::string>& all_hardware_failure_names){\n for(std::map<const std::string, FIRegSelector* >::iterator it = \n optionname_regselector.begin(); it != optionname_regselector.end(); \n ++it) {\n if(it->second->getRegSelectorClass() == std::string(\"HardwareFault\")){\n all_hardware_failure_names.insert(it->first);\n }\n }\n return;\n}\n\n}\n" }, { "alpha_fraction": 0.6266015768051147, "alphanum_fraction": 0.6269066333770752, "avg_line_length": 39.48147964477539, "blob_id": "e76edf1d3e9b585a1f26a469f157e74c330f3226", "content_id": "1782cfa1cf1a306fe5334d71f7c74d6f5f2b8868", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3278, "license_type": "permissive", "max_line_length": 127, "num_lines": 81, "path": "/llvm_passes/HardwareFailureAutoScanPass.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#define DEBUG_TYPE \"HardwareFailureAutoScanPass\"\n\n#include \"FICustomSelectorManager.h\"\n#include \"Utils.h\"\n#include \"FIInstSelectorManager.h\"\n#include \"FIInstSelector.h\"\n#include \"InstTypeFIInstSelector.h\"\n#include \"FuncNameFIInstSelector.h\"\n#include \"FIRegSelector.h\"\n#include \"RegLocBasedFIRegSelector.h\"\n\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/CommandLine.h\"\n\n#include <fstream>\n#include <iostream>\n\nusing namespace llvm;\nnamespace llfi{\n static cl::opt< std::string > outputpath(\"hardwarescan_outputfilename\",\n cl::desc(\"The path to store a list of applicable software failures\"),\n cl::init(\"llfi.applicable.hardware.selectors.txt\"));\n\n class HardwareFailureAutoScanPass: public ModulePass{\n private:\n std::ofstream selector_record_file;\n public:\n static char ID;\n HardwareFailureAutoScanPass():ModulePass(ID){}\n virtual bool runOnModule(Module &M){\n selector_record_file.open(std::string(outputpath).c_str(), std::ofstream::out);\n\n FICustomInstSelectorManager *im = FICustomInstSelectorManager::getCustomInstSelectorManager();\n FICustomRegSelectorManager *rm = FICustomRegSelectorManager::getCustomRegSelectorManager();\n std::set<std::string> all_hardware_inst_selector_names;\n im->getAllHardwareSelectors(all_hardware_inst_selector_names);\n all_hardware_inst_selector_names.insert(std::string(\"insttype\"));\n 
all_hardware_inst_selector_names.insert(std::string(\"funcname\"));\n\n std::set<std::string> all_hardware_reg_selector_names;\n rm->getAllHardwareSelectors(all_hardware_reg_selector_names);\n all_hardware_reg_selector_names.insert(std::string(\"regloc\"));\n // errs()<<\"get all soft failures\\n\";\n\n recordString(std::string(\"instSelMethod:\"));\n for(std::set<std::string>::iterator name = all_hardware_inst_selector_names.begin();\n name != all_hardware_inst_selector_names.end(); name++){\n {\n recordString(std::string(\" - \") + *name);\n }\n }\n\n recordString(std::string(\"regSelMethod:\"));\n for(std::set<std::string>::iterator name = all_hardware_reg_selector_names.begin();\n name != all_hardware_reg_selector_names.end(); name++){\n {\n recordString(std::string(\" - \") + *name);\n }\n }\n selector_record_file.close();\n }\n\n void recordString(std::string str){\n if(selector_record_file.is_open() == false){\n std::cerr<<\"ERROR: can not open file to record applicable selectors: \";\n std::cerr<<outputpath<<\"\\n\";\n selector_record_file.close();\n return;\n }\n selector_record_file<<str<<\"\\n\";\n return;\n }\n };\n char HardwareFailureAutoScanPass::ID = 0;\n static RegisterPass<HardwareFailureAutoScanPass> \n X(\"HardwareFailureAutoScanPass\", \"Automatic scanner of hardware failure modes (instruction selectors, reg selectors)\", \n false, false);\n}" }, { "alpha_fraction": 0.6471647024154663, "alphanum_fraction": 0.6480647921562195, "avg_line_length": 35.426231384277344, "blob_id": "ede793e194997d053f14ca2b7969dc5ef576bf5a", "content_id": "92db4b5a9e051ba9944441186bc2296514fd7a24", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2222, "license_type": "permissive", "max_line_length": 93, "num_lines": 61, "path": "/runtime_lib/FaultInjectorManager.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cstdlib>\n#include \"FaultInjector.h\"\n#include \"FaultInjectorManager.h\"\n\nFaultInjectorManager *FaultInjectorManager::getFaultInjectorManager() {\n static FaultInjectorManager fi_manager;\n return &fi_manager;\n}\n\nvoid FaultInjectorManager::addFaultInjector(const std::string &name,\n FaultInjector *fi) {\n //debug((\"enter add fault injector\\n\"));\n if (type_injector.find(name) == type_injector.end()) {\n type_injector.insert(\n std::pair<const std::string, FaultInjector*>(name, fi));\n } else {\n std::cerr << \"ERROR: Duplicated fault injector: \" << name << std::endl;\n exit(1);\n }\n}\n\nFaultInjector *FaultInjectorManager::getFaultInjector(const std::string &name) {\n if (type_injector.find(name) != type_injector.end()) {\n return type_injector[name];\n } else {\n std::cerr << \"ERROR: unknown fault injector: \" << name << std::endl;\n exit(1);\n }\n}\n\nstd::vector<std::string> FaultInjectorManager::getAllInjectorNames(){\n std::vector<std::string> names;\n for(std::map<const std::string, FaultInjector* >::iterator MI = type_injector.begin();\n MI != type_injector.end(); MI++){\n names.push_back(MI->first);\n }\n return names;\n}\n\nstd::vector<std::string> FaultInjectorManager::getInjectorNamesForType(std::string type_str){\n std::vector<std::string> names; \n // std::cout << \"start of getInjectorNamesForType()\\n\";\n for(std::map<const std::string, FaultInjector* >::iterator MI = type_injector.begin();\n MI != type_injector.end(); MI++){\n // std::cout << \"checking:\" << MI->first << \"pointer 
addr: \" << (MI->second) << \"\\n\"; \n // std::cout << \" type: \" << MI->second->getFaultInjectorType() << \"\\n\";\n if(type_str == MI->second->getFaultInjectorType())\n names.push_back(MI->first);\n }\n // std::cout << \"end of getInjectorNamesForType()\\n\";\n return names;\n}\n\n\nextern \"C\" void injectFaultImpl(const char *fi_type, long llfi_index, \n unsigned size, unsigned fi_bit, char *buf) {\n FaultInjectorManager *m = FaultInjectorManager::getFaultInjectorManager();\n FaultInjector *fi = m->getFaultInjector(fi_type);\n fi->injectFault(llfi_index, size, fi_bit, buf);\n}\n" }, { "alpha_fraction": 0.6816731691360474, "alphanum_fraction": 0.7181850671768188, "avg_line_length": 29.65217399597168, "blob_id": "42be7c3af7de8b871aa54d4682673eaa29b266dc", "content_id": "c5a19b3ec54987575c30340da8f0685d25080a96", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2821, "license_type": "permissive", "max_line_length": 135, "num_lines": 92, "path": "/WATERS_auto_conf.sh", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n####################### Readme #######################\n# This script is for initializing a WATERS server.\n# Basic development tools and CMake 2.8 will be inst-\n# alled. LLVM 2.9 and a custom version of LLFI(for LL-\n# VM 2.9) will be downloaded, build and installed.\n# The default user account for this script is 'root'.\n# So that it does not involve typing any password dur-\n# ing the process\n#\n# Author: Qining\n######################################################\n\nexport MAINDIR=/home\n\n## Install basic pacakges\nyum update -y\nyum install -y nautilus-open-terminal\nyum install -y xauth\nyum install -y dbus-x11\nyum groupinstall -y 'Fonts'\nyum install -y gedit\nyum groupinstall -y 'Development Tools'\nyum install -y wget\nyum install -y git\n\n## Install CMake 2.8\ncd $MAINDIR\nmkdir cmake\nwget http://www.cmake.org/files/v2.8/cmake-2.8.12.2-Linux-i386.sh\nsh ./cmake-2.8.12.2-Linux-i386.sh --prefix=$MAINDIR/cmake --exclude-subdir\necho \"export PATH=/home/cmake/bin:\\$PATH\">>/root/.bashrc\nsource /root/.bashrc\ncmake --version\n\nmkdir $MAINDIR/Downloads\n\n## Compile, build and install llvm-3.4 and clang\ncd $MAINDIR/Downloads\nwget http://llvm.org/releases/3.4/llvm-3.4.src.tar.gz\nwget http://llvm.org/releases/3.4/clang-3.4.src.tar.gz\ntar -xvzf llvm-3.4.src.tar.gz\ntar -xvzf clang-3.4.src.tar.gz\nmv llvm-3.4 $MAINDIR/llvmsrc\nmv clang-3.4 /home/llvmsrc/tools\nmkdir $MAINDIR/llvm\ncd $MAINDIR/llvm\ncmake ../llvmsrc -DLLVM_REQUIRES_RTTI=1\nmake -j24\n#make install\n#echo \"export PATH=/home/llvm/bin:\\$PATH\">>/root/.bashrc\n\n## Install python3\ncd $MAINDIR/Downloads\nwget http://www.python.org/ftp/python/3.3.2/Python-3.3.2.tar.bz2\ntar jxvf Python-3.3.2.tar.bz2\ncd Python-3.3.2\n./configure\n./make -j24\n./make install\n\n# install pyyaml\ncd $MAINDIR/Downloads\nwget http://pyyaml.org/download/pyyaml/PyYAML-3.11.tar.gz\ntar -xvzf PyYAML-3.11.tar.gz\nmv PyYAML-3.11 $MAINDIR/pyyamlsrc\ncd $MAINDIR/pyyamlsrc\npython3 setup.py install --prefix=$MAINDIR/pyyaml\necho \"export PYTHONPATH=\\$PYTHONPATH:/home/pyyaml/lib/python3.3/site-packages\">>/root/.bashrc\nsource /root/.bashrc\n\n## install git 1.8\ncd $MAINDIR/Downloads\nwget http://git-core.googlecode.com/files/git-1.8.3.4.tar.gz\nwget -O git-manpages-1.8.3.4.tar.gz 
http://code.google.com/p/git-core/downloads/detail?name=git-manpages-1.8.3.4.tar.gz&can-2&q=\nyum install -y zlib-devel perl-CPAN gettext curl-devel\ntar xvfz git-1.8.3.4.tar.gz\ncd git-1.8.3.4\n./configure\nmake -j24\nmake install\ngit --version\n\n## Install LLFI\ncd $MAINDIR/Downloads\ngit clone -b merge https://github.com/DependableSystemsLab/LLFI.git llfisrc\nmv llfisrc $MAINDIR/llfisrc\ncd $MAINDIR/llfisrc\n./setup -LLVM_DST_ROOT $MAINDIR/llvm -LLVM_SRC_ROOT $MAINDIR/llvmsrc -LLFI_BUILD_ROOT $MAINDIR/llfi -LLVM_GXX_BIN_DIR $MAINDIR/llvm/bin\n#echo \"export PATH=\\$PATH:/home/llfi/bin\">>/root/.bashrc\n#source /root/.bashrc\n\n" }, { "alpha_fraction": 0.5525568127632141, "alphanum_fraction": 0.5568181872367859, "avg_line_length": 22.383333206176758, "blob_id": "f6d3897172bc334bb999b2232344357630543945", "content_id": "f69bbd748e6407a45f400400d46fe1a880f020b3", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1408, "license_type": "permissive", "max_line_length": 80, "num_lines": 60, "path": "/bin/llfi-gui.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n\"\"\"\n\n%(prog)s starts LLFI GUI application.\n\nUsage: %(prog)s [OPTIONS] <source IR file>\n\nList of options:\n\n--help(-h): Show help information\n\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\n\nprog = os.path.basename(sys.argv[0])\nscript_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append((os.path.join(script_path, '../config')))\nimport java_paths\n\ndef usage(msg = None):\n retval = 0\n if msg is not None:\n retval = 1\n msg = \"ERROR: \" + msg\n print(msg, file=sys.stderr)\n print(__doc__ % globals(), file=sys.stderr)\n sys.exit(retval)\n\ndef parseArgs(args):\n global options\n argid = 0\n while argid < len(args):\n arg = args[argid]\n if arg.startswith(\"-\"):\n if arg == \"--help\" or arg == \"-h\":\n usage()\n\ndef startGUI():\n\tlib_path = os.path.join(script_path, os.pardir, 'gui/application/lib/*')\n\tclass_path = os.path.join(script_path, os.pardir, 'gui')\n\texeclist = [java_paths.JAVA_EXECUTABLE, '-classpath', \n\t\t\t\tjava_paths.CMAKE_JAVA_INCLUDE_PATH+':'+lib_path+':'+class_path, \n\t\t\t\t'application.Main']\n\tprint(' '.join(execlist))\n\tp = subprocess.Popen(execlist)\n\treturn\n\n################################################################################\ndef main(args):\n parseArgs(args)\n startGUI()\n\n################################################################################\n\nif __name__==\"__main__\":\n main(sys.argv[1:])\n \n" }, { "alpha_fraction": 0.6317300796508789, "alphanum_fraction": 0.6317300796508789, "avg_line_length": 37.70833206176758, "blob_id": "d78fbaa6acc4c39a31b1fdd41b9b9c2f2e63c84a", "content_id": "bec998f884467c0a6483242e79b93d2cfcfd4277", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2786, "license_type": "permissive", "max_line_length": 73, "num_lines": 72, "path": "/web-app/views/src/js/config/config.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var softwareInjectionTypeOptions = [\n\t{value: \"CPUHog(Res)\", text: \"CPUHog(Res)\"},\n\t{value: \"DataCorruption(Data)\", text: \"DataCorruption(Data)\"},\n\t{value: \"HighFrequentEvent(Timing)\", text: \"HighFrequentEvent(Timing)\"},\n\t{value: \"IncorrectOutput(API)\", text: 
\"IncorrectOutput(API)\"},\n\t{value: \"NoOutput(API)\", text: \"NoOutput(API)\"}\n];\n\n// Hardware injection types from gui/gui_config.yaml\nvar hardwareInjectionTypeOptions = [\n\t{value: \"ret\", text: \"ret-(ReturnInst)\"},\n\t{value: \"br\", text: \"br-(BranchInst)\"},\n\t{value: \"switch\", text: \"switch-(SwitchInst)\"},\n\t{value: \"indirectbr\", text: \"indirectbr-(IndirectBrInst)\"},\n\t{value: \"invoke\", text: \"invoke-(InvokeInst)\"},\n\t{value: \"resume\", text: \"resume\"},\n\t{value: \"unreachable\", text: \"unreachable-(UnreachableInst)\"},\n\t{value: \"add\", text: \"add-(BinaryADD)\"},\n\t{value: \"fadd\", text: \"fadd\"},\n\t{value: \"sub\", text: \"sub-(BinarySUB)\"},\n\t{value: \"fsub\", text: \"fsub\"},\n\t{value: \"mul\", text: \"mul-(BinaryMUL)\"},\n\t{value: \"fmul\", text: \"fmul\"},\n\t{value: \"udiv\", text: \"udiv\"},\n\t{value: \"sdiv\", text: \"sdiv\"},\n\t{value: \"fdiv\", text: \"fdiv\"},\n\t{value: \"urem\", text: \"urem\"},\n\t{value: \"srem\", text: \"srem\"},\n\t{value: \"frem\", text: \"frem\"},\n\t{value: \"shl\", text: \"shl\"},\n\t{value: \"lshr\", text: \"lshr\"},\n\t{value: \"ashr\", text: \"ashr\"},\n\t{value: \"and\", text: \"and-(BinaryOR)\"},\n\t{value: \"or\", text: \"or-(BinaryAND)\"},\n\t{value: \"xor\", text: \"xor-(BinaryXOR)\"},\n\t{value: \"extractelement\", text: \"extractelement\"},\n\t{value: \"insertelement\", text: \"insertelement\"},\n\t{value: \"shufflevector\", text: \"shufflevector\"},\n\t{value: \"extractvalue\", text: \"extractvalue\"},\n\t{value: \"insertvalue\", text: \"insertvalue\"},\n\t{value: \"alloca\", text: \"alloca\"},\n\t{value: \"load\", text: \"load-(LoadInst)\"},\n\t{value: \"store\", text: \"store-(StoreInst)\"},\n\t{value: \"fence\", text: \"fence\"},\n\t{value: \"cmpxchg\", text: \"cmpxchg\"},\n\t{value: \"atomicrmw\", text: \"atomicrmw\"},\n\t{value: \"getelementptr\", text: \"getelementptr\"},\n\t{value: \"trunc\", text: \"trunc\"},\n\t{value: \"zext\", text: \"zext\"},\n\t{value: \"fptrunc\", text: \"fptrunc\"},\n\t{value: \"fpext\", text: \"fpext\"},\n\t{value: \"fptoui\", text: \"fptoui\"},\n\t{value: \"fptosi\", text: \"fptosi\"},\n\t{value: \"uitofp\", text: \"uitofp\"},\n\t{value: \"sitofp\", text: \"sitofp\"},\n\t{value: \"ptrtoint\", text: \"ptrtoint\"},\n\t{value: \"inttoptr\", text: \"inttoptr\"},\n\t{value: \"bitcast\", text: \"bitcast\"},\n\t{value: \"addrspacecast\", text: \"addrspacecast\"},\n\t{value: \"icmp\", text: \"icmp\"},\n\t{value: \"fcmp\", text: \"fcmp\"},\n\t{value: \"phi\", text: \"phi\"},\n\t{value: \"select\", text: \"select\"},\n\t{value: \"call\", text: \"call\"},\n\t{value: \"va_arg\", text: \"va_arg\"},\n\t{value: \"landingpad\", text: \"landingpad\"}\n];\n\nexport const injectionType = {\n\tsoftwareInjectionTypeOptions: softwareInjectionTypeOptions,\n\thardwareInjectionTypeOptions: hardwareInjectionTypeOptions\n};" }, { "alpha_fraction": 0.6599511504173279, "alphanum_fraction": 0.6660561561584473, "avg_line_length": 31.13725471496582, "blob_id": "9b89192eec9c8762a0758e448f5392cf1e7563a3", "content_id": "444b1663731b56fe306f2c413d114f9112ddf9ad", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1638, "license_type": "permissive", "max_line_length": 116, "num_lines": 51, "path": "/test_suite/SCRIPTS/deploy_prog.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\nimport subprocess\n\ndef deploy_prog(*prog_list):\n\tr = 0\n\tcopied = 0\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\twork_dict = {}\n\tfor test in suite[\"SoftwareFaults\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"SoftwareFaults\" in prog_list:\n\t\t\twork_dict[\"./SoftwareFaults/\"+test] = suite[\"SoftwareFaults\"][test]\n\tfor test in suite[\"HardwareFaults\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"HardwareFaults\" in prog_list:\n\t\t\twork_dict[\"./HardwareFaults/\"+test] = suite[\"HardwareFaults\"][test]\n\tfor test in suite[\"BatchMode\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"BatchMode\" in prog_list:\n\t\t\twork_dict[\"./BatchMode/\"+test] = suite[\"BatchMode\"][test]\n\t\n\tfor test_path in work_dict:\n\t\tsrc_dir = os.path.join(testsuite_dir, \"PROGRAMS\", work_dict[test_path])\n\t\treq_files = [f for f in suite[\"PROGRAMS\"][work_dict[test_path]]]\n\t\tdst_dir = os.path.join(testsuite_dir, test_path)\n\t\tfor f in req_files:\n\t\t\tsrc_path = os.path.join(src_dir, f)\n\t\t\ttry:\n\t\t\t\tshutil.copy(src_path, dst_dir)\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Failed in copying program files:\", work_dict[test_path], \"for test:\", test_path, file=sys.stderr)\n\t\t\t\tr += 1\n\t\t\telse:\n\t\t\t\tcopied += 1\n\tprint (\"MSG:\", copied, \"files copied\\n\")\n\treturn r\n\nif __name__ == \"__main__\":\n\tr = deploy_prog(*sys.argv[1:])\n\tsys.exit(r)" }, { "alpha_fraction": 0.7059585452079773, "alphanum_fraction": 0.7059585452079773, "avg_line_length": 21.05714225769043, "blob_id": "4c4b08f29a8efe07da837076e9fdb835de2b7e7d", "content_id": "33358784f2369081a526db8c35a5597ef92b5c1b", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 772, "license_type": "permissive", "max_line_length": 76, "num_lines": 35, "path": "/llvm_passes/hardware_failures/InstTypeFIInstSelector.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef INST_TYPE_FI_INST_SELECTOR_H\n#define INST_TYPE_FI_INST_SELECTOR_H\n#include <set>\n\n#include \"FIInstSelector.h\"\n\nusing namespace llvm;\nnamespace llfi {\n\nclass InstTypeFIInstSelector: public HardwareFIInstSelector {\n public:\n InstTypeFIInstSelector(std::set<unsigned> *opcodelist) {\n this->opcodelist = opcodelist;\n }\n ~InstTypeFIInstSelector() {\n delete opcodelist;\n }\n virtual void getCompileTimeInfo(std::map<std::string, std::string>& info){\n info[\"failure_class\"] = \"HardwareFault\";\n info[\"failure_mode\"] = \"SpecifiedInstructionTypes\";\n info[\"targets\"] = \"<include list in yaml>\";\n info[\"injector\"] = \"<fi_type>\";\n }\n\n private:\n virtual bool isInstFITarget(Instruction* inst);\n private:\n std::set<unsigned> *opcodelist;\n};\n\n}\n\n\n\n#endif\n" }, { "alpha_fraction": 0.5961138010025024, "alphanum_fraction": 0.5961138010025024, "avg_line_length": 30.326086044311523, "blob_id": "a2f94ce022401e7c46cc42368e195099418cd5f0", "content_id": "8b0b1f8ce0370fcce2e3df2f3403ffbc3c6041ed", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], 
"is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2882, "license_type": "permissive", "max_line_length": 83, "num_lines": 92, "path": "/llvm_passes/core/FIInstSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/InstIterator.h\"\n\n#include \"FIInstSelector.h\"\n\nnamespace llfi {\nvoid FIInstSelector::getFIInsts(Module &M, std::set<Instruction*> *fiinsts) {\n getInitFIInsts(M, fiinsts);\n\n std::set<Instruction* > bs;\n std::set<Instruction* > fs;\n // must do both of the computation on the fiinsts, and update\n // fiinsts finally\n if (includebackwardtrace)\n getBackwardTraceofInsts(fiinsts, &bs);\n if (includeforwardtrace)\n getForwardTraceofInsts(fiinsts, &fs);\n\n fiinsts->insert(bs.begin(), bs.end());\n fiinsts->insert(fs.begin(), fs.end());\n}\n\nvoid FIInstSelector::getInitFIInsts(Module &M, \n std::set<Instruction*> *fiinsts) {\n for (Module::iterator m_it = M.begin(); m_it != M.end(); ++m_it) {\n if (!m_it->isDeclaration()) {\n //m_it is a function \n for (inst_iterator f_it = inst_begin(m_it); f_it != inst_end(m_it);\n ++f_it) {\n Instruction *inst = &(*f_it);\n if (isInstFITarget(inst)) {\n fiinsts->insert(inst);\n }\n }\n } \n }\n}\n\nvoid FIInstSelector::getBackwardTraceofInsts(\n const std::set<Instruction* > *fiinsts, std::set<Instruction* > *bs) {\n for (std::set<Instruction* >::const_iterator inst_it = fiinsts->begin();\n inst_it != fiinsts->end(); ++inst_it) {\n Instruction *inst = *inst_it;\n getBackwardTraceofInst(inst, bs);\n }\n}\n\nvoid FIInstSelector::getForwardTraceofInsts(\n const std::set<Instruction* > *fiinsts, std::set<Instruction* > *fs) {\n for (std::set<Instruction* >::const_iterator inst_it = fiinsts->begin();\n inst_it != fiinsts->end(); ++inst_it) {\n Instruction *inst = *inst_it;\n getForwardTraceofInst(inst, fs);\n }\n}\n\nvoid FIInstSelector::getBackwardTraceofInst(Instruction *inst,\n std::set<Instruction*> *bs) {\n for (User::op_iterator op_it = inst->op_begin(); \n op_it != inst->op_end(); ++op_it) {\n Value *src = *op_it;\n if (Instruction *src_inst = dyn_cast<Instruction>(src)) {\n if (bs->find(src_inst) == bs->end()) {\n bs->insert(src_inst);\n getBackwardTraceofInst(src_inst, bs);\n }\n }\n }\n}\n\nvoid FIInstSelector::getForwardTraceofInst(Instruction *inst,\n std::set<Instruction*> *fs) {\n for (Value::use_iterator use_it = inst->use_begin();\n use_it != inst->use_end(); ++use_it) {\n User *use = *use_it;\n if (Instruction *use_inst = dyn_cast<Instruction>(use)) {\n if (fs->find(use_inst) == fs->end()) {\n fs->insert(use_inst);\n getForwardTraceofInst(use_inst, fs);\n }\n }\n }\n}\n\nvoid FIInstSelector::getCompileTimeInfo(std::map<std::string, std::string>& info) {\n info[\"failure_class\"] = \"Unknown\";\n info[\"failure_mode\"] = \"Unknown\";\n info[\"targets\"] = \"Unknown\";\n info[\"injector\"] = \"Unknown\";\n}\n\n}\n" }, { "alpha_fraction": 0.8213456869125366, "alphanum_fraction": 0.8213456869125366, "avg_line_length": 431, "blob_id": "1816713b57415914cd2ff9b087fd9bd7e1670cab", "content_id": "ad75605e4c93cd892bbb89a2b07234f7b5adbe06", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 431, "license_type": "permissive", "max_line_length": 431, "num_lines": 1, "path": "/test_suite/HardwareFaults/multiple-bit-flips-in-multiple-words/ReadMe.txt", "repo_name": "DependableSystemsLab/LLFI", 
"src_encoding": "UTF-8", "text": "Using this yaml file, you can systematically inject multiple bit-flips in multiple words. The maximum number of bit-flips in one run of the program is controlled by fi_max_multiple. NOTE that a program may crash before being able to inject all bit-flips specified by fi_max_multiple. The distance between each injection is controlled by another two parameter, namely window_len_multiple_startindex and window_len_multiple_endindex." }, { "alpha_fraction": 0.6528028845787048, "alphanum_fraction": 0.6548372507095337, "avg_line_length": 31.41025733947754, "blob_id": "19aaa621daa26aac5da1e30df8bf46d97955e294", "content_id": "d66183151a93ed7aca43edd8f057594d8a133ac0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8848, "license_type": "permissive", "max_line_length": 82, "num_lines": 273, "path": "/llvm_passes/core/Controller.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Module.h\"\n#include \"llvm/Support/Debug.h\"\n#include \"llvm/Support/CommandLine.h\"\n#include \"llvm/Support/raw_ostream.h\"\n\n#include \"Controller.h\"\n#include \"FICustomSelectorManager.h\"\n#include \"Utils.h\"\n#include \"FIInstSelectorManager.h\"\n#include \"FIInstSelector.h\"\n#include \"InstTypeFIInstSelector.h\"\n#include \"FuncNameFIInstSelector.h\"\n#include \"FIRegSelector.h\"\n#include \"RegLocBasedFIRegSelector.h\"\n\nusing namespace llvm;\n\nnamespace llfi {\n/**\n * Inject Instruction\n */\nstatic cl::list< FIInstSelMethod > fiinstselmethod(\n cl::desc(\"Choose how to specify the fault injection target instructions\"),\n cl::values(\n clEnumVal(insttype, \"Specify through instruction type/opcode\"),\n clEnumVal(funcname, \"Specify through function name\"),\n clEnumVal(sourcecode, \"Specify through source code\"),\n clEnumVal(custominstselector, \n \"Specify through custom instruction selector\"),\n clEnumValEnd),\n cl::ZeroOrMore);\n\n// inst type\nstatic cl::list< std::string > includeinst(\"includeinst\", \n cl::desc(\"The type of instruction to be included for fault injection\"), \n cl::ZeroOrMore);\nstatic cl::list< std::string > excludeinst(\"excludeinst\", \n cl::desc(\"The type of instruction to be excluded for fault injection\"), \n cl::ZeroOrMore);\n\n// func name\nstatic cl::list< std::string > includefunc(\"includefunc\", \n cl::desc(\"The function name to be included for fault injection\"), \n cl::ZeroOrMore);\nstatic cl::list< std::string > excludefunc(\"excludefunc\", \n cl::desc(\"The function name to be excluded for fault injection\"), \n cl::ZeroOrMore);\n\n// custom instruction selector name\nstatic cl::opt < std::string > fiinstselectorname(\"fiinstselectorname\",\n cl::desc(\"Custom fault injection instruction selector name\"));\n\n// backtrace or forwardtrace included\nstatic cl::opt< bool > includebackwardtrace(\"includebackwardtrace\", \n cl::init(false),\n cl::desc(\n \"Include backward trace of the selected instructions for fault injection\"));\nstatic cl::opt< bool > includeforwardtrace(\"includeforwardtrace\",\n cl::init(false),\n cl::desc(\n \"Include forward trace of the selected instructions for fault injection\"));\n\n/**\n * Inject Register\n */\nstatic cl::opt< FIRegSelMethod > firegselmethod(\n cl::desc(\"Choose how to specify the fault injection target registers\"),\n cl::init(regloc),\n cl::values(\n clEnumVal(regloc, \n \"Specify through register location, e.g. 
dstreg, srcreg1.\"),\n clEnumVal(customregselector, \"Specify through custom register selector\"),\n clEnumValEnd));\n\nstatic cl::opt< FIRegLoc > fireglocation(\n cl::desc(\"Choose fault injection register location:\"),\n cl::init(dstreg),\n cl::values(\n clEnumVal(dstreg, \"Inject into destination register\"),\n clEnumVal(allsrcreg, \"Inject randomly into one of all source registers\"),\n clEnumVal(srcreg1, \"Inject into 1st source register\"),\n clEnumVal(srcreg2, \"Inject into 2nd source register\"),\n clEnumVal(srcreg3, \"Inject into 3rd source register\"),\n clEnumVal(srcreg4, \"Inject into 4th source register\"),\n clEnumValEnd));\n\nstatic cl::opt < std::string > firegselectorname(\"firegselectorname\",\n cl::desc(\"Custom fault injection register selector name\"));\n\n/**\n * Log file\n */\ncl::opt < std::string > llfilogfile(\"llfilogfile\",\n cl::init(\"llfi.log.compilation.txt\"),\n cl::Hidden,\n cl::desc(\"Name of compilation passes logging file\"));\n\n\nController *Controller::ctrl = NULL;\n\nvoid Controller::getOpcodeListofFIInsts(std::set<unsigned> *fi_opcode_set) {\n NameOpcodeMap fullnameopcodemap;\n genFullNameOpcodeMap(fullnameopcodemap);\n\n // include\n for (unsigned i = 0; i != includeinst.size(); ++i) {\n // TODO: make \"all\" a static string\n if (includeinst[i] == \"all\") {\n for (NameOpcodeMap::const_iterator it = fullnameopcodemap.begin();\n it != fullnameopcodemap.end(); ++it) {\n fi_opcode_set->insert(it->second); \n }\n break;\n } else {\n NameOpcodeMap::iterator loc = fullnameopcodemap.find(includeinst[i]);\n if (loc != fullnameopcodemap.end()) {\n fi_opcode_set->insert(loc->second);\n } else {\n errs() << \"ERROR: Invalid include instruction type: \" << includeinst[i]\n << \"\\n\";\n exit(1);\n }\n }\n }\n\n // exclude\n for (unsigned i = 0; i != excludeinst.size(); ++i) {\n NameOpcodeMap::iterator loc = fullnameopcodemap.find(excludeinst[i]);\n if (loc != fullnameopcodemap.end()) {\n fi_opcode_set->erase(loc->second);\n } else {\n errs() << \"ERROR: Invalid exclude instruction type: \" << excludeinst[i]\n << \"\\n\";\n exit(1);\n }\n }\n}\nvoid Controller::getFuncList(std::set<std::string> *fi_func_set) {\n std::set<std::string>::iterator it;\n std::string func;\n for (size_t i = 0; i < includefunc.size(); ++i) {\n if(includefunc[i] == \"all\") {\n for(it = func_set.begin(); it != func_set.end(); ++it) {\n func = demangleFuncName(*it);\n fi_func_set->insert(func);\n }\n } else {\n func = demangleFuncName(includefunc[i]);\n fi_func_set->insert(func);\n }\n }\n\n // exclude list\n for(size_t i = 0; i < excludefunc.size(); ++i) {\n it = fi_func_set->find(excludefunc[i]);\n if(it != fi_func_set->end()) {\n fi_func_set->erase(it);\n } else {\n errs() << \"ERROR: Invalid exclude function name: \" << excludefunc[i]\n << \"\\n\";\n exit(1);\n }\n }\n}\n\nvoid Controller::processInstSelArgs() {\n fiinstselector = new FIInstSelectorManager();\n std::set<unsigned> *fi_opcode_set;\n std::set<std::string> *fi_func_set;\n FICustomInstSelectorManager *m;\n for(size_t i = 0; i < fiinstselmethod.size(); ++i) {\n switch(fiinstselmethod[i]) {\n case insttype:\n fi_opcode_set = new std::set<unsigned>;\n getOpcodeListofFIInsts(fi_opcode_set);\n fiinstselector->addSelector(new InstTypeFIInstSelector(fi_opcode_set));\n break;\n case funcname:\n fi_func_set = new std::set<std::string>;\n getFuncList(fi_func_set);\n fiinstselector->addSelector(new FuncNameFIInstSelector(fi_func_set));\n break;\n case custominstselector:\n m = 
FICustomInstSelectorManager::getCustomInstSelectorManager();\n fiinstselector->addSelector(m->getCustomInstSelector(fiinstselectorname));\n break;\n default:\n // TODO: handle the source code case\n errs() << \"ERROR: option not implemented yet\\n\";\n exit(4);\n }\n }\n fiinstselector->setIncludeBackwardTrace(includebackwardtrace);\n fiinstselector->setIncludeForwardTrace(includeforwardtrace);\n}\n\nvoid Controller::processRegSelArgs() {\n firegselector = NULL;\n if (firegselmethod == regloc) {\n firegselector = new RegLocBasedFIRegSelector(fireglocation);\n } else {\n FICustomRegSelectorManager *m = \n FICustomRegSelectorManager::getCustomRegSelectorManager();\n firegselector = m->getCustomRegSelector(firegselectorname);\n }\n}\n\nvoid Controller::processCmdArgs() {\n // clear the log file\n std::string err;\n raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::F_Append);\n if (err == \"\") {\n logFile << \"\\n\\nStart of a pass\\n\";\n } else {\n errs() << \"Unable to output logging information to file \" << llfilogfile\n << \"\\n\";\n }\n logFile.close();\n\n processInstSelArgs();\n processRegSelArgs();\n}\n\n// Create a list of functions present in M. Certain care must be taken when\n// compiling C++ due to name mangling.\nvoid Controller::getModuleFuncs(Module &M) {\n Module::iterator it;\n for(it = M.begin(); it != M.end(); ++it) {\n std::string func_name = it->getName().str();\n std::string final_name = demangleFuncName(func_name);\n\n func_set.insert(final_name);\n }\n}\n\nvoid Controller::init(Module &M) {\n // generate list of functions present in M\n getModuleFuncs(M);\n\n processCmdArgs();\n \n // select fault injection instructions\n std::set<Instruction*> fiinstset;\n fiinstselector->getFIInsts(M, &fiinstset);\n \n // select fault injection registers\n firegselector->getFIInstRegMap(&fiinstset, &fi_inst_regs_map);\n}\n\nController::~Controller() {\n delete ctrl;\n ctrl = NULL;\n}\n\nvoid Controller::dump() const {\n for (std::map<Instruction*, std::list< int > *>::const_iterator inst_it =\n fi_inst_regs_map.begin(); inst_it != fi_inst_regs_map.end(); ++inst_it) {\n errs() << \"Selected instruction \" << *(inst_it->first) << \"\\nRegs:\\n\";\n for (std::list<int>::const_iterator reg_it = inst_it->second->begin();\n reg_it != inst_it->second->end(); ++reg_it) {\n if(*reg_it == DST_REG_POS) errs() << \"\\t\" << *(inst_it->first) << \"\\n\";\n else errs() << \"\\t\" << inst_it->first->getOperand(*reg_it) << \"\\n\";\n }\n errs() << \"\\n\";\n }\n}\n\nController *Controller::getInstance(Module &M) {\n if (ctrl == NULL)\n ctrl = new Controller(M);\n return ctrl;\n}\n}\n" }, { "alpha_fraction": 0.6252285242080688, "alphanum_fraction": 0.650822639465332, "avg_line_length": 20.8799991607666, "blob_id": "0c24a8873b7c9bd2b1ec3135e1ac396dd3a815d2", "content_id": "17d5461f9ca8f2d6eb0c74222e9a38a8c4d6a96c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 547, "license_type": "permissive", "max_line_length": 98, "num_lines": 25, "path": "/runtime_lib/Utils.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <assert.h>\n\n#include \"Utils.h\"\n\nint start_tracing_flag = TRACING_GOLDEN_RUN; //for instTraceLib: initialized to Golden Run setting\n\nvoid getOpcodeExecCycleArray(const unsigned len, int *arr) {\n int i = 0;\n for (i = 0; i < len; ++i)\n arr[i] = -1;\n\n#define HANDLE_INST(N, OPC, CLASS, CYCLE) \\\n assert (N < 
len && \"opcode execution cycle array too small\");\\\n arr[N] = CYCLE;\n#include \"Instruction.def\"\n}\n\n\nbool isLittleEndian() {\n char *ptr;\n int data = 0x00000001;\n ptr = (char*)&data; \n return *ptr == 0x1;\n}\n" }, { "alpha_fraction": 0.7385621070861816, "alphanum_fraction": 0.7385621070861816, "avg_line_length": 21, "blob_id": "3262701cb3a80950b3ab87fdde50bb64b716f4da", "content_id": "7aa4af683ef5a8d1f4f422a32bbacb1c04e96b53", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 153, "license_type": "permissive", "max_line_length": 48, "num_lines": 7, "path": "/web-app/views/src/js/actions/faultSummaryActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar faultSummaryActions = Reflux.createActions([\n 'updateFaultSummary'\n ]);\n\nmodule.exports = faultSummaryActions;" }, { "alpha_fraction": 0.656810998916626, "alphanum_fraction": 0.6583949327468872, "avg_line_length": 32.83928680419922, "blob_id": "ef16db65f479a32413a6c5d776a0952ac3c2bdb5", "content_id": "deb3b60db6c109f9124983989daab291b8eaae92", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1894, "license_type": "permissive", "max_line_length": 83, "num_lines": 56, "path": "/test_suite/SCRIPTS/clear_all.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\n\ndef clear_all():\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\t## clear hardware faults\n\tfor test in suite[\"HardwareFaults\"]:\n\t\tfs = [f for f in os.listdir(os.path.join(testsuite_dir, \"HardwareFaults\", test))]\n\t\tfor f in fs:\n\t\t\tif f != \"input.yaml\":\n\t\t\t\tprint(\"MSG: Removing \", \"HardwareFaults/\"+test+\"/\"+f)\n\t\t\t\tif os.path.isdir(os.path.join(testsuite_dir, \"HardwareFaults\", test, f)):\n\t\t\t\t\tshutil.rmtree(os.path.join(testsuite_dir, \"HardwareFaults\", test, f))\n\t\t\t\telse:\n\t\t\t\t\tos.remove(os.path.join(testsuite_dir, \"HardwareFaults\", test, f))\n\n\t## clear software faults\n\tfor test in suite[\"SoftwareFaults\"]:\n\t\tfs = [f for f in os.listdir(os.path.join(testsuite_dir, \"SoftwareFaults\", test))]\n\t\tfor f in fs:\n\t\t\tif f != \"input.yaml\":\n\t\t\t\tprint(\"MSG: Removing \", \"SoftwareFaults/\"+test+\"/\"+f)\n\t\t\t\tif os.path.isdir(os.path.join(testsuite_dir, \"SoftwareFaults\", test, f)):\n\t\t\t\t\tshutil.rmtree(os.path.join(testsuite_dir, \"SoftwareFaults\", test, f))\n\t\t\t\telse:\n\t\t\t\t\tos.remove(os.path.join(testsuite_dir, \"SoftwareFaults\", test, f))\n\n\t## clear batch mode faults\n\tfor test in suite[\"BatchMode\"]:\n\t\tfs = [f for f in os.listdir(os.path.join(testsuite_dir, \"BatchMode\", test))]\n\t\tfor f in fs:\n\t\t\tif f != \"input.yaml\":\n\t\t\t\tprint(\"MSG: Removing \", \"BatchMode/\"+test+\"/\"+f)\n\t\t\t\tif os.path.isdir(os.path.join(testsuite_dir, \"BatchMode\", test, f)):\n\t\t\t\t\tshutil.rmtree(os.path.join(testsuite_dir, \"BatchMode\", test, 
f))\n\t\t\t\telse:\n\t\t\t\t\tos.remove(os.path.join(testsuite_dir, \"BatchMode\", test, f))\n\t\n\treturn 0\n\nif __name__ == \"__main__\":\n\tr = clear_all()\n\tsys.exit(r)" }, { "alpha_fraction": 0.577034056186676, "alphanum_fraction": 0.5776110887527466, "avg_line_length": 35.808509826660156, "blob_id": "2db39fb6b6f843ae97ae2fe06ca173ce0ba163d5", "content_id": "a065af92b694c251544156615d1bd64a0b03314b", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1733, "license_type": "permissive", "max_line_length": 89, "num_lines": 47, "path": "/llvm_passes/software_failures/_SoftwareFaultRegSelectors.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"_SoftwareFaultRegSelectors.h\"\n\nusing namespace std;\nnamespace llfi {\n  bool FuncArgRegSelector::isRegofInstFITarget(Value *reg, Instruction *inst){ \n    if(isa<CallInst>(inst) == false){\n      return false;\n    }else{\n      CallInst* CI = dyn_cast<CallInst>(inst);\n      if(this->specified_arg == true){\n        if(reg == CI->getArgOperand(this->pos_argument)){\n          return true;\n        }else return false;\n      }else{\n        for(int i = 0; i<CI->getNumArgOperands(); i++){\n          if(reg == CI->getArgOperand(i)) return true;\n        }\n        return false;\n      }\n    }\n  }\n  bool FuncArgRegSelector::isRegofInstFITarget(Value *reg, Instruction *inst, int pos){\n  \tif(specified_arg == true)\n\t  \treturn isRegofInstFITarget(reg, inst) && pos == this->pos_argument;\n  }\n\n  bool FuncDestRegSelector::isRegofInstFITarget(Value *reg, Instruction *inst){ \n    if(isa<CallInst>(inst) == false){\n      return false;\n    }else{\n      if(reg == inst) return true;\n      else return false;\n    }\n  } \n\n  bool RetValRegSelector::isRegofInstFITarget(Value *reg, Instruction *inst){ \n    if(isa<ReturnInst>(inst)){\n      ReturnInst* RI = dyn_cast<ReturnInst>(inst);\n      if(reg == RI->getReturnValue()) return true;\n      else return false;\n    }else return false;\n  }\n\n  static RegisterFIRegSelector A(\"FuncArgRegSelector\", new FuncArgRegSelector());\n  static RegisterFIRegSelector B(\"RetValRegSelector\", new RetValRegSelector());\n  static RegisterFIRegSelector C(\"FuncDestRegSelector\", new FuncDestRegSelector());\n}\n\n \n" }, { "alpha_fraction": 0.7016574740409851, "alphanum_fraction": 0.7016574740409851, "avg_line_length": 26.42424201965332, "blob_id": "cf02bd9a7d422b352bdc8433653f00b1f0ee5d8c", "content_id": "457276c92b1da011a96d1bcb32f4de0cb0ab18bd", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 905, "license_type": "permissive", "max_line_length": 78, "num_lines": 33, "path": "/llvm_passes/SampleFIInstSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Instructions.h\"\n\n#include \"FIInstSelector.h\"\n#include \"FICustomSelectorManager.h\"\n\nusing namespace llvm;\n\nnamespace llfi {\n\n/**\n * This sample instruction selector only selects instructions in function main\n */\n// TODO: enable custom selector to have more sources of options, e.g. 
read from\n// config file\nclass SampleFIInstSelector: public HardwareFIInstSelector {\n private:\n  virtual bool isInstFITarget(Instruction *inst) {\n    if (inst->getParent()->getParent()->getName() == \"main\")\n      return true;\n    else\n      return false;\n  }\n public:\n \tvirtual void getCompileTimeInfo(std::map<std::string, std::string>& info){\n    info[\"failure_class\"] = \"HardwareFault\";\n    info[\"failure_mode\"] = \"OnlyMain\";\n    info[\"targets\"] = \"<instructions in main() function>\";\n    info[\"injector\"] = \"<fi_type>\";\n  }\n};\n\nstatic RegisterFIInstSelector X(\"onlymain\", new SampleFIInstSelector());\n}\n" }, { "alpha_fraction": 0.6227545142173767, "alphanum_fraction": 0.6295979619026184, "avg_line_length": 29.763158798217773, "blob_id": "4e83920a2f4c786dba36f937ea501fad5af27500", "content_id": "221eee98c627a4bd081c2ff026cf93bf98474751", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1169, "license_type": "permissive", "max_line_length": 86, "num_lines": 38, "path": "/web-app/server/runtimeOptions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var fs = require('fs');\nvar readline = require('readline');\nvar LLFI_BUILD_ROOT = require('./utils/config').LLFI_BUILD_ROOT;\n\nexports.processRuntimeOptions = function (req, res) {\n\n\tvar runtimeOptions = req.body.runtimeOptions;\n\tvar inputYamlFilePath = \"./uploads/\"+ req.ip +\"/input.yaml\";\n\tvar data = \"\";\n\tif (runtimeOptions.length) data += \"runOption:\\n\";\n\tfor (var j = 0; j < runtimeOptions.length; j ++) {\n\t\tvar runOption = runtimeOptions[j];\n\t\tdata += \"- run: {\";\n\n\t\tfor(var keys = Object.keys(runOption), i = 0, end = keys.length - 1; i < end; i++) {\n\t\t\tvar key = keys[i], value = runOption[key];\n\t\t\tdata += key + \": \" + value + \", \";\n\t\t}\n\t\tvar lastIndex = Object.keys(runOption).length - 1;\n\t\tif(lastIndex >= 0) {\n\t\t\tvar lastKey = Object.keys(runOption)[lastIndex];\n\t\t\tvar value = runOption[lastKey];\n\t\t\tdata += lastKey + \": \" + value + \"}\\n\";\n\t\t}\n\t}\n\n\t// Append the status to input yaml file\n\tfs.appendFile(inputYamlFilePath, data, function (err) {\n\t\tif (err) {\n\t\t\tres.status(500);\n\t\t\tres.send(err);\n\t\t\tconsole.log(\"err in modifying input.yaml file in runtimeOption: \", err);\n\t\t} else {\n\t\t\tconsole.log(\"runtimeOption Submit success\");\n\t\t\tres.end();\n\t\t}\n\t});\n};\n" }, { "alpha_fraction": 0.5759001970291138, "alphanum_fraction": 0.5794304609298706, "avg_line_length": 24.142011642456055, "blob_id": "85ea970567cc1f85cb781b9a4018f9382bc7a809", "content_id": "8063498297f4fca410a0aa2a6ff72247f838d81c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4249, "license_type": "permissive", "max_line_length": 99, "num_lines": 169, "path": "/tools/compiletoIR.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\n\"\"\"\n\n%(prog)s takes source file(s) as input and generates a single IR file\n\nUsage: %(prog)s [OPTIONS] <source files>\n\nList of options:\n\n-o <output file>: Intermediate representation (IR) output file\n-I <include directory>: Include directory for header files\n--readable: Generate human-readable output file\n--verbose: Show verbose information\n--debug:\t\tEnable debugging symbols\n--help(-h): Show help 
information\n\"\"\"\n\nimport sys, os, subprocess, tempfile\nscript_path = os.path.realpath(os.path.dirname(__file__))\nsys.path.append(os.path.join(script_path, '../config'))\nimport llvm_paths\n\nllvmlink = os.path.join(llvm_paths.LLVM_DST_ROOT, \"bin/llvm-link\")\nllvmgcc = os.path.join(llvm_paths.LLVM_GXX_BIN_DIR, \"clang\")\nllvmgxx = os.path.join(llvm_paths.LLVM_GXX_BIN_DIR, \"clang++\")\nprog = os.path.basename(sys.argv[0])\n\nbasedir = os.getcwd()\n\noptions = {\n \"o\": \"a.out\",\n \"sources\": [],\n \"I\": [],\n \"readable\": False,\n \"debug\": False,\n \"verbose\": False,\n}\n\n\ndef usage(msg = None):\n retval = 0\n if msg is not None:\n retval = 1\n msg = \"ERROR: \" + msg\n print(msg, file=sys.stderr)\n print(__doc__ % globals(), file=sys.stderr)\n sys.exit(retval)\n\n\ndef verbosePrint(msg, verbose):\n if verbose:\n print(msg)\n\n\ndef parseArgs(args):\n global options\n argid = 0\n while argid < len(args):\n arg = args[argid]\n if arg.startswith(\"-\"):\n if arg == \"-o\":\n argid += 1\n options[\"o\"] = os.path.join(basedir, args[argid])\n elif arg == \"-I\":\n argid += 1\n options[\"I\"].append(os.path.join(basedir, args[argid]))\n elif arg == \"--readable\":\n options[\"readable\"] = True\n elif arg == \"--verbose\":\n options[\"verbose\"] = True\n elif arg == \"--debug\":\n options[\"debug\"] = True\n elif arg == \"--help\" or arg == \"-h\":\n usage()\n else:\n usage(\"Invalid argument: \" + arg)\n else:\n options[\"sources\"].append(os.path.join(basedir, arg))\n argid += 1\n\n if len(options[\"sources\"]) == 0:\n usage(\"No input file(s) specified.\")\n\n\n################################################################################\ndef execute(execlist):\n verbosePrint(' '.join(execlist), options[\"verbose\"])\n p = subprocess.Popen(execlist)\n p.wait()\n return p.returncode\n\n\ndef compileToIR(outputfile, inputfile):\n if inputfile.endswith(\".c\"):\n execlist = [llvmgcc]\n else:\n execlist = [llvmgxx]\n\n execlist.extend(['-w', '-emit-llvm', '-o', outputfile, inputfile])\n\n for header_dir in options[\"I\"]:\n execlist.extend(['-I', header_dir])\n\n if options['readable']:\n execlist.append('-S')\n else:\n execlist.append('-c')\n\n if options['debug']:\n execlist.append('-g')\n\n return execute(execlist)\n\n\ndef linkFiles(outputfile, inputlist):\n execlist = [llvmlink, '-o', outputfile]\n\n if options['readable']:\n execlist.append('-S')\n\n execlist.extend(inputlist)\n return execute(execlist)\n\n################################################################################\ndef compileProg():\n outputfile = options[\"o\"]\n srcfiles = options[\"sources\"]\n verbosePrint(\"Source files to be compiled: \", options[\"verbose\"])\n verbosePrint(\", \".join(srcfiles), options[\"verbose\"])\n verbosePrint(\"\\n======Compile======\", options[\"verbose\"])\n\n if len(srcfiles) == 1:\n retcode = compileToIR(outputfile, srcfiles[0])\n else:\n tmpfiles = []\n for src in srcfiles:\n file_handler, tmpfile = tempfile.mkstemp()\n tmpfiles.append(tmpfile)\n retcode = compileToIR(tmpfile, src)\n if retcode != 0:\n break\n\n if retcode == 0:\n retcode = linkFiles(outputfile, tmpfiles)\n\n # cleaning up the temporary files\n for tmpfile in tmpfiles:\n try:\n os.remove(tmpfile)\n except:\n pass\n\n if retcode != 0:\n print(\"\\nERROR: there was a compilation error, please follow\"\\\n \" the provided instructions for %s or compile the \"\\\n \"source file(s) to one single IR file manually.\" % prog, file=sys.stderr)\n 
sys.exit(retcode)\n\n\n################################################################################\ndef main(args):\n    parseArgs(args)\n    compileProg()\n\n\nif __name__==\"__main__\":\n    main(sys.argv[1:])\n" }, { "alpha_fraction": 0.5942028760910034, "alphanum_fraction": 0.6038647294044495, "avg_line_length": 15.923076629638672, "blob_id": "90df02f6822ca0cf0af6126bd2fbe55db34d1602", "content_id": "b875906bb5876b51f905bc13c24cc74dea422707", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 207, "license_type": "permissive", "max_line_length": 55, "num_lines": 13, "path": "/test_suite/PROGRAMS/Makefile", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "progs = deadlock factorial mcf memcpy1 mpi sudoku2 bfs \n\ndefault: all\n\nall:\n\tfor prog in $(progs); do \\\n\t\t$(MAKE) -C $$prog; \\\n\tdone\n \nclean:\n\tfor prog in $(progs); do \\\n\t\t$(MAKE) -C $$prog clean; \\\n\tdone\n" }, { "alpha_fraction": 0.6108906269073486, "alphanum_fraction": 0.6221455931663513, "avg_line_length": 23.279621124267578, "blob_id": "826244c0aceeeb55cfe76e86b28cc6050d2d7130", "content_id": "0884419ab6e07f3dc68e691b83eb3751d652d73f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15371, "license_type": "permissive", "max_line_length": 121, "num_lines": 633, "path": "/test_suite/PROGRAMS/bfs/parboil.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/*\n * (c) 2007 The Board of Trustees of the University of Illinois.\n */\n\n#include \"parboil.h\"\n#include <stdlib.h>\n#include <string.h>\n#include <stdio.h>\n\n#if _POSIX_VERSION >= 200112L\n# include <sys/time.h>\n#endif\n\n/* Free an array of owned strings. */\nstatic void\nfree_string_array(char **string_array)\n{\n char **p;\n\n if (!string_array) return;\n for (p = string_array; *p; p++) free(*p);\n free(string_array);\n}\n\n/* Parse a comma-delimited list of strings into an\n * array of strings. */\nstatic char ** \nread_string_array(char *in)\n{\n char **ret;\n int i;\n int count;\t\t\t/* Number of items in the input */\n char *substring;\t\t/* Current substring within 'in' */\n\n /* Count the number of items in the string */\n count = 1;\n for (i = 0; in[i]; i++) if (in[i] == ',') count++;\n\n /* Allocate storage */\n ret = (char **)malloc((count + 1) * sizeof(char *));\n\n /* Create copies of the strings from the list */\n substring = in;\n for (i = 0; i < count; i++) {\n char *substring_end;\n int substring_length;\n\n /* Find length of substring */\n for (substring_end = substring;\n\t (*substring_end != ',') && (*substring_end != 0);\n\t substring_end++);\n\n substring_length = substring_end - substring;\n\n /* Allocate memory and copy the substring */\n ret[i] = (char *)malloc(substring_length + 1);\n memcpy(ret[i], substring, substring_length);\n ret[i][substring_length] = 0;\n\n /* go to next substring */\n substring = substring_end + 1;\n }\n ret[i] = NULL;\t\t/* Write the sentinel value */\n\n return ret;\n}\n\nstruct argparse {\n int argc;\t\t\t/* Number of arguments. Mutable. */\n char **argv;\t\t\t/* Argument values. Immutable. */\n\n int argn;\t\t\t/* Current argument number. */\n char **argv_get;\t\t/* Argument value being read. */\n char **argv_put;\t\t/* Argument value being written.\n\t\t\t\t * argv_put <= argv_get. 
*/\n};\n\nstatic void\ninitialize_argparse(struct argparse *ap, int argc, char **argv)\n{\n ap->argc = argc;\n ap->argn = 0;\n ap->argv_get = ap->argv_put = ap->argv = argv;\n}\n\nstatic void\nfinalize_argparse(struct argparse *ap)\n{\n /* Move the remaining arguments */\n for(; ap->argn < ap->argc; ap->argn++)\n *ap->argv_put++ = *ap->argv_get++;\n}\n\n/* Delete the current argument. */\nstatic void\ndelete_argument(struct argparse *ap)\n{\n if (ap->argn >= ap->argc) {\n fprintf(stderr, \"delete_argument\\n\");\n }\n ap->argc--;\n ap->argv_get++;\n}\n\n/* Go to the next argument. Also, move the current argument to its\n * final location in argv. */\nstatic void\nnext_argument(struct argparse *ap)\n{\n if (ap->argn >= ap->argc) {\n fprintf(stderr, \"next_argument\\n\");\n }\n /* Move argument to its new location. */\n *ap->argv_put++ = *ap->argv_get++;\n ap->argn++;\n}\n\nstatic int\nis_end_of_arguments(struct argparse *ap)\n{\n return ap->argn == ap->argc;\n}\n\nstatic char *\nget_argument(struct argparse *ap)\n{\n return *ap->argv_get;\n}\n\nstatic char *\nconsume_argument(struct argparse *ap)\n{\n char *ret = get_argument(ap);\n delete_argument(ap);\n return ret;\n}\n\nstruct pb_Parameters *\npb_ReadParameters(int *_argc, char **argv)\n{\n char *err_message;\n struct argparse ap;\n struct pb_Parameters *ret =\n (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));\n\n /* Initialize the parameters structure */\n ret->outFile = NULL;\n ret->inpFiles = (char **)malloc(sizeof(char *));\n ret->inpFiles[0] = NULL;\n\n /* Each argument */\n initialize_argparse(&ap, *_argc, argv);\n while(!is_end_of_arguments(&ap)) {\n char *arg = get_argument(&ap);\n\n /* Single-character flag */\n if ((arg[0] == '-') && (arg[1] != 0) && (arg[2] == 0)) {\n delete_argument(&ap);\t/* This argument is consumed here */\n\n switch(arg[1]) {\n case 'o':\t\t\t/* Output file name */\n\tif (is_end_of_arguments(&ap))\n\t {\n\t err_message = \"Expecting file name after '-o'\\n\";\n\t goto error;\n\t }\n\tfree(ret->outFile);\n\tret->outFile = strdup(consume_argument(&ap));\n\tbreak;\n case 'i':\t\t\t/* Input file name */\n\tif (is_end_of_arguments(&ap))\n\t {\n\t err_message = \"Expecting file name after '-i'\\n\";\n\t goto error;\n\t }\n\tret->inpFiles = read_string_array(consume_argument(&ap));\n\tbreak;\n case '-':\t\t\t/* End of options */\n\tgoto end_of_options;\n default:\n\terr_message = \"Unexpected command-line parameter\\n\";\n\tgoto error;\n }\n }\n else {\n /* Other parameters are ignored */\n next_argument(&ap);\n }\n } /* end for each argument */\n\n end_of_options:\n *_argc = ap.argc;\t\t/* Save the modified argc value */\n finalize_argparse(&ap);\n\n return ret;\n\n error:\n fputs(err_message, stderr);\n pb_FreeParameters(ret);\n return NULL;\n}\n\nvoid\npb_FreeParameters(struct pb_Parameters *p)\n{\n char **cpp;\n\n free(p->outFile);\n free_string_array(p->inpFiles);\n free(p);\n}\n\nint\npb_Parameters_CountInputs(struct pb_Parameters *p)\n{\n int n;\n\n for (n = 0; p->inpFiles[n]; n++);\n return n;\n}\n\n/*****************************************************************************/\n/* Timer routines */\n\nstatic void\naccumulate_time(pb_Timestamp *accum,\n\t\tpb_Timestamp start,\n\t\tpb_Timestamp end)\n{\n#if _POSIX_VERSION >= 200112L\n *accum += end - start;\n#else\n# error \"Timestamps not implemented for this system\"\n#endif\n}\n\n#if _POSIX_VERSION >= 200112L\nstatic pb_Timestamp get_time()\n{\n struct timeval tv;\n gettimeofday(&tv, NULL);\n return (pb_Timestamp) (tv.tv_sec * 1000000LL + 
tv.tv_usec);\n}\n#else\n# error \"no supported time libraries are available on this platform\"\n#endif\n\nvoid\npb_ResetTimer(struct pb_Timer *timer)\n{\n timer->state = pb_Timer_STOPPED;\n\n#if _POSIX_VERSION >= 200112L\n timer->elapsed = 0;\n#else\n# error \"pb_ResetTimer: not implemented for this system\"\n#endif\n}\n\nvoid\npb_StartTimer(struct pb_Timer *timer)\n{\n if (timer->state != pb_Timer_STOPPED) {\n fputs(\"Ignoring attempt to start a running timer\\n\", stderr);\n return;\n }\n\n timer->state = pb_Timer_RUNNING;\n\n#if _POSIX_VERSION >= 200112L\n {\n struct timeval tv;\n gettimeofday(&tv, NULL);\n timer->init = tv.tv_sec * 1000000LL + tv.tv_usec;\n }\n#else\n# error \"pb_StartTimer: not implemented for this system\"\n#endif\n}\n\nvoid\npb_StartTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer)\n{\n unsigned int numNotStopped = 0x3; // 11\n if (timer->state != pb_Timer_STOPPED) {\n fputs(\"Warning: Timer was not stopped\\n\", stderr);\n numNotStopped &= 0x1; // Zero out 2^1\n }\n if (subtimer->state != pb_Timer_STOPPED) {\n fputs(\"Warning: Subtimer was not stopped\\n\", stderr);\n numNotStopped &= 0x2; // Zero out 2^0\n }\n if (numNotStopped == 0x0) {\n fputs(\"Ignoring attempt to start running timer and subtimer\\n\", stderr);\n return;\n }\n\n timer->state = pb_Timer_RUNNING;\n subtimer->state = pb_Timer_RUNNING;\n\n#if _POSIX_VERSION >= 200112L\n {\n struct timeval tv;\n gettimeofday(&tv, NULL);\n \n if (numNotStopped & 0x2) {\n timer->init = tv.tv_sec * 1000000LL + tv.tv_usec;\n }\n \n if (numNotStopped & 0x1) {\n subtimer->init = tv.tv_sec * 1000000LL + tv.tv_usec;\n }\n }\n#else\n# error \"pb_StartTimer: not implemented for this system\"\n#endif\n\n}\n\nvoid\npb_StopTimer(struct pb_Timer *timer)\n{\n\n pb_Timestamp fini;\n\n if (timer->state != pb_Timer_RUNNING) {\n fputs(\"Ignoring attempt to stop a stopped timer\\n\", stderr);\n return;\n }\n\n timer->state = pb_Timer_STOPPED;\n\n#if _POSIX_VERSION >= 200112L\n {\n struct timeval tv;\n gettimeofday(&tv, NULL);\n fini = tv.tv_sec * 1000000LL + tv.tv_usec;\n }\n#else\n# error \"pb_StopTimer: not implemented for this system\"\n#endif\n\n accumulate_time(&timer->elapsed, timer->init, fini);\n timer->init = fini;\n\n}\n\nvoid pb_StopTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) {\n\n pb_Timestamp fini;\n\n unsigned int numNotRunning = 0x3; // 0b11\n if (timer->state != pb_Timer_RUNNING) {\n fputs(\"Warning: Timer was not running\\n\", stderr);\n numNotRunning &= 0x1; // Zero out 2^1\n }\n if (subtimer->state != pb_Timer_RUNNING) {\n fputs(\"Warning: Subtimer was not running\\n\", stderr);\n numNotRunning &= 0x2; // Zero out 2^0\n }\n if (numNotRunning == 0x0) {\n fputs(\"Ignoring attempt to stop stopped timer and subtimer\\n\", stderr);\n return;\n }\n\n\n timer->state = pb_Timer_STOPPED;\n subtimer->state = pb_Timer_STOPPED;\n\n#if _POSIX_VERSION >= 200112L\n {\n struct timeval tv;\n gettimeofday(&tv, NULL);\n fini = tv.tv_sec * 1000000LL + tv.tv_usec;\n }\n#else\n# error \"pb_StopTimer: not implemented for this system\"\n#endif\n\n if (numNotRunning & 0x2) {\n accumulate_time(&timer->elapsed, timer->init, fini);\n timer->init = fini;\n }\n \n if (numNotRunning & 0x1) {\n accumulate_time(&subtimer->elapsed, subtimer->init, fini);\n subtimer->init = fini;\n }\n\n}\n\n/* Get the elapsed time in seconds. 
*/\ndouble\npb_GetElapsedTime(struct pb_Timer *timer)\n{\n double ret;\n\n if (timer->state != pb_Timer_STOPPED) {\n fputs(\"Elapsed time from a running timer is inaccurate\\n\", stderr);\n }\n\n#if _POSIX_VERSION >= 200112L\n ret = timer->elapsed / 1e6;\n#else\n# error \"pb_GetElapsedTime: not implemented for this system\"\n#endif\n return ret;\n}\n\nvoid\npb_InitializeTimerSet(struct pb_TimerSet *timers)\n{\n int n;\n \n timers->wall_begin = get_time();\n\n timers->current = pb_TimerID_NONE;\n\n timers->async_markers = NULL;\n \n\n for (n = 0; n < pb_TimerID_LAST; n++) {\n pb_ResetTimer(&timers->timers[n]);\n timers->sub_timer_list[n] = NULL; // free first?\n }\n}\n\nvoid\npb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category) { \n \n struct pb_SubTimer *subtimer = (struct pb_SubTimer *) malloc\n (sizeof(struct pb_SubTimer));\n \n int len = strlen(label);\n \n subtimer->label = (char *) malloc (sizeof(char)*(len+1));\n sprintf(subtimer->label, \"%s\\0\", label);\n \n pb_ResetTimer(&subtimer->timer);\n subtimer->next = NULL;\n \n struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[pb_Category];\n if (subtimerlist == NULL) {\n subtimerlist = (struct pb_SubTimerList *) malloc\n (sizeof(struct pb_SubTimerList));\n subtimerlist->subtimer_list = subtimer;\n timers->sub_timer_list[pb_Category] = subtimerlist;\n } else {\n // Append to list\n struct pb_SubTimer *element = subtimerlist->subtimer_list;\n while (element->next != NULL) {\n element = element->next;\n }\n element->next = subtimer;\n }\n \n}\n\nvoid\npb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category)\n{\n\n// switchToSub( NULL, NONE\n// switchToSub( NULL, some\n// switchToSub( some, some\n// switchToSub( some, NONE -- tries to find \"some\" in NONE's sublist, which won't be printed\n \n struct pb_Timer *topLevelToStop = NULL;\n if (timers->current != category && timers->current != pb_TimerID_NONE) {\n // Switching to subtimer in a different category needs to stop the top-level current, different categoried timer.\n // NONE shouldn't have a timer associated with it, so exclude from branch\n topLevelToStop = &timers->timers[timers->current];\n } \n\n struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];\n struct pb_SubTimer *curr = (subtimerlist == NULL) ? 
NULL : subtimerlist->current;\n \n if (timers->current != pb_TimerID_NONE) {\n if (curr != NULL && topLevelToStop != NULL) {\n pb_StopTimerAndSubTimer(topLevelToStop, &curr->timer);\n } else if (curr != NULL) {\n pb_StopTimer(&curr->timer);\n } else {\n pb_StopTimer(topLevelToStop);\n }\n }\n \n subtimerlist = timers->sub_timer_list[category];\n struct pb_SubTimer *subtimer = NULL;\n \n if (label != NULL) { \n subtimer = subtimerlist->subtimer_list;\n while (subtimer != NULL) {\n if (strcmp(subtimer->label, label) == 0) {\n break;\n } else {\n subtimer = subtimer->next;\n }\n }\n } \n \n if (category != pb_TimerID_NONE) {\n \n if (subtimerlist != NULL) {\n subtimerlist->current = subtimer;\n }\n \n if (category != timers->current && subtimer != NULL) {\n pb_StartTimerAndSubTimer(&timers->timers[category], &subtimer->timer);\n } else if (subtimer != NULL) {\n // Same category, different non-NULL subtimer\n pb_StartTimer(&subtimer->timer);\n } else{\n // Different category, but no subtimer (not found or specified as NULL) -- unprefered way of setting topLevel timer\n pb_StartTimer(&timers->timers[category]);\n }\n } \n \n timers->current = category;\n \n}\n\nvoid\npb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer)\n{\n /* Stop the currently running timer */\n if (timers->current != pb_TimerID_NONE) {\n struct pb_SubTimer *currSubTimer = NULL;\n struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];\n \n if ( subtimerlist != NULL) {\n currSubTimer = timers->sub_timer_list[timers->current]->current;\n }\n if ( currSubTimer!= NULL) {\n pb_StopTimerAndSubTimer(&timers->timers[timers->current], &currSubTimer->timer);\n } else {\n pb_StopTimer(&timers->timers[timers->current]);\n }\n \n }\n\n timers->current = timer;\n\n if (timer != pb_TimerID_NONE) {\n pb_StartTimer(&timers->timers[timer]);\n }\n}\n\nvoid\npb_PrintTimerSet(struct pb_TimerSet *timers)\n{\n\n pb_Timestamp wall_end = get_time();\n\n struct pb_Timer *t = timers->timers;\n struct pb_SubTimer* sub = NULL;\n \n int maxSubLength;\n \n const char *categories[] = {\n \"IO\", \"Kernel\", \"Copy\", \"Driver\", \"Copy Async\", \"Compute\"\n };\n \n const int maxCategoryLength = 10;\n \n int i;\n for(i = 1; i < pb_TimerID_LAST-1; ++i) { // exclude NONE and OVRELAP from this format\n if(pb_GetElapsedTime(&t[i]) != 0) {\n \n // Print Category Timer\n printf(\"%-*s: %f\\n\", maxCategoryLength, categories[i-1], pb_GetElapsedTime(&t[i]));\n \n if (timers->sub_timer_list[i] != NULL) {\n sub = timers->sub_timer_list[i]->subtimer_list;\n maxSubLength = 0;\n while (sub != NULL) {\n // Find longest SubTimer label\n if (strlen(sub->label) > maxSubLength) {\n maxSubLength = strlen(sub->label);\n }\n sub = sub->next;\n }\n \n // Fit to Categories\n if (maxSubLength <= maxCategoryLength) {\n maxSubLength = maxCategoryLength;\n }\n \n sub = timers->sub_timer_list[i]->subtimer_list;\n \n // Print SubTimers\n while (sub != NULL) {\n printf(\" -%-*s: %f\\n\", maxSubLength, sub->label, pb_GetElapsedTime(&sub->timer));\n sub = sub->next;\n }\n }\n }\n }\n \n if(pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]) != 0)\n printf(\"CPU/Kernel Overlap: %f\\n\", pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]));\n \n float walltime = (wall_end - timers->wall_begin)/ 1e6;\n printf(\"Timer Wall Time: %f\\n\", walltime); \n \n}\n\nvoid pb_DestroyTimerSet(struct pb_TimerSet * timers)\n{\n /* clean up all of the async event markers */\n struct pb_async_time_marker_list ** event = &(timers->async_markers);\n while( *event != NULL) {\n struct 
pb_async_time_marker_list ** next = &((*event)->next);\n free(*event);\n (*event) = NULL;\n event = next;\n }\n \n int i = 0;\n for(i = 0; i < pb_TimerID_LAST; ++i) { \n if (timers->sub_timer_list[i] != NULL) {\n struct pb_SubTimer *subtimer = timers->sub_timer_list[i]->subtimer_list;\n struct pb_SubTimer *prev = NULL;\n while (subtimer != NULL) {\n free(subtimer->label);\n prev = subtimer;\n subtimer = subtimer->next;\n free(prev);\n }\n free(timers->sub_timer_list[i]);\n }\n }\n}\n\n\n" }, { "alpha_fraction": 0.6408498883247375, "alphanum_fraction": 0.6449623107910156, "avg_line_length": 25.509090423583984, "blob_id": "622334c0ec85e78c3fee3e54dc893873424f58ec", "content_id": "3db3bb79b5d776be212cb0d1db53fad5cf3f558d", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1459, "license_type": "permissive", "max_line_length": 144, "num_lines": 55, "path": "/tools/FIDL/config/TargetAllTemplate.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "// DO NOT MODIFY!\n// File generated on \n\n// This file was generated from <LLFI_SRC_ROOT>/tools/FIDL/TargetAllTemplate.cpp\n// by the <LLFI_SRC_ROOT>/tools/FIDL/FIDL-Algorithm.py\n// See https://github.com/DependableSystemsLab/LLFI/wiki/Using-FIDL-to-create-a-Custom-Software-Fault-Injector-and-a-Custom-Instruction-Selector\n// for more information.\n\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/ADT/Statistic.h\"\n#include \"llvm/Support/CFG.h\"\n#include \"llvm/ADT/DepthFirstIterator.h\"\n#include \"llvm/ADT/GraphTraits.h\"\n\n#include \"Utils.h\"\n#include \"FIInstSelector.h\"\n#include \"FICustomSelectorManager.h\"\n#include \"_SoftwareFaultRegSelectors.h\"\n\n#include <fstream>\n#include <iostream>\n#include <map>\n#include <set>\n#include <string>\n\nusing namespace llvm;\nnamespace llfi {\n//fidl_1\n public:\n virtual void getCompileTimeInfo(std::map<std::string, std::string>& info) {\n//fidl_2\n }\n private:\n virtual bool isInstFITarget(Instruction* inst) {\n//fidl_3\n }\n \n static bool isTargetLLFIIndex(Instruction* inst) {\n//fidl_4\n if (n > 0) {\n long llfiindex = getLLFIIndexofInst(inst);\n for (int i = 0; i < n; i++) { \n if (llfiindex == targeted_indices[i]) { \n return true;\n }\n }\n return false;\n } else {\n return true;\n }\n }\n};\n\n" }, { "alpha_fraction": 0.6673684120178223, "alphanum_fraction": 0.6673684120178223, "avg_line_length": 22.799999237060547, "blob_id": "f2bcbdf589b4a5705bcb3c2c89d030db86830507", "content_id": "040ae242cc7a9164e5b78f45295e486843fc3026", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 475, "license_type": "permissive", "max_line_length": 45, "num_lines": 20, "path": "/web-app/views/src/js/components/mainWindow/windowLayout.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar FunctionTabs = require(\"./functionTabs\");\nvar MainPannel = require(\"./mainPannel\");\nvar BottomPannel = require(\"./bottomPannel\");\n\nvar WindowLayout = React.createClass({\n\trender: function() {\n\t\treturn (\n\t\t\t<div className = \"mainWindows\">\n\t\t\t\t<div 
className=\"container-fluid\">\n\t\t\t\t\t<FunctionTabs></FunctionTabs>\n\t\t\t\t\t<MainPannel></MainPannel>\n\t\t\t\t\t<BottomPannel></BottomPannel>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = WindowLayout;" }, { "alpha_fraction": 0.8098739385604858, "alphanum_fraction": 0.8098739385604858, "avg_line_length": 24.7297306060791, "blob_id": "3b1ad9835b140668dc7dd02c6ed74edbc11a3ad6", "content_id": "ff148eafc08ba1642d0712ab6fc71c73fe4ee4b7", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 952, "license_type": "permissive", "max_line_length": 67, "num_lines": 37, "path": "/llvm_passes/CMakeLists.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "include(../config/llvm_passes.cmake)\n\nset(LLVM_PASSES_DIRS_LLFI hardware_failures core software_failures)\n\ninclude_directories(${LLVM_PASSES_DIRS_LLFI})\n\n\nadd_llvm_loadable_module(llfi-passes\n SampleFIInstSelector.cpp\n SampleFIRegSelector.cpp\n SoftwareFailureAutoScanPass.cpp\n HardwareFailureAutoScanPass.cpp\n\n core/FaultInjectionPass.cpp\n core/InstTracePass.cpp\n core/LLFIDotGraphPass.cpp\n core/Utils.cpp\n core/Controller.cpp\n core/FICustomSelectorManager.cpp\n core/FIInstSelector.cpp\n core/FIInstSelectorManager.cpp\n core/FIRegSelector.cpp\n core/GenLLFIIndexPass.cpp\n core/ProfilingPass.cpp\n core/RegLocBasedFIRegSelector.cpp\n\n hardware_failures/FuncNameFIInstSelector.cpp\n hardware_failures/LLFIIndexFIInstSelector.cpp\n hardware_failures/InstTypeFIInstSelector.cpp\n \n software_failures/_Timing_HighFrequentEventSelector.cpp \n software_failures/_SoftwareFaultRegSelectors.cpp\n \n #FIDL - DO NOT MODIFY UNTIL '#END'\n #END\n \n)\n" }, { "alpha_fraction": 0.7278911471366882, "alphanum_fraction": 0.7278911471366882, "avg_line_length": 20.14285659790039, "blob_id": "c3ccf307c7f6e885fbeac0cff26b805203334290", "content_id": "c8ec0742e011444a59496f1e3a4674d002336d31", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 147, "license_type": "permissive", "max_line_length": 46, "num_lines": 7, "path": "/web-app/views/src/js/actions/runOptionsActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar runOptionsActions = Reflux.createActions([\n 'updateRunOptions'\n ]);\n\nmodule.exports = runOptionsActions;" }, { "alpha_fraction": 0.44947734475135803, "alphanum_fraction": 0.4773519039154053, "avg_line_length": 16.875, "blob_id": "fc1388f7de0a972fb8707d2cd26d67dac2a1889f", "content_id": "f1d4d79bcca41e4d07723bbfcb720be2a1b5c636", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 287, "license_type": "permissive", "max_line_length": 30, "num_lines": 16, "path": "/sample_programs/min/min.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include<stdio.h>\nmain(argc, argv)\n int argc;\n char *argv[];\n{\n int arr[5],min,i;\n //printf(\"%p\\n\", arr);\n\t\tfor(i=0;i<5;i++)\n\t\t\tarr[i] = atoi(argv[i+1]);\n\t\tmin=arr[0];\n\t\tfor(i=1;i<5;i++)\n\t\t\tif(min > arr[i])\n\t\t\t\tmin = arr[i];\n\t\tprintf(\"Min is:%d\",min);\n\t\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.6841753125190735, "alphanum_fraction": 0.6861808896064758, "avg_line_length": 38.66666793823242, 
"blob_id": "4bfe434d1cec2be79948f1fe58d1c6375799ffbe", "content_id": "44aea948fd06efa90b47f612fd86b19feb2204e0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10471, "license_type": "permissive", "max_line_length": 148, "num_lines": 264, "path": "/web-app/views/src/js/components/mainWindow/runtimeOptionModal.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require('react');\nvar ReactDOM = require('react-dom');\nvar Reflux = require(\"reflux\");\nvar Modal = require('react-bootstrap').Modal;\nvar FormGroup = require('react-bootstrap').FormGroup;\nvar FormControl = require('react-bootstrap').FormControl;\nvar Checkbox = require('react-bootstrap').Checkbox;\nvar Button = require('react-bootstrap').Button;\nvar ControlLabel = require('react-bootstrap').ControlLabel;\nvar FilteredMultiSelect = require('react-filtered-multiselect');\nvar injectionModeStore = require(\"./../../stores/injectionModeStore\");\nvar runOptionsActions = require(\"./../../actions/runOptionsActions\");\n\nvar RuntimeOptionModal = React.createClass({\n\tmixins: [Reflux.connect(injectionModeStore,\"injectionMode\")],\n\tgetInitialState() {\n\t\treturn {\n\t\t\tshow: false,\n\t\t\truntimeOptions: [{fi_type: \"bitflip\"}],\n\t\t\tinjectionMode: {},\n\t\t\truntimeOptionNumber: 0\n\t\t};\n\t},\n\n\tcomponentDidMount () {\n\t\t// Initial status of the UI elements\n\t},\n\tclose() {\n\t\tthis.setState({ show: false });\n\t},\n\n\topen() {\n\t\tthis.setState({ show: true });\n\t},\n\n\trender: function() {\n\t\tvar softwareOptions = (\n\t\t\t<div class=\"modal-container\" id=\"RuntimeOptionModalID\" onClick={this.open}>\n\t\t\t\t<Modal {...this.props} bsSize=\"large\" aria-labelledby=\"contained-modal-title-lg\" onClick={this.open} show={this.state.show} onHide={this.close}>\n\t\t\t\t\t<Modal.Header closeButton>\n\t\t\t\t\t\t<Modal.Title id=\"contained-modal-title-lg\">Run time Option</Modal.Title>\n\t\t\t\t\t</Modal.Header>\n\t\t\t\t\t<Modal.Body>\n\t\t\t\t\t\t<div class=\"runtimeContainer\">\n\t\t\t\t\t\t\t<p class=\"boldFont leftFloat font-size-large\">Fault Injection Configuration</p>\n\t\t\t\t\t\t\t<button class=\"rightFloat runtimeMargin\" onClick={this.deleteRun}>Delete Run</button>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer runtimeOptionContainer\">\n\t\t\t\t\t\t\t<p class=\"boldFont leftFloat font-size-large\">Runtime Option:</p>\n\t\t\t\t\t\t\t<label class=\"runtimeMargin alignCenter\">run <span id=\"runNumber\">{this.state.runtimeOptionNumber}</span></label>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Number Of Runs<span class=\"redColor\">*</span></label>\n\t\t\t\t\t\t\t<input id=\"numOfRuns\" class=\"runtimeInputs\" type=\"number\" onChange={this.onChangeNumberofRuns} min={1}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t<button onClick={this.previousRun}>{\"<\"}</button>\n\t\t\t\t\t\t\t<button class=\"rightFloat\" onClick={this.nextRun}>{\">\"}</button>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Random Seed</label>\n\t\t\t\t\t\t\t<input id=\"randomSeed\" class=\"runtimeInputs\" type=\"text\" placeholder=\"null\" onChange={this.onChangeRandomSeed}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>TimeOut(ms)</label>\n\t\t\t\t\t\t\t<input 
id=\"timeOut\" class=\"runtimeInputs\" type=\"text\" placeholder=\"null\" onChange={this.onChangeTimeout}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</Modal.Body>\n\t\t\t\t\t<Modal.Footer>\n\t\t\t\t\t\t<Button onClick={this.onClickInstrument}>Submit</Button>\n\t\t\t\t\t</Modal.Footer>\n\t\t\t\t</Modal>\n\t\t\t</div>\n\t\t);\n\t\tvar hardwareOptions = (\n\t\t\t<div class=\"modal-container\" id=\"RuntimeOptionModalID\" onClick={this.open}>\n\t\t\t\t<Modal {...this.props} bsSize=\"large\" aria-labelledby=\"contained-modal-title-lg\" onClick={this.open} show={this.state.show} onHide={this.close}>\n\t\t\t\t\t<Modal.Header closeButton>\n\t\t\t\t\t\t<Modal.Title id=\"contained-modal-title-lg\">Run time Option</Modal.Title>\n\t\t\t\t\t</Modal.Header>\n\t\t\t\t\t<Modal.Body>\n\t\t\t\t\t\t<div class=\"runtimeContainer\">\n\t\t\t\t\t\t\t<p class=\"boldFont leftFloat font-size-large\">Fault Injection Configuration</p>\n\t\t\t\t\t\t\t<button class=\"rightFloat runtimeMargin\" onClick={this.deleteRun}>Delete Run</button>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer runtimeOptionContainer\">\n\t\t\t\t\t\t\t<p class=\"boldFont leftFloat font-size-large\">Runtime Option:</p>\n\t\t\t\t\t\t\t<label class=\"runtimeMargin alignCenter\">run <span id=\"runNumber\">{this.state.runtimeOptionNumber}</span></label>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Number Of Runs<span class=\"redColor\">*</span></label>\n\t\t\t\t\t\t\t<input id=\"numOfRuns\" class=\"runtimeInputs\" type=\"number\" min={1} onChange={this.onChangeNumberofRuns}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Fault Injection Type<span class=\"redColor\">*</span></label>\n\t\t\t\t\t\t\t<div class=\"runtimeInputs\">\n\t\t\t\t\t\t\t\t<FormGroup controlId=\"fi_type\">\n\t\t\t\t\t\t\t\t\t<FormControl componentClass=\"select\" placeholder=\"select\" onChange={this.onChangeInjectionType}>\n\t\t\t\t\t\t\t\t\t<option value=\"bitflip\">bitflip</option>\n\t\t\t\t\t\t\t\t\t<option value=\"stuck_at_0\">stuck_at_0</option>\n\t\t\t\t\t\t\t\t\t<option value=\"stuck_at_1\">stuck_at_1</option>\n\t\t\t\t\t\t\t\t\t</FormControl>\n\t\t\t\t\t\t\t\t</FormGroup>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Fault Injection Cycles</label>\n\t\t\t\t\t\t\t<input id=\"fi_cycle\" class=\"runtimeInputs\" type=\"number\" placeholder=\"max:-1\" onChange={this.onChangeInjectionCycles}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t<button onClick={this.previousRun}>{\"<\"}</button>\n\t\t\t\t\t\t\t<button class=\"rightFloat\" onClick={this.nextRun}>{\">\"}</button>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Fault Injection Index</label>\n\t\t\t\t\t\t\t<input id=\"fi_index\" class=\"runtimeInputs\" type=\"number\" placeholder=\"max:34\" onChange={this.onChangeInjectionIndex}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Fault Injection Register Index</label>\n\t\t\t\t\t\t\t<input id=\"fi_reg_index\" class=\"runtimeInputs\" type=\"text\" placeholder=\"null\" onChange={this.onChangeInjectionRegisterIndex}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Fault Injection Bit</label>\n\t\t\t\t\t\t\t<input id=\"fi_bit\" class=\"runtimeInputs\" type=\"text\" placeholder=\"null\" 
onChange={this.onChangeInjectionBit}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>Random Seed</label>\n\t\t\t\t\t\t\t<input id=\"randomSeed\" class=\"runtimeInputs\" type=\"text\" placeholder=\"null\" onChange={this.onChangeRandomSeed}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div class=\"runtimeContainer flexDisplay\">\n\t\t\t\t\t\t\t<label>TimeOut(ms)</label>\n\t\t\t\t\t\t\t<input id=\"timeOut\" class=\"runtimeInputs\" type=\"text\" placeholder=\"null\" onChange={this.onChangeTimeout}></input>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</Modal.Body>\n\t\t\t\t\t<Modal.Footer>\n\t\t\t\t\t\t<Button onClick={this.onClickSubmit}>Submit</Button>\n\t\t\t\t\t</Modal.Footer>\n\t\t\t\t</Modal>\n\t\t\t</div>\n\t\t);\n\t\treturn this.state.injectionMode.injectionMode && this.state.injectionMode.injectionMode == \"software\" ? softwareOptions : hardwareOptions;\n\t},\n\tonChangeNumberofRuns: function (event) {\n\t\tthis.runtimeOptionChange(\"numOfRuns\", event.target.value);\n\t},\n\tonChangeRandomSeed: function (event) {\n\t\tthis.runtimeOptionChange(\"randomSeed\", event.target.value);\n\t},\n\tonChangeTimeout: function (event) {\n\t\tthis.runtimeOptionChange(\"timeOut\", event.target.value);\n\t},\n\tonChangeInjectionType: function (event) {\n\t\tthis.runtimeOptionChange(\"fi_type\", event.target.value);\n\t},\n\tonChangeInjectionCycles: function (event) {\n\t\tthis.runtimeOptionChange(\"fi_cycle\", event.target.value);\n\t},\n\tonChangeInjectionIndex: function (event) {\n\t\tthis.runtimeOptionChange(\"fi_index\", event.target.value);\n\t},\n\tonChangeInjectionRegisterIndex: function (event) {\n\t\tthis.runtimeOptionChange(\"fi_reg_index\", event.target.value);\n\t},\n\tonChangeInjectionBit: function (event) {\n\t\tthis.runtimeOptionChange(\"fi_bit\", event.target.value);\n\t},\n\truntimeOptionChange: function (fieldName, value) {\n\t\tvar runtimeOptions = this.state.runtimeOptions;\n\t\truntimeOptions[this.state.runtimeOptionNumber][fieldName] = value;\n\t\tthis.setState({ runtimeOptions: runtimeOptions});\n\t},\n\tpreviousRun: function () {\n\t\tvar previousRunNumber = this.state.runtimeOptionNumber - 1;\n\t\tif (previousRunNumber < 0) {\n\t\t\treturn;\n\t\t}\n\t\tthis.loadRunOption(previousRunNumber);\n\t\tthis.setState({runtimeOptionNumber: previousRunNumber});\n\t},\n\tnextRun: function () {\n\t\tvar nextRunNumber = this.state.runtimeOptionNumber + 1;\n\t\tvar runtimeOptions = this.state.runtimeOptions;\n\t\t// If the previous run has not specified the required fields, do not create a new run\n\t\tif (!this.isRequiredFilled(runtimeOptions[nextRunNumber]) && !this.isRequiredFilled(runtimeOptions[this.state.runtimeOptionNumber])) {\n\t\t\treturn;\n\t\t}\n\n\t\tif (!runtimeOptions[nextRunNumber]) {\n\t\t\t// Add a new run option\n\t\t\truntimeOptions[nextRunNumber] = {fi_type: \"bitflip\"};\n\t\t}\n\t\tthis.loadRunOption(nextRunNumber);\n\t\tthis.setState({ runtimeOptions: runtimeOptions, runtimeOptionNumber: nextRunNumber});\n\t},\n\tloadRunOption: function (runNumber) {\n\t\tvar runOption = this.state.runtimeOptions[runNumber];\n\t\t$(\"#numOfRuns\").val(runOption.numOfRuns);\n\t\t$(\"#randomSeed\").val(runOption.randomSeed);\n\t\t$(\"#timeOut\").val(runOption.timeOut);\n\t\tif(runOption.fi_type) {\n\t\t\t$(\"#fi_type\").val(runOption.fi_type);\n\t\t} else 
{\n\t\t\t$(\"#fi_type\").val(\"bitflip\");\n\t\t}\n\t\t$(\"#fi_cycle\").val(runOption.fi_cycle);\n\t\t$(\"#fi_index\").val(runOption.fi_index);\n\t\t$(\"#fi_reg_index\").val(runOption.fi_reg_index);\n\t\t$(\"#fi_bit\").val(runOption.fi_bit);\n\t},\n\tdeleteRun: function () {\n\t\tvar runNumber = this.state.runtimeOptionNumber;\n\t\tvar runtimeOptions = this.state.runtimeOptions;\n\t\tif (runNumber == 0 && runtimeOptions.length <= 1) {\n\t\t\t// Initialize the options\n\t\t\truntimeOptions = [{fi_type: \"bitflip\"}];\n\t\t} else if (runNumber == 0 && runtimeOptions.length > 1) {\n\t\t\t// Bring the next run\n\t\t\truntimeOptions.splice(runNumber, 1);\n\t\t} else {\n\t\t\t// Bring the previous run\n\t\t\truntimeOptions.splice(runNumber, 1);\n\t\t\trunNumber --;\n\t\t}\n\t\tthis.setState({\n\t\t\truntimeOptions: runtimeOptions,\n\t\t\truntimeOptionNumber: runNumber\n\t\t}, function () {\n\t\t\tthis.loadRunOption(runNumber);\n\t\t}.bind(this));\n\t},\n\tisRequiredFilled: function (obj) {\n\t\tif (!obj) return false;\n\t\tif (obj.numOfRuns) return true;\n\t\treturn false;\n\t},\n\tonClickSubmit: function () {\n\t\tvar me = this;\n\t\tvar data = {};\n\t\tvar runtimeOptions = this.state.runtimeOptions;\n\t\t// Remove invalid run options\n\t\tfor (var i = 0; i < runtimeOptions.length; i++) {\n\t\t\tif (!this.isRequiredFilled(runtimeOptions[i])) {\n\t\t\t\truntimeOptions.splice(i, 1);\n\t\t\t\ti--;\n\t\t\t}\n\t\t}\n\t\tdata.runtimeOptions = runtimeOptions;\n\t\trunOptionsActions.updateRunOptions(runtimeOptions);\n\t\t$.ajax({\n\t\t\turl: '/runtimeOptions',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tconsole.log(\"runtimeOption submit success\");\n\t\t\t\tme.close();\n\t\t\t\twindow.alert(\"RuntimeOption submit Successful\");\n\t\t\t}\n\t\t});\n\t}\n});\n\nmodule.exports = RuntimeOptionModal;" }, { "alpha_fraction": 0.7413508892059326, "alphanum_fraction": 0.7594727873802185, "avg_line_length": 21.481481552124023, "blob_id": "3bffddec04cb43a8f1d3e0d6b7656eb8d7219f38", "content_id": "1182cf755c5c4e95256f4a882fbcbf1238cf7050", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 607, "license_type": "permissive", "max_line_length": 70, "num_lines": 27, "path": "/runtime_lib/Utils.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef LLFI_LIB_UTILS_H\n#define LLFI_LIB_UTILS_H\n\n#include <stdbool.h>\n\n// TRACING = Tracing flag\n#define TRACING_GOLDEN_RUN -1\n#define TRACING_FI_RUN_INIT 0\n#define TRACING_FI_RUN_FAULT_INSERTED 1\n#define TRACING_FI_RUN_START_TRACING 2\n#define TRACING_FI_RUN_END_TRACING 3\nextern int start_tracing_flag;\n\n// assume the max opcode in instruction.def (LLVM) is smaller than 100\n#define OPCODE_CYCLE_ARRAY_LEN 100\nvoid getOpcodeExecCycleArray(const unsigned len, int *arr);\n\nbool isLittleEndian();\n\n#define DEBUG\n#ifdef DEBUG\n#define debug(x) printf x; fflush(stdout);\n#else\n#define debug(x)\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.6193060874938965, "alphanum_fraction": 0.626011073589325, "avg_line_length": 37.82231521606445, "blob_id": "f7359f7d2593abfd0c51c9cfdf8151bfd911e86c", "content_id": "d1ff1af16b67bca8f6ea6c31333ded8f7373b1c9", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9396, 
"license_type": "permissive", "max_line_length": 115, "num_lines": 242, "path": "/llvm_passes/core/InstTracePass.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/***************\nInstTrace.cpp\nAuthor: Sam Coulter\n This llvm pass is part of the greater LLFI framework\n \n Run the pass with the opt -InstTrace option after loading LLFI.so\n \n This pass injects a function call before every non-void-returning, \n non-phi-node instruction that prints trace information about the executed\n instruction to a file specified during the pass.\n***************/\n\n#include <vector>\n#include <cmath>\n\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/GlobalValue.h\"\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/Support/Debug.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/InstIterator.h\"\n#include \"llvm/Support/CommandLine.h\"\n#include \"llvm/IR/DataLayout.h\"\n\n#include \"Utils.h\"\n\nusing namespace llvm;\n\ncl::opt<bool> debugtrace(\"debugtrace\",\n cl::desc(\"Print tracing instrucmented instruction information\"),\n cl::init(false));\ncl::opt<int> maxtrace( \"maxtrace\",\n cl::desc(\"Maximum number of dynamic instructions that will be traced after fault injection\"),\n cl::init(1000));\n\nnamespace llfi {\n\nstruct InstTrace : public FunctionPass {\n\n static char ID;\n\n InstTrace() : FunctionPass(ID) {}\n\n //Add AnalysisUsage Pass as prerequisite for InstTrace Pass\n virtual void getAnalysisUsage(AnalysisUsage &AU) const {\n AU.addRequired<DataLayout>();\n }\n\n virtual bool doInitialization(Module &M) {\n return false;\n }\n\n virtual bool doFinalization(Module &M) {\n //Dont forget to delete the output filename string!\n Function* mainfunc = M.getFunction(\"main\");\n if (mainfunc == NULL) {\n errs() << \"ERROR: Function main does not exist, \" <<\n \"which is required by LLFI\\n\";\n exit(1);\n }\n\n LLVMContext &context = M.getContext();\n FunctionType *postinjectfunctype = FunctionType::get(\n Type::getVoidTy(context), false); \n Constant *postracingfunc = M.getOrInsertFunction(\"postTracing\",\n postinjectfunctype);\n\n std::set<Instruction*> exitinsts;\n getProgramExitInsts(M, exitinsts);\n assert (exitinsts.size() != 0 \n && \"Program does not have explicit exit point\");\n\n for (std::set<Instruction*>::iterator it = exitinsts.begin();\n it != exitinsts.end(); ++it) {\n Instruction *term = *it;\n CallInst::Create(postracingfunc, \"\", term);\n }\n\n return true;\n }\n\n long fetchLLFIInstructionID(Instruction *targetInst) {\n return llfi::getLLFIIndexofInst(targetInst);\n }\n \n Instruction* getInsertPoint(Instruction* llfiIndexedInst) {\n Instruction *insertPoint;\n if (!llfiIndexedInst->isTerminator()) {\n insertPoint = llfi::getInsertPtrforRegsofInst(llfiIndexedInst, llfiIndexedInst);\n // if insert point is a call to inject fault, insert printInstTrace after the injectFault call\n // iff injectFault occurs AFTER the targeted instruction (ie. 
dst targeted)\n      insertPoint = changeInsertPtrIfInjectFaultInst(insertPoint);\n    } else {\n      // if terminator, insert before function\n      insertPoint = llfiIndexedInst;\n    }\n    return insertPoint;\n  }\n\n  virtual bool runOnFunction(Function &F) {\n    //Create handles to the functions parent module and context\n    LLVMContext& context = F.getContext();\n    Module *M = F.getParent();\n\n    //iterate through each basicblock of the function\n    inst_iterator lastInst;\n    for (inst_iterator instIterator = inst_begin(F), \n         lastInst = inst_end(F);\n         instIterator != lastInst; ++instIterator) {\n\n      //Print some Debug Info as the pass is being run\n      Instruction *inst = &*instIterator;\n\n      if (debugtrace) {\n        if (!llfi::isLLFIIndexedInst(inst)) {\n          errs() << \"Instruction \" << *inst << \" was not indexed\\n\";\n        } else {\n          errs() << \"Instruction \" << *inst << \" was indexed\\n\";\n        }\n      }\n      if (llfi::isLLFIIndexedInst(inst)) {\n\n        //Find instrumentation point for current instruction\n        Instruction *insertPoint = getInsertPoint(inst);\n        \n        //Skip instrumentation for terminating instructions\n        if (insertPoint->isTerminator()) {\n\t\t\tcontinue;\n\t\t}\n\n        //======== Find insertion location for alloca QINING @SET 15th============\n        Instruction* alloca_insertPoint = inst->getParent()->getParent()->begin()->getFirstNonPHIOrDbgOrLifetime();\n        //========================================================================\n\n\n        //Fetch size of instruction value\n        //The size must be rounded up before conversion to bytes because some data in llvm\n        //can be like 1 bit if it only needs 1 bit out of an 8bit/1byte data type\n        float bitSize;\n        AllocaInst* ptrInst;\n        if (inst->getType() != Type::getVoidTy(context)) {\n          //insert an instruction Allocate stack memory to store/pass instruction value\n          ptrInst = new AllocaInst(inst->getType(), \"llfi_trace\", alloca_insertPoint);\n          //Insert an instruction to Store the instruction Value!\n          new StoreInst(inst, ptrInst, insertPoint);\n\n          DataLayout &td = getAnalysis<DataLayout>();\n          bitSize = (float)td.getTypeSizeInBits(inst->getType());\n        }\n        else {\n          ptrInst = new AllocaInst(Type::getInt32Ty(context), \"llfi_trace\", alloca_insertPoint);\n          new StoreInst(ConstantInt::get(IntegerType::get(context, 32), 0), \n                        ptrInst, insertPoint);\n          bitSize = 32;\n        }\n        int byteSize = (int)ceil(bitSize / 8.0);\n\n        //Insert instructions to allocate stack memory for opcode name\n        \n        const char* opcodeNamePt = inst->getOpcodeName();\n        const std::string str(inst->getOpcodeName());\n        ArrayRef<uint8_t> opcode_name_array_ref((uint8_t*)opcodeNamePt, str.size() + 1);\n        //llvm::Value* OPCodeName = llvm::ConstantArray::get(context, opcode_name_array_ref);\n        llvm::Value* OPCodeName = llvm::ConstantDataArray::get(context, opcode_name_array_ref);\n        /********************************/\n\n        AllocaInst* OPCodePtr = new AllocaInst(OPCodeName->getType(),\n                  \"llfi_trace\", alloca_insertPoint);\n        new StoreInst(OPCodeName, OPCodePtr, insertPoint);\n\n        //Create the declaration of the printInstTracer Function\n        std::vector<Type*> parameterVector(5);\n        parameterVector[0] = Type::getInt32Ty(context); //ID\n        parameterVector[1] = OPCodePtr->getType(); \n        //======== opcode_str QINING @SET 15th============\n        //parameterVector[1] = PointerType::get(Type::getInt8Ty(context), 0); //Ptr to OpCode\n        //================================================\n        parameterVector[2] = Type::getInt32Ty(context); //Size of Inst Value\n        parameterVector[3] = ptrInst->getType(); //Ptr to Inst Value\n        parameterVector[4] = Type::getInt32Ty(context); //Int of max 
traces\n\n //LLVM 3.3 Upgrade\n ArrayRef<Type*> parameterVector_array_ref(parameterVector);\n\n FunctionType* traceFuncType = FunctionType::get(Type::getVoidTy(context), \n parameterVector_array_ref, false);\n Constant *traceFunc = M->getOrInsertFunction(\"printInstTracer\", traceFuncType); \n\n //Insert the tracing function, passing it the proper arguments\n std::vector<Value*> traceArgs;\n //Fetch the LLFI Instruction ID:\n ConstantInt* IDConstInt = ConstantInt::get(IntegerType::get(context, 32), \n fetchLLFIInstructionID(inst));\n\n ConstantInt* instValSize = ConstantInt::get(\n IntegerType::get(context, 32), byteSize);\n\n //Fetch maxtrace number:\n ConstantInt* maxTraceConstInt =\n ConstantInt::get(IntegerType::get(context, 32), maxtrace);\n\n //======== opcode_str QINING @SET 15th============\n //string opcode_str = fi_inst->getOpcodeName();\n //GlobalVariable* opcode_str_gv = findOrCreateGlobalNameString(M, opcode_str);\n //vector<Constant*> indices_for_gep(2);\n //indices_for_gep[0] = ConstantInt::get(Type::getInt32Ty(context),0);\n //indices_for_gep[1] = ConstantInt::get(Type::getInt32Ty(context),0);\n //ArrayRef<Constant*> gep_expr_ref(indices_for_gep);\n //Constant* gep_expr_opcode = ConstantExpr::getGetElementPtr(opcode_str_gv, gep_expr_ref);\n //================================================\n\n //Load All Arguments\n traceArgs.push_back(IDConstInt);\n traceArgs.push_back(OPCodePtr);\n traceArgs.push_back(instValSize);\n traceArgs.push_back(ptrInst);\n traceArgs.push_back(maxTraceConstInt);\n\n //LLVM 3.3 Upgrade\n ArrayRef<Value*> traceArgs_array_ref(traceArgs);\n\n //Create the Function\n CallInst::Create(traceFunc, traceArgs_array_ref, \"\", insertPoint);\n }\n }//Function Iteration\n\n return true; //Tell LLVM that the Function was modified\n }//RunOnFunction\n};//struct InstTrace\n\n//Register the pass with the llvm\nchar InstTrace::ID = 0;\nstatic RegisterPass<InstTrace> X(\"insttracepass\", \n \"Add tracing function calls in program to trace instruction value at runtime\", \n false, false);\n\n}//namespace llfi\n\n" }, { "alpha_fraction": 0.5531914830207825, "alphanum_fraction": 0.5531914830207825, "avg_line_length": 22.375, "blob_id": "7d027db19191fbe9ea4630e89e0b3f8999f74af6", "content_id": "d9a253ab163d4a157699d6baeec85a02fcad5d77", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 188, "license_type": "permissive", "max_line_length": 90, "num_lines": 8, "path": "/tools/FIDL/config/NewInjectorTemplate.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "// DO NOT MODIFY!\n public:\n virtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit, char *buf) {\n // Write your code here!\n \n // END\n }\n};\n\n" }, { "alpha_fraction": 0.5707316994667053, "alphanum_fraction": 0.5780487656593323, "avg_line_length": 20.63157844543457, "blob_id": "1a4c6ceda26e43abaa4b2f91122700bb14f494fc", "content_id": "529d6741584822b2607cab11274fd61ff652c90c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 410, "license_type": "permissive", "max_line_length": 72, "num_lines": 19, "path": "/web-app/views/src/js/components/header.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\n\nvar Header = React.createClass({\n\trender: function() {\n\t\treturn 
(\n\t\t\t<div className = \"header\">\n\t\t\t\t<div className= \"row\">\n\t\t\t\t\t<div className = \"col-sm-2\"></div>\n\t\t\t\t\t<div className = \"col-sm-8\">\n\t\t\t\t\t\t<div className=\"header-message\">Welcome to LLFI Web Service </div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div className= \"col-sm-2\"></div>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = Header;" }, { "alpha_fraction": 0.8452442288398743, "alphanum_fraction": 0.8493573069572449, "avg_line_length": 42.20000076293945, "blob_id": "9e6b0a50afc0abc4bb8809ebb5c53a2fbefdeeff", "content_id": "873b45f55f799ff7b1ce1336148364d7c1fda211", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1945, "license_type": "permissive", "max_line_length": 79, "num_lines": 45, "path": "/gui/CMakeLists.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\n\ninclude(../config/llfigui.cmake)\ninclude(../config/copy_utils.cmake)\n\nproject(gui)\n\nMESSAGE(\"INCLUDED JAVA CLASSPATH: ${CMAKE_JAVA_INCLUDE_PATH}\")\n\nadd_java_src(application/ClosePopupController.java)\nadd_java_src(application/CompileToIrController.java)\nadd_java_src(application/Controller.java)\nadd_java_src(application/FaultInjectionController.java)\nadd_java_src(application/InstrumentController.java)\nadd_java_src(application/Main.java)\nadd_java_src(application/MyThread.java)\nadd_java_src(application/ProfilingController.java)\nadd_java_src(application/ProgressBarController.java)\nadd_java_src(application/ResultTable.java)\nadd_java_src(application/SaveProfileController.java)\nadd_java_src(application/Table.java)\nadd_java_src(application/TraceController.java)\nadd_java_src(application/InputYaml.java)\nadd_java_src(application/ConfigReader.java)\n\ncopy(application/TraceOpenError.fxml application/TraceOpenError.fxml)\ncopy(application/TracingErrorDisplay.fxml application/TracingErrorDisplay.fxml)\ncopy(application/compileToIR.fxml application/compileToIR.fxml)\ncopy(application/Config.fxml application/Config.fxml)\ncopy(application/ErrorDisplay.fxml application/ErrorDisplay.fxml)\ncopy(application/InjectFaultResult.fxml application/InjectFaultResult.fxml)\ncopy(application/Instrument.fxml application/Instrument.fxml)\ncopy(application/NextRunOptionPopup.fxml application/NextRunOptionPopup.fxml)\ncopy(application/Profile.fxml application/Profile.fxml)\ncopy(application/ProfileName.fxml application/ProfileName.fxml)\ncopy(application/ProfileWithInput.fxml application/ProfileWithInput.fxml)\ncopy(application/Profiling.fxml application/Profiling.fxml)\ncopy(application/ProgressWindow.fxml application/ProgressWindow.fxml)\ncopy(application/Sample.fxml application/Sample.fxml)\n\ncopy(gui_config.yaml gui_config.yaml)\n\ncopy(application/lib/snakeyaml-1.15.jar application/lib/snakeyaml-1.15.jar)\n\ncompileGUI()\n\n" }, { "alpha_fraction": 0.6561725735664368, "alphanum_fraction": 0.6612640619277954, "avg_line_length": 29.18545150756836, "blob_id": "54bb7aa894977cbfa2cc1f2de29643cec6a3a045", "content_id": "a3d432c5eb1a54870fee0d2f85ee0b68fb89f6c4", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 58922, "license_type": "permissive", "max_line_length": 235, "num_lines": 1952, "path": "/gui/application/Controller.java", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", 
"text": "package application;\n\nimport java.util.List;\nimport java.io.BufferedReader;\nimport java.io.File;\nimport java.io.FileNotFoundException;\nimport java.io.FileReader;\nimport java.io.IOException;\nimport java.io.InputStreamReader;\nimport java.net.URL;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.LinkedHashMap;\nimport java.util.ResourceBundle;\nimport java.util.Comparator;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport javafx.collections.FXCollections;\nimport javafx.collections.ObservableList;\nimport javafx.event.ActionEvent;\nimport javafx.fxml.FXML;\nimport javafx.fxml.FXMLLoader;\nimport javafx.fxml.Initializable;\nimport javafx.scene.Node;\nimport javafx.scene.Parent;\nimport javafx.scene.Scene;\nimport javafx.scene.chart.BarChart;\nimport javafx.scene.chart.CategoryAxis;\nimport javafx.scene.chart.NumberAxis;\nimport javafx.scene.chart.XYChart;\nimport javafx.scene.control.Button;\nimport javafx.scene.control.ComboBox;\nimport javafx.scene.control.Label;\nimport javafx.scene.control.ListView;\nimport javafx.scene.control.ProgressBar;\nimport javafx.scene.control.ProgressIndicator;\nimport javafx.scene.control.Tab;\nimport javafx.scene.control.TabPane;\nimport javafx.scene.control.TableColumn;\nimport javafx.scene.control.TableView;\nimport javafx.scene.control.TextArea;\nimport javafx.scene.control.cell.PropertyValueFactory;\nimport javafx.scene.control.cell.CheckBoxTableCell;\nimport javafx.scene.control.CheckBox;\nimport javafx.scene.control.TableCell;\nimport javafx.scene.input.MouseEvent;\nimport javafx.scene.layout.HBox;\nimport javafx.util.Callback;\nimport javafx.stage.FileChooser;\nimport javafx.stage.DirectoryChooser;\nimport javafx.beans.value.ChangeListener;\nimport javafx.beans.value.ObservableValue;\nimport application.InstrumentController;\nimport javafx.stage.Stage;\nimport javafx.event.EventHandler;\npublic class Controller implements Initializable {\n\n\t@FXML\n\tprivate Button compiletoIrButton;\n\t@FXML\n\tprivate TextArea programTextArea;\n\t@FXML\n\tprivate ListView<String> fileList;\n\t@FXML\n\tObservableList<String> items;\n\t@FXML\n\tprivate TableView<Table> profilingTable;\n\t@FXML\n\tprivate TableColumn<Table,Integer> indexCount;\n\t@FXML\n\tprivate TableColumn<Table,Integer> cycleCount;\n\t@FXML\n\tprivate TableColumn<Table, String> failureType;\n\t@FXML\n\tprivate TableView<ResultTable> resultTable;\n\t@FXML\n\tprivate TableColumn<ResultTable,Integer> tFiRun;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> failureClass;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> failureMode;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> functionName;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> tFiType;\n\t@FXML\n\tprivate TableColumn<ResultTable,Integer> tFiIndex;\n\t@FXML\n\tprivate TableColumn<ResultTable,Integer> tFiCycle;\n\t@FXML\n\tprivate TableColumn<ResultTable,Integer> tFiRegIndex;\n\t@FXML\n\tprivate TableColumn<ResultTable,Integer> tFiBit;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> tFiSdc;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> tFiStatus;\n\t@FXML\n\tprivate TableColumn<ResultTable,String> tFiResult;\n\t@FXML\n\tprivate TableColumn<ResultTable,Boolean> tFiTrace;\n\t@FXML\n\tprivate CategoryAxis xAxis;\n\t@FXML\n\tprivate NumberAxis yAxis;\n\t@FXML\n\tprivate BarChart<Integer, String> resultSummary;\n\t@FXML\n\tprivate Label UploadLabel;\n\t@FXML\n\tprivate CheckBox 
showTraceOutputText;\n\t@FXML\n\tprivate Button instrumentButton;\n\t@FXML\n\tprivate Button profilingButton;\n\t@FXML\n\tprivate Button injectfaultButton;\n\t@FXML\n\tprivate Button runtimeButton;\n\t@FXML\n\tprivate Button tracegraphButton;\n\t@FXML\n\tprivate TextArea errorTextArea;\n\t@FXML\n\tprivate TextArea consoleTextArea;\n\t@FXML \n\tprivate TextArea programInputText;\n\t@FXML \n\tprivate Tab profilingTab;\n\t@FXML \n\tprivate Tab faultStatus;\n\t@FXML \n\tprivate Tab faultSummaryTab;\n\t@FXML \n\tprivate Tab errorTab;\n\t@FXML \n\tprivate Tab consoleTab;\n\t@FXML \n\tprivate TabPane tabBottom;\n\t@FXML\n\tprivate ProgressBar progressBar;\n\t@FXML \n\tprivate ProgressIndicator indicator;\n\t@FXML\n\tprivate ProgressBar progressBar1;\n\t@FXML\n\tprivate ProgressIndicator progressIndicator;\n\tXYChart.Series<Integer, String> series = new XYChart.Series<Integer,String>();\n\t/**\n\t * Program folder as well as the .ll filename.\n\t */\n\tstatic public String currentProgramFolder;\n\tstatic public String llfibuildPath=null;\n\tstatic public String zgrviewerPath =null;\n\tstatic public String psViewer=null;\n\tpublic boolean checkFlag = true;\n\tpublic boolean indexStates =false;\n\tstatic public List<String> console = new ArrayList<String>();\n\n\tpublic ArrayList<String> fileNameLists = new ArrayList<>();\n\tpublic ArrayList<String> registerList = new ArrayList<>();\n\tprivate ArrayList<String> resultFileNameLists;\n\tprivate ArrayList<String> resultErrorFileNameLists;\n\tprivate ArrayList<String> resultOutputFileNameLists;\n\tprivate ArrayList<String> resultList;\n\tprivate ArrayList<String> TraceDiffReportFileNameLists;\n\n\tprivate String indexBound;\n\tprivate String cycleBound;\n\tpublic int runCount = 0;\n\tpublic int totalRunCount = 0;\n\tpublic int currentCount = 0;\n\t/**\n\t * Flag that fault injection is complete.\n\t */\n\tpublic int crashedCount = 0;\n\tpublic int hangedCount = 0;\n\tpublic int sdcCount = 0;\n\n\tFileReader errorFile;\n\tString str;\n\tString line;\n\tString subStr[];\n\tString fiTypefault;\n\n\tint regIndex;\n\tint bit;\n\tString status = \"\";\n\tString result =\"\";\n\tString sdc = \"\";\n\tBoolean trace = false;\n\n\tprivate boolean errorFlag;\n\tprivate LinkedHashMap<String, List<String>> fileSelecMap = new LinkedHashMap<>();\n\tstatic public List<String> errorString = new ArrayList<String>();\n\tstatic public String inputString;\n\n\tpublic ArrayList<String> rowCount = new ArrayList<>();\n\t@FXML\n\tObservableList<ResultTable> data1 = FXCollections.observableArrayList() ;\n\t@FXML\n\tObservableList<Table> data = FXCollections.observableArrayList();\n\t@FXML\n\tObservableList<String> row = FXCollections.observableArrayList();\n\tpublic ArrayList<String> parameter = new ArrayList<>();\n\t\n\t// #SFIT\n\t/**\n\t * Indicates whether we are doing hardware fault or software fault injection. This affects \n\t * how certain options/results are displayed, as well as how the input.yaml is generated.\n\t */\n\tstatic public boolean isHardwareInjection = true;\n\t/**\n\t * Only affects software fault injection. 
This is true when we are selecting more than 1\n\t * software fault and the batch mode script will be called instead.\n\t */\n\tstatic public boolean isBatchMode = false;\n\tstatic public List<String> selectedSoftwareFailures;\n\t@FXML\n\tprivate ComboBox<String> fiResultDisplay;\n\t\n\tstatic public ConfigReader configReader;\n\t\n\t// making later changes to how states are kept\n\tpublic static CurrentState cs;\n\t\n\t\n\t@FXML\n\tprivate void onClickProfiling(ActionEvent event){\n\t\ttry{\n\t\t\terrorFlag = false;\n\t\t\ttabBottom.getSelectionModel().select(profilingTab);\n\n\t\t\tconsole.clear();\n\t\t\terrorString.clear();\n\n\t\t\tinputString = programInputText.getText();\n\t\t\t\n\t\t\t// #SFIT\n\t\t\tString execName;\n\t\t\tif (!Controller.isBatchMode) {\n\t\t\t\texecName = \"bin/profile \" + currentProgramFolder+\"/llfi/\" + currentProgramFolder + \"-profiling.exe \" + inputString;\n\t\t\t} else {\n\t\t\t\t// #SFIT \n\t\t\t\t// call batch instrument instead if we have more than 1 software fault\n\t\t\t\texecName = \"bin/batchProfile \" + currentProgramFolder + \"/\" + currentProgramFolder\n\t\t\t\t\t\t\t\t+ \".ll \" + inputString;\n\t\t\t}\n\t\t\t\n\t\t\t// delete the old llfi.stat.prof.txt\n\t\t\tFiles.deleteIfExists(new File(currentProgramFolder + \"/llfi.stat.prof.txt\").toPath());\n\n\t\t\tconsole.add(\"$ \"+ llfibuildPath + execName + \"\\n\");\n\t\t\tProcess p = new ProcessBuilder(\"/bin/tcsh\",\"-c\", llfibuildPath + execName).redirectErrorStream(true).start();\n\t\t\tp.waitFor();\n\n\t\t\tBufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream()));\n\t\t\tString line;\n\t\t\twhile ((line = in.readLine()) != null) {\n\t\t\t\tconsole.add(line);\n\t\t\t\terrorString.add(line);\n\t\t\t\tif (line.contains(\"error\") || line.contains(\"Error\") || line.contains(\"ERROR\"))\n\t\t\t\t\terrorFlag = true;\n\t\t\t}\n\t\t\tin.close();\n\t\t\t\n\t\t\t// gets the number of index\n\t\t\tFileReader inputFile = new FileReader(currentProgramFolder\n\t\t\t\t\t+ \"/llfi.stat.totalindex.txt\");\n\t\t\tin = new BufferedReader(inputFile);\n\n\t\t\twhile ((line = in.readLine()) != null) {\n\t\t\t\tindexBound = line.split(\"=\")[1];\n\t\t\t}\n\t\t\tin.close();\n\n\t\t\t// #SFIT\n\t\t\t// if we are doing multiple software failure injection, we have to \n\t\t\t// get all the different cycles for each failure\n\t\t\tArrayList<Table> profileResult = new ArrayList<Table>();\n\t\t\tint num = isBatchMode ? 
selectedSoftwareFailures.size() : 1;\n\t\t\tfor (int i = 0; i < num; i++) {\n\t\t\t\tString failureName;\n\t\t\t\tif (isHardwareInjection) {\n\t\t\t\t\tfailureName = \"Hardware Fault(s)\";\n\t\t\t\t} else {\n\t\t\t\t\tfailureName = selectedSoftwareFailures.get(i);\n\t\t\t\t}\n\t\t\t\tif (!isBatchMode) {\n\t\t\t\t\tinputFile = new FileReader(currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi.stat.prof.txt\");\n\t\t\t\t} else {\n\t\t\t\t\tinputFile = new FileReader(currentProgramFolder + \"/llfi-\"\n\t\t\t\t\t\t\t+ selectedSoftwareFailures.get(i) + \"/llfi.stat.prof.txt\");\n\t\t\t\t}\n\t\t\t\tin = new BufferedReader(inputFile);\n\t\t\t\twhile ((line = in.readLine()) != null) {\n\t\t\t\t\tif (line.contains(\"=\"))\n\t\t\t\t\t\tcycleBound = line.split(\"=\")[1];\n\t\t\t\t}\n\t\t\t\tin.close();\n\t\t\t\t\n\t\t\t\tprofileResult.add(new Table(failureName, Integer.parseInt(indexBound), \n\t\t\t\t\t\tInteger.parseInt(cycleBound) - 1));\n\t\t\t}\n\t\t\t\n\t\t\tObservableList<Table> data = FXCollections.observableArrayList(profileResult);\n\t\t\t\n\t\t\tfailureType.setCellValueFactory(new PropertyValueFactory<Table, String>(\"failureType\"));\n\t\t\tindexCount.setCellValueFactory(new PropertyValueFactory<Table, Integer>(\"noIndex\"));\n\t\t\tcycleCount.setCellValueFactory(new PropertyValueFactory<Table, Integer>(\"noCycles\"));\n\t\t\t\n\t\t\tprofilingTable.setItems(data);\n\t\t\t\n\t\t\tif(errorFlag)\n\t\t\t{\n\t\t\t\terrorFlag = false;\n\n\t\t\t\tParent root = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/ErrorDisplay.fxml\"));\n\t\t\t\tStage stage = new Stage();\n\t\t\t\tstage.setTitle(\"Error\");\n\t\t\t\tstage.setScene(new Scene(root, 450, 100));\n\t\t\t\tstage.show();\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\terrorString.clear();\n\t\t\t\tParent root = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/Profile.fxml\"));\n\t\t\t\tStage stage = new Stage(); \n\n\t\t\t\tstage.setTitle(\"Profiling\");\n\t\t\t\tstage.setScene(new Scene(root, 400, 100));\n\t\t\t\tstage.show();\n\t\t\t\t\n\t\t\t\tcs.changeStateTo(State.PROFILING_COMPLETED);\n\t\t\t}\n\t\t} catch (FileNotFoundException e) {\n\t\t\t// file not found, probably mean the user didn't enter in a command\n\t\t\terrorString.add(\"\\n\" + e.getMessage());\n\t\t\terrorString.add(\"required file not generated: did you forget to enter in the program's argument(s)?\");\n\t\t\t\n\t\t\ttry {\n\t\t\t\tParent root = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/ErrorDisplay.fxml\"));\n\t\t\t\tStage stage = new Stage();\n\t\t\t\tstage.setTitle(\"Error\");\n\t\t\t\tstage.setScene(new Scene(root, 450, 100));\n\t\t\t\tstage.show();\n\t\t\t} catch (IOException e1) {\n\t\t\t\tSystem.err.println(\"ERR: cannot load ErrorDisplay.fxml!\");\n\t\t\t\te1.printStackTrace();\n\t\t\t}\n\t\t} catch (Exception e) {\n\t\t\tSystem.err.println(\"ERR: profiling failed!\");\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\tprivate void importFile(String fileName) throws IOException {\n\t\t// add the file to the list and display if it doesn't exist\n\t\tif (!fileSelecMap.containsKey(fileName)) {\n\t\t\tfileNameLists.add(fileName);\n\t\t\t\n\t\t\tfileList.setItems(FXCollections.observableArrayList(fileNameLists));\n\t\t}\n\t\t\n\t\t// parses file and put it into the map\n\t\tFile f = new File(currentProgramFolder + \"/\" + fileName);\n\t\tfileSelecMap.put(fileName, parseFile(f));\n\t}\n\t\n\tprivate void setProgramTextArea(String fileName) {\n\t\tList<String> text = 
fileSelecMap.get(fileName);\n\t\tprogramTextArea.clear();\n\t\tfor (String s : text) {\n\t\t\tprogramTextArea.appendText(s);\n\t\t}\n\t\tprogramTextArea.home();\n\t}\n\n\t@FXML\n\tpublic void onClickActualFaultInjection(ActionEvent event)\n\t{\n\t\tParent root;\n\t\ttry{\n\t\t\ttabBottom.getSelectionModel().select(profilingTab); \n\t\t\t//TODO\n\t\t\t\n\t\t\t// read output folder(s), if exist delete them\n\t\t\tfinal File folder = new File(currentProgramFolder+\"/llfi/llfi_stat_output\");\n\t\t\tif(folder.exists())\n\t\t\t\tdeleteFilesInFolder(folder);\n\t\t\tfinal File errorFolder = new File(currentProgramFolder+\"/llfi/error_output\");\n\t\t\tif(errorFolder.exists())\n\t\t\t\tdeleteFilesInFolder(errorFolder);\n\n\t\t\t//if(new File(currentProgramFolder+\"/llfi/std_output\").exists());\n\t\t\t// {\n\t\t\tfinal File outputFolder = new File(currentProgramFolder+\"/llfi/std_output\");\n\t\t\tif(outputFolder.exists())\n\t\t\t\tdeleteFilesInFolder(outputFolder);\n\t\t\t// }\n\t\t\t\n\t\t\t// runs ProgressBarController.java\n\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/ProgressWindow.fxml\"));\n\t\t\tStage stage = new Stage(); \n\n\t\t\tstage.setTitle(\"Fault Injection\");\n\t\t\tstage.setScene(new Scene(root, 440, 118));\n\t\t\tstage.show();\n\t\t}\n\t\tcatch (IOException e) {\n\n\t\t\t// TODO Auto-generated catch block\n\t\t\te.printStackTrace();\n\t\t}\n\n\t}\n\tpublic void deleteFilesInFolder(final File folder) {\n\t\t//resultFileNameLists = new ArrayList<String>();\n\t\tfor (final File fileEntry : folder.listFiles()) {\n\n\t\t\tif (fileEntry.isDirectory()) {\n\t\t\t\tdeleteFilesInFolder(fileEntry);\n\t\t\t} else {\n\t\t\t\tfileEntry.delete();\n\t\t\t}\n\t\t}\n\t}\n\n\t@FXML\n\tpublic void onClickInjectFaultOkHandler(ActionEvent event){\n\n\n\t}\n\n\t/**\n\t * Generates the 'Fault Injection Status' tab\n\t */\n\t@FXML\n\tpublic void onGeneratingResultTable() {\n\t\ttry {\n\t\t\tsdcCount = 0;\n\t\t\trunCount = 0;\n\t\t\tdata1 = FXCollections.observableArrayList();\n\t\t\t\n\t\t\t// Generate trace_report_output folder to hold TraceDiffReport files\n\t\t\tProcessBuilder deleteTraceReportFolder = new ProcessBuilder(\n\t\t\t\t\t\"/bin/tcsh\", \"-c\", \"rm -rf \"\n\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/trace_report_output\");\n\t\t\tdeleteTraceReportFolder.start().waitFor();\n\n\t\t\tProcessBuilder makeTraceReportFolder = new ProcessBuilder(\n\t\t\t\t\t\"/bin/tcsh\", \"-c\", \"mkdir -p \" \n\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/trace_report_output\");\n\t\t\tmakeTraceReportFolder.start().waitFor();\n\t\t\t\n\t\t\t\n\t\t\t// #SFIT\n\t\t\t// get the fault the display\n\t\t\t// if we are doing single injection this does not matter\n\t\t\tString selectedFault = fiResultDisplay.getSelectionModel().getSelectedItem();\n\t\t\t\n\t\t\t// for batch mode, if the user selected all we need to loop through all the \n\t\t\t// inner folders\n\t\t\tint faultFolderNum;\n\t\t\tif (isBatchMode && \"All\".equals(selectedFault)) {\n\t\t\t\tfaultFolderNum = selectedSoftwareFailures.size();\n\t\t\t} else {\n\t\t\t\tfaultFolderNum = 1;\n\t\t\t}\n\t\t\t\n\t\t\t// loop through each inner folder when doing batch injection\n\t\t\tfor (int it = 0; it < faultFolderNum; it++) {\n\t\t\t\t/**\n\t\t\t\t * Name of TraceDiff files needs to change if we are doing batch mode\n\t\t\t\t * as we are copying all the TraceDiff files to one location (\"llfi/trace_report_output\")\n\t\t\t\t */\n\t\t\t\tString diff;\n\t\t\t\tString 
fault;\n\t\t\t\t// fault name changes through each loop iteration\n\t\t\t\tif (isBatchMode && \"All\".equals(selectedFault)) {\n\t\t\t\t\tfault = selectedSoftwareFailures.get(it);\n\t\t\t\t\tdiff = fault + \"-\";\n\t\t\t\t} else {\n\t\t\t\t\tfault = selectedFault;\n\t\t\t\t\tdiff = \"\";\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// changes path accordingly if we are doing batch software injection\n\t\t\t\tString folderPath, errorFolderPath, outputFolderPath, goldenStdOutputPath, statTraceProfPath;\n\t\t\t\tif (!isBatchMode) {\n\t\t\t\t\tfolderPath = currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/llfi_stat_output/\";\n\t\t\t\t\terrorFolderPath = currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/error_output/\";\n\t\t\t\t\toutputFolderPath = currentProgramFolder \n\t\t\t\t\t\t\t+ \"/llfi/std_output/\";\n\t\t\t\t\tgoldenStdOutputPath = currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/baseline/golden_std_output\";\n\t\t\t\t\tstatTraceProfPath = Controller.currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/baseline/llfi.stat.trace.prof.txt\";\n\t\t\t\t} else {\n\t\t\t\t\tfolderPath = currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/llfi_stat_output/\";\n\t\t\t\t\terrorFolderPath = currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/error_output/\";\n\t\t\t\t\toutputFolderPath = currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/std_output/\";\n\t\t\t\t\tgoldenStdOutputPath = currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/baseline/golden_std_output\";\n\t\t\t\t\tstatTraceProfPath = Controller.currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/baseline/llfi.stat.trace.prof.txt\";\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tlistFilesForFolder(new File(folderPath));\n\t\t\t\tlistFilesForErrorFolder(new File(errorFolderPath));\n\t\t\t\tlistFilesForOtputFolder(new File(outputFolderPath));\n\t\n\t\t\t\tfor (int i = 0; i < resultFileNameLists.size(); i++) {\n\t\t\t\t\t// get run_config and run_number from the file name\n\t\t\t\t\tString fileName = resultFileNameLists.get(i);\n\t\t\t\t\tString[] split = fileName.split(\"\\\\.\");\n\t\t\t\t\tString runNum = split[split.length - 2];\n\t\t\t\t\t/**\n\t\t\t\t\t * Used for generating the 'TraceDiffReportFile'\n\t\t\t\t\t * as well as finding it later. 
Get the last 8 character\n\t\t\t\t\t * of fileName as that is the run_config# - run#\n\t\t\t\t\t */\n\t\t\t\t\tString traceDiffName = diff + \"TraceDiffReportFile.\"\n\t\t\t\t\t\t\t+ runNum + \".txt\";\n\t\t\t\t\tif (fileName.contains(\"trace\")) {\n\t\t\t\t\t\t// Generate diff report file using tracediff\n\t\t\t\t\t\tProcessBuilder DiffFile = new ProcessBuilder(\"/bin/tcsh\",\n\t\t\t\t\t\t\t\t\"-c\", Controller.llfibuildPath + \"tools/tracediff '\"\n\t\t\t\t\t\t\t\t\t\t+ statTraceProfPath\n\t\t\t\t\t\t\t\t\t\t+ \"' '\" + folderPath\n\t\t\t\t\t\t\t\t\t\t+ fileName + \"' > './\"\n\t\t\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t\t\t\t+ \"/llfi/trace_report_output/\" + traceDiffName + \"'\");\n\t\t\t\t\t\tDiffFile.redirectErrorStream(true).start().waitFor();\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t} \n\t\t\t\t\tresultList = new ArrayList<String>();\n\t\t\t\t\trunCount++;\n\t\n\t\t\t\t\tFileReader inputFile = new FileReader(folderPath + fileName);\n\t\t\t\t\tBufferedReader bufferReader = new BufferedReader(inputFile);\n\t\t\t\t\t\n\t\t\t\t\t// this is parsing the file llfi/llfi_stat_output/llfi.stat.fi.injectedfaults.<*>-<*>.txt\n\t\t\t\t\twhile ((line = bufferReader.readLine()) != null) {\n\t\t\t\t\t\tstr = line.split(\":\")[1];\n\t\n\t\t\t\t\t\tsubStr = str.split(\",\");\n\t\t\t\t\t\tfor (int j = 0; j < subStr.length; j++) {\n\t\t\t\t\t\t\tresultList.add(subStr[j].split(\"=\")[1]);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbufferReader.close();\n\t\t\t\t\t\n\t\t\t\t\tif (resultErrorFileNameLists.size() > 0) {\n\t\t\t\t\t\tfor (int k = 0; k < resultErrorFileNameLists.size(); k++) {\n\t\t\t\t\t\t\tif (resultErrorFileNameLists\n\t\t\t\t\t\t\t\t\t.get(k)\n\t\t\t\t\t\t\t\t\t.substring(14)\n\t\t\t\t\t\t\t\t\t.equalsIgnoreCase(\n\t\t\t\t\t\t\t\t\t\t\tresultFileNameLists.get(i)\n\t\t\t\t\t\t\t\t\t\t\t\t\t.substring(28).split(\"\\\\.\")[0])) {\n\t\t\t\t\t\t\t\tresult = \"\";\n\t\t\t\t\t\t\t\tstatus = \"Injected\";\n\t\t\t\t\t\t\t\terrorFile = new FileReader(errorFolderPath\n\t\t\t\t\t\t\t\t\t\t+ resultErrorFileNameLists.get(k));\n\t\t\t\t\t\t\t\tBufferedReader bufferReader1 = new BufferedReader(\n\t\t\t\t\t\t\t\t\t\terrorFile);\n\t\t\t\t\t\t\t\twhile ((line = bufferReader1.readLine()) != null) {\n\t\t\t\t\t\t\t\t\tresult = result + line + \";\";\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tbufferReader1.close();\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tstatus = \"Not Injected \";\n\t\t\t\t\t\t\t\tresult = \"Nil\";\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tstatus = \"Not Injected \";\n\t\t\t\t\t\tresult = \"Nil\";\n\t\t\t\t\t}\n\t\n\t\t\t\t\tboolean tmpFlag = false;\n\t\t\t\t\tif (resultOutputFileNameLists.size() > 0) {\n\t\t\t\t\t\tfor (int k = 0; k < resultOutputFileNameLists.size(); k++) {\n\t\t\t\t\t\t\tfor (int l = 0; l < resultErrorFileNameLists.size(); l++) {\n\t\t\t\t\t\t\t\tif ((resultErrorFileNameLists.get(l).substring(14)\n\t\t\t\t\t\t\t\t\t\t.equalsIgnoreCase(resultFileNameLists\n\t\t\t\t\t\t\t\t\t\t\t\t.get(i).substring(28).split(\"\\\\.\")[0]))) {\n\t\t\t\t\t\t\t\t\tsdc = \"Not Occured\";\n\t\t\t\t\t\t\t\t\ttmpFlag = true;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t\tif (tmpFlag) {\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t\tif (!resultOutputFileNameLists\n\t\t\t\t\t\t\t\t\t.get(k)\n\t\t\t\t\t\t\t\t\t.substring(19)\n\t\t\t\t\t\t\t\t\t.equalsIgnoreCase(\n\t\t\t\t\t\t\t\t\t\t\tresultFileNameLists.get(i)\n\t\t\t\t\t\t\t\t\t\t\t\t\t.substring(28).split(\"\\\\.\")[0])) 
{\n\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t\t// i dont think the code below does anything \n\t\t\t\t\t\t\tProcessBuilder p1 = new ProcessBuilder(\"/bin/tcsh\",\n\t\t\t\t\t\t\t\t\t\"-c\", \"echo $COMPARE\");\n\t\t\t\t\t\t\tp1.redirectErrorStream(true);\n\t\t\t\t\t\t\tProcess pr1 = p1.start();\n\t\t\t\t\t\t\tBufferedReader in2 = new BufferedReader(\n\t\t\t\t\t\t\t\t\tnew InputStreamReader(pr1.getInputStream()));\n\t\t\t\t\t\t\tString line1;\n\t\t\t\t\t\t\tString comparePath = null;\n\t\t\t\t\t\t\twhile ((line1 = in2.readLine()) != null) {\n\t\t\t\t\t\t\t\tController.errorString.add(line1);\n\t\t\t\t\t\t\t\tif (line1.contains(\"error\")\n\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"Error\")\n\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"ERROR\")\n\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"Undefined variable\")) {\n\t\t\t\t\t\t\t\t\terrorFlag = true;\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tcomparePath = line1;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpr1.waitFor();\n\t\t\t\t\t\t\tin2.close();\n\t\n\t\t\t\t\t\t\t// I dont think the if below ever runs, only the else ever runs\n\t\t\t\t\t\t\tif (comparePath != null) {\n\t\t\t\t\t\t\t\tProcessBuilder p2 = new ProcessBuilder(\"/bin/tcsh\",\n\t\t\t\t\t\t\t\t\t\t\"-c\", \"sh \" + comparePath + \" '\"\n\t\t\t\t\t\t\t\t\t\t\t\t+ goldenStdOutputPath + \"' '\"\n\t\t\t\t\t\t\t\t\t\t\t\t+ outputFolderPath\n\t\t\t\t\t\t\t\t\t\t\t\t+ resultOutputFileNameLists.get(k) + \"'\");\n\t\n\t\t\t\t\t\t\t\tp2.redirectErrorStream(true);\n\t\t\t\t\t\t\t\tProcess pr2 = p2.start();\n\t\t\t\t\t\t\t\tBufferedReader in3 = new BufferedReader(\n\t\t\t\t\t\t\t\t\t\tnew InputStreamReader(pr2.getInputStream()));\n\t\n\t\t\t\t\t\t\t\twhile ((line1 = in3.readLine()) != null) {\n\t\n\t\t\t\t\t\t\t\t\tController.errorString.add(line1);\n\t\t\t\t\t\t\t\t\tif (line1.contains(\"error\")\n\t\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"Error\")\n\t\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"ERROR\")) {\n\t\t\t\t\t\t\t\t\t\terrorFlag = true;\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tif (line1.equalsIgnoreCase(\"Not Identical\")) {\n\t\t\t\t\t\t\t\t\t\t\tsdc = \"Not Occured\";\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tsdcCount++;\n\t\t\t\t\t\t\t\t\t\t\tsdc = \"Occured\";\n\t\t\t\t\t\t\t\t\t\t\tstatus = \"Injected\";\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tpr2.waitFor();\n\t\t\t\t\t\t\t\tin3.close();\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// #SFIT\n\t\t\t\t\t\t\t\t// '(' and ')' need to be escaped or encapsulated in a '...'\n\t\t\t\t\t\t\t\t// eg. 
diff 'factorial/llfi-CPUHog(Res)/llfi/baseline/golden_std_output' 'factorial/llfi-CPUHog(Res)/llfi/std_output/std_outputfile-run-0-1'\n\t\t\t\t\t\t\t\t// or it wont work in a diff\n\t\t\t\t\t\t\t\tString cmd = \"diff '\" + goldenStdOutputPath + \"' '\"\n\t\t\t\t\t\t\t\t\t\t+ outputFolderPath\n\t\t\t\t\t\t\t\t\t\t+ resultOutputFileNameLists.get(k) + \"'\";\n\t\t\t\t\t\t\t\tProcessBuilder p3 = new ProcessBuilder(\"/bin/tcsh\",\n\t\t\t\t\t\t\t\t\t\t\"-c\", cmd);\n\t\t\t\t\t\t\t\tp3.redirectErrorStream(true);\n\t\t\t\t\t\t\t\tProcess pr3 = p3.start();\n\t\t\t\t\t\t\t\tBufferedReader in4 = new BufferedReader(\n\t\t\t\t\t\t\t\t\t\tnew InputStreamReader(pr3.getInputStream()));\n\t\t\t\t\t\t\t\tif ((line1 = in4.readLine()) == null) {\n\t\t\t\t\t\t\t\t\tsdc = \"Not Occured\";\n\t\t\t\t\t\t\t\t\tstatus = \"Injected\";\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\twhile ((line1 = in4.readLine()) != null) {\n\t\t\t\t\t\t\t\t\t\tController.errorString.add(line1);\n\t\t\t\t\t\t\t\t\t\tif (line1.contains(\"error\")\n\t\t\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"Error\")\n\t\t\t\t\t\t\t\t\t\t\t\t|| line1.contains(\"ERROR\")) {\n\t\t\t\t\t\t\t\t\t\t\terrorFlag = true;\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tsdcCount++;\n\t\t\t\t\t\t\t\t\t\t\tsdc = \"Occured\";\n\t\t\t\t\t\t\t\t\t\t\tstatus = \"Injected\";\n\t\t\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tpr3.waitFor();\n\t\t\t\t\t\t\t\tin4.close();\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsdc = \"NA\";\n\t\t\t\t\t}\n\t\n\t\t\t\t\ttrace = false;\n\t\t\t\t\tdata1.add(new ResultTable(runCount, resultList.get(0), Integer\n\t\t\t\t\t\t\t.parseInt(resultList.get(1)), Integer\n\t\t\t\t\t\t\t.parseInt(resultList.get(2)), Integer\n\t\t\t\t\t\t\t.parseInt(resultList.get(6)), sdc, status, result,\n\t\t\t\t\t\t\ttrace, traceDiffName));\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttFiRun.setCellValueFactory(new PropertyValueFactory<ResultTable, Integer>(\n\t\t\t\t\t\"noOfRuns\"));\n\t\t\ttFiType.setCellValueFactory(new PropertyValueFactory<ResultTable, String>(\n\t\t\t\t\t\"FaultInjectionType\"));\n\t\t\ttFiIndex.setCellValueFactory(new PropertyValueFactory<ResultTable, Integer>(\n\t\t\t\t\t\"index\"));\n\t\t\ttFiCycle.setCellValueFactory(new PropertyValueFactory<ResultTable, Integer>(\n\t\t\t\t\t\"cycle\"));\n\t\t\ttFiBit.setCellValueFactory(new PropertyValueFactory<ResultTable, Integer>(\n\t\t\t\t\t\"bit\"));\n\t\t\ttFiSdc.setCellValueFactory(new PropertyValueFactory<ResultTable, String>(\n\t\t\t\t\t\"sdc\"));\n\t\t\ttFiStatus\n\t\t\t\t\t.setCellValueFactory(new PropertyValueFactory<ResultTable, String>(\n\t\t\t\t\t\t\t\"status\"));\n\t\t\ttFiResult\n\t\t\t\t\t.setCellValueFactory(new PropertyValueFactory<ResultTable, String>(\n\t\t\t\t\t\t\t\"result\"));\n\t\t\ttFiTrace.setCellValueFactory(new PropertyValueFactory<ResultTable, Boolean>(\n\t\t\t\t\t\"trace\"));\n\t\t\ttFiTrace.setCellFactory(new Callback<TableColumn<ResultTable, Boolean>, TableCell<ResultTable, Boolean>>() {\n\t\t\t\tpublic TableCell<ResultTable, Boolean> call(\n\t\t\t\t\t\tTableColumn<ResultTable, Boolean> p) {\n\t\t\t\t\treturn new CheckBoxTableCell<ResultTable, Boolean>();\n\t\t\t\t}\n\t\t\t});\n\t\t\ttFiTrace.setEditable(true);\n\n\t\t\tresultTable.setItems(data1);\n\t\t\tresultTable.setEditable(true);\n\t\t\ttracegraphButton.setDisable(false);\n\t\t\tshowTraceOutputText.setVisible(true);\n\n\t\t\t// Header CheckBox for select all\n\t\t\tHBox box = new HBox();\n\t\t\tLabel text = new Label(\"Trace\");\n\t\t\tCheckBox cb 
= new CheckBox();\n\t\t\tcb.setUserData(this.tFiTrace);\n\t\t\tcb.setText(\"Select All\");\n\t\t\tcb.setOnAction(handleSelectAllCheckbox());\n\t\t\tbox.getChildren().addAll(text, cb);\n\t\t\tbox.setSpacing(5);\n\t\t\tthis.tFiTrace.setGraphic(box);\n\t\t} catch (IOException e) {\n\t\t\tSystem.err.println(\"ERROR: ioexception\");\n\t\t\te.printStackTrace();\n\t\t} catch (InterruptedException e) {\n\t\t\tSystem.err.println(\"ERROR: interrupted\");\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\tprivate EventHandler<ActionEvent> handleSelectAllCheckbox() { \n\t\treturn new EventHandler<ActionEvent>() { \n\t\t\t@Override \n\t\t\tpublic void handle(ActionEvent event) { \n\t\t\t\tCheckBox cb = (CheckBox) event.getSource(); \n\t\t\t\tif(cb.isSelected())\n\t\t\t\t{\n\t\t\t\t\tfor (ResultTable resultTableRow : resultTable.getItems())\n\t\t\t\t\t{\n\t\t\t\t\t\tresultTableRow.setTrace(true);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tfor (ResultTable resultTableRow : resultTable.getItems())\n\t\t\t\t\t{\n\t\t\t\t\t\tresultTableRow.setTrace(false);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} \n\t\t}; \n\t}\n\t\n\t/**\n\t * Clicking \"TraceGraph\" on the GUI runs this function.\n\t * @param event - Click event\n\t */\n\t@FXML\n\tprivate void onClickGenerateTraceGraph(ActionEvent event) {\n\t\tParent root;\n\t\ttry {\n\t\t\t// Generate Trace Union file\n\t\t\tfinal File TraceDiffReportFolder = new File(currentProgramFolder + \"/llfi/trace_report_output\");\n\t\t\tFileListofTraceReportFolder(TraceDiffReportFolder);\n\n\t\t\t//Delete old UnionedDiffReportFile.txt, TraceGraph.dot, and TraceGraph.ps files\n\t\t\tFile UnionedDiffReportFile = new File(currentProgramFolder + \"/llfi/trace_report_output/UnionedDiffReportFile.txt\");\n\t\t\tif(UnionedDiffReportFile.exists()) {\n\t\t\t\tdelete(UnionedDiffReportFile);\n\t\t\t}\n\t\t\t\n\t\t\tFile TraceGraphDot = new File(currentProgramFolder + \"/llfi/trace_report_output/TraceGraph.dot\");\n\t\t\tif(TraceGraphDot.exists()) {\n\t\t\t\tdelete(TraceGraphDot);\n\t\t\t}\n\t\t\t\n\t\t\tFile TraceGraphPs = new File(currentProgramFolder + \"/llfi/trace_report_output/TraceGraph.ps\");\n\t\t\tif(TraceGraphPs.exists()) {\n\t\t\t\tdelete(TraceGraphPs);\n\t\t\t}\n\n\t\t\tString TraceUnionCmd = Controller.llfibuildPath + \"tools/traceunion\";\n\t\t\tint found = 0;\n\t\t\tString traceFileName = \"\";\n\t\t\tfor (ResultTable resultTableRow : resultTable.getItems()) {\n\t\t\t\t// if checkBox is ticked, then use this file\n\t\t\t\tif (resultTableRow.getTrace() == true) {\n\t\t\t\t\tfor (int i = 0; i < TraceDiffReportFileNameLists.size(); i++) {\n\t\t\t\t\t\t// see if trace file exist (trace file does not exist if the fault injection\n\t\t\t\t\t\t// caused the program to crash)\n\t\t\t\t\t\ttraceFileName = TraceDiffReportFileNameLists.get(i);\n\t\t\t\t\t\t// System.out.println(traceFileName + \" \" + resultTableRow.getTraceFileName());\n\t\t\t\t\t\tif (resultTableRow.getTraceFileName().equals(traceFileName)) {\n\t\t\t\t\t\t\tTraceUnionCmd += \" './\"\n\t\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t\t\t+ \"/llfi/trace_report_output/\" + traceFileName\n\t\t\t\t\t\t\t\t\t+ \"'\";\n\t\t\t\t\t\t\tfound++;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tTraceUnionCmd += \"> ./\" + Controller.currentProgramFolder\n\t\t\t\t\t+ \"/llfi/trace_report_output/UnionedDiffReportFile.txt\";\n\t\t\tif (found > 1) {\n\t\t\t\tProcessBuilder UnionTraceDiffReportFile = new ProcessBuilder(\n\t\t\t\t\t\t\"/bin/tcsh\", \"-c\", 
TraceUnionCmd);\n\t\t\t\tUnionTraceDiffReportFile.redirectErrorStream(true).start()\n\t\t\t\t\t\t.waitFor();\n\n\t\t\t\t// Generate .dot graph file using traceontograph\n\t\t\t\tProcessBuilder TraceGraph = new ProcessBuilder(\n\t\t\t\t\t\t\"/bin/tcsh\",\n\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\tController.llfibuildPath\n\t\t\t\t\t\t\t\t+ \"tools/traceontograph \"\n\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t\t+ \"/llfi/trace_report_output/UnionedDiffReportFile.txt\"\n\t\t\t\t\t\t\t\t+ \" \" + currentProgramFolder\n\t\t\t\t\t\t\t\t+ \"/llfi.stat.graph.dot\" + \" > ./\"\n\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t\t+ \"/llfi/trace_report_output/TraceGraph.dot\");\n\t\t\t\tTraceGraph.redirectErrorStream(true).start().waitFor();\n\t\t\t} else if (found == 1) {\n\t\t\t\t// Generate .dot graph file using traceontograph\n\t\t\t\tProcessBuilder TraceGraph = new ProcessBuilder(\"/bin/tcsh\",\n\t\t\t\t\t\t\"-c\", Controller.llfibuildPath\n\t\t\t\t\t\t\t\t+ \"tools/traceontograph '\"\n\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t\t+ \"/llfi/trace_report_output/\" + traceFileName\n\t\t\t\t\t\t\t\t+ \"' \" + currentProgramFolder\n\t\t\t\t\t\t\t\t+ \"/llfi.stat.graph.dot\" + \" > ./\"\n\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\n\t\t\t\t\t\t\t\t+ \"/llfi/trace_report_output/TraceGraph.dot\");\n\t\t\t\tTraceGraph.redirectErrorStream(true).start().waitFor();\n\t\t\t} else { // found == 0\n\t\t\t\t// When cannot find trace files, inform users about the error.\n\t\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\n\t\t\t\t\t\t\"application/TracingErrorDisplay.fxml\"));\n\t\t\t\tStage stage = new Stage();\n\n\t\t\t\tstage.setTitle(\"Error\");\n\t\t\t\tstage.setScene(new Scene(root, 500, 118));\n\t\t\t\tstage.show();\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t\n\t\t\t//If show traceoutput text box is selected, find the file opener then open the tracefile\n\t\t\tString fileOpener = \"\";\n\t\t\tif (showTraceOutputText.isSelected() && found != 0) {\n\t\t\t\tString osName = System.getProperty(\"os.name\").toLowerCase();\n\t\t\t\tif (osName.indexOf(\"mac\") >= 0) {\n\t\t\t\t\tfileOpener = \"open \";\n\t\t\t\t} else if (osName.indexOf(\"nux\") >= 0) {\n\t\t\t\t\tfileOpener = \"xdg-open \";\n\t\t\t\t} else if (osName.indexOf(\"solaris\") >= 0) {\n\t\t\t\t\tfileOpener = \"xdg-open \";\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tString fileName = fileOpener + \"'\" + Controller.currentProgramFolder + \"/llfi/trace_report_output/\";\n\t\t\t\tif (found > 1) {\n\t\t\t\t\tfileName += \"UnionedDiffReportFile.txt'\";\n\t\t\t\t} else {\n\t\t\t\t\tfileName += traceFileName + \"'\";\n\t\t\t\t}\n\t\t\t\tProcessBuilder openFile = new ProcessBuilder(\"/bin/tcsh\", \"-c\", fileName);\n\t\t\t\topenFile.redirectErrorStream(true).start();\n\t\t\t}\n\n\t\t\t// Convert the trace graph to PostScript format using Graphviz\n\t\t\tProcessBuilder ConvertToPs = new ProcessBuilder(\"/bin/tcsh\",\"-c\",\"dot -Tps \"+Controller.currentProgramFolder+\"/llfi/trace_report_output/TraceGraph.dot -o \"+Controller.currentProgramFolder+\"/llfi/trace_report_output/TraceGraph.ps\");\n\t\t\tConvertToPs.redirectErrorStream(true);\n\t\t\tConvertToPs.start().waitFor();\n\t\t\t\n\t\t\t// if zgrviewer path has not been set/not installed, we will open the PostScript file instead\n\t\t\tif (zgrviewerPath.contains(\"Undefined\")) {\n\t\t\t\t// Test system before opening the graph\n\t\t\t\tString psOpenner =\"\";\n\t\t\t\tboolean psError = true;\n\t\t\t\tString checkExe =\"\";\n\t\t\t\tString osName = 
System.getProperty(\"os.name\").toLowerCase();\n\t\t\t\tif (osName.indexOf(\"mac\") >= 0) {\n\t\t\t\t\t// The current os is mac, use to view ps file\n\t\t\t\t\tpsOpenner = \"open \";\n\t\t\t\t\tpsError = false;\n\t\t\t\t\tcheckExe = \"open -h > /dev/null 2>&1\";\n\t\t\t\t} else if (osName.indexOf(\"nux\") >= 0) {\n\t\t\t\t\t// The current os is linux, use xdg-open to view ps file\n\t\t\t\t\tpsOpenner = \"xdg-open \";\n\t\t\t\t\tcheckExe = \"xdg-open --help > /dev/null\";\n\t\t\t\t\tpsError = false;\n\t\t\t\t} else if (osName.indexOf(\"solaris\") >= 0) {\n\t\t\t\t\t// The current os is solaris, use xdg-open to view ps file\n\t\t\t\t\tpsOpenner = \"xdg-open \";\n\t\t\t\t\tpsError = false;\n\t\t\t\t\tcheckExe = \"xdg-open --help > /dev/null\";\n\t\t\t\t} else {\n\t\t\t\t\t// Other OS, display error message when trying to open os file\n\t\t\t\t\tpsError = true;\n\t\t\t\t}\n\n\t\t\t\tif (!psError) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tRuntime rt = Runtime.getRuntime();\n\t\t\t\t\t\trt.exec(checkExe);\n\t\t\t\t\t\tpsError = false;\n\t\t\t\t\t} catch (IOException e) {\n\t\t\t\t\t\t//e.printStackTrace(); \n\t\t\t\t\t\t//System.out.println(e);\n\t\t\t\t\t\tpsError =true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// If user specified a psViewer in environment variable, use the defined ps file viewer.\n\t\t\t\t//kenneth\n\t\t\t\tif (!psViewer.contains(\"Undefined variable\")) {\n\t\t\t\t\tpsError = false;\n\t\t\t\t\tpsOpenner = psViewer + \" \";\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (psError) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/TraceOpenError.fxml\"));\n\t\t\t\t\t\tStage stage = new Stage();\n\t\t\t\t\t\tstage.setTitle(\"Error\");\n\t\t\t\t\t\tstage.setScene(new Scene(root, 650, 100));\n\t\t\t\t\t\tstage.show();\n\t\t\t\t\t} catch (IOException e) {\n\t\t\t\t\t\te.printStackTrace();\n\t\t\t\t\t\tSystem.out.println(e);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t//Open the trace graph file\n\t\t\t\t\tProcessBuilder openGraph = new ProcessBuilder(\"/bin/tcsh\",\"-c\",psOpenner+Controller.currentProgramFolder+\"/llfi/trace_report_output/TraceGraph.ps\");\n\t\t\t\t\topenGraph.redirectErrorStream(true);\n\t\t\t\t\topenGraph.start().waitFor();\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tProcessBuilder openGraph = new ProcessBuilder(\"/bin/tcsh\",\"-c\",zgrviewerPath+\"run.sh \"+Controller.currentProgramFolder+\"/llfi/trace_report_output/TraceGraph.dot\");\n\t\t\t\topenGraph.redirectErrorStream(true).start();\n\t\t\t}\n\t\t} catch (IOException e) {\n\t\t\te.printStackTrace();\n\t\t\tSystem.out.println(e.getMessage());\n\t\t} catch (InterruptedException e) {\n\t\t\tSystem.out.println(e);\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\t/**\n\t * Generates the 'Fault Summary' tab\n\t */\n\t@FXML\n\tprivate void generateFaultSummaryGraph() {\n\t\t// sets all counts to zero\n\t\tint faultCount = 0;\n\t\thangedCount = 0;\n\t\tcrashedCount = 0;\n\t\tresultList = new ArrayList<String>(); // this is not used here?\n\t\t\n\t\ttry {\n\t\t\t// #SFIT\n\t\t\t// get the fault the display\n\t\t\t// if we are doing single injection this does not matter\n\t\t\tString selectedFault = fiResultDisplay.getSelectionModel().getSelectedItem();\n\t\t\t\n\t\t\t// for batch mode, if the user selected all we need to loop through all the \n\t\t\t// inner folders\n\t\t\tint faultFolderNum;\n\t\t\tif (isBatchMode && \"All\".equals(selectedFault)) {\n\t\t\t\tfaultFolderNum = selectedSoftwareFailures.size();\n\t\t\t} else {\n\t\t\t\tfaultFolderNum = 1;\n\t\t\t}\n\t\t\t\n\t\t\t// loop 
\n\t\t\t// loop through each inner folder when doing batch injection\n\t\t\tfor (int it = 0; it < faultFolderNum; it++) {\n\t\t\t\tString errorFolderPath;\n\t\t\t\tString folderPath;\n\t\t\t\tString fault;\n\t\t\t\t\n\t\t\t\t// are we in batch mode or single?\n\t\t\t\tif (isBatchMode && \"All\".equals(selectedFault)) {\n\t\t\t\t\tfault = selectedSoftwareFailures.get(it);\n\t\t\t\t} else {\n\t\t\t\t\tfault = selectedFault;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// go into the inner folder if we are in batch mode\n\t\t\t\tif (!isBatchMode) {\n\t\t\t\t\tfolderPath = currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/llfi_stat_output/\";\n\t\t\t\t\terrorFolderPath = currentProgramFolder\n\t\t\t\t\t\t\t+ \"/llfi/error_output/\";\n\t\t\t\t} else {\n\t\t\t\t\tfolderPath = currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/llfi_stat_output/\";\n\t\t\t\t\terrorFolderPath = currentProgramFolder + \"/llfi-\" + fault\n\t\t\t\t\t\t\t+ \"/llfi/error_output/\";\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// list the files in these folders into member variables\n\t\t\t\tlistFilesForFolder(new File(folderPath));\n\t\t\t\tlistFilesForErrorFolder(new File(errorFolderPath));\n\t\t\t\t\n\t\t\t\t// scan every file in the error folder for 'crashed' or 'hanged'\n\t\t\t\tfor (int k = 0; k < resultErrorFileNameLists.size(); k++) {\n\t\t\t\t\terrorFile = new FileReader(errorFolderPath\n\t\t\t\t\t\t\t+ resultErrorFileNameLists.get(k));\n\t\t\t\t\tBufferedReader bufferReader1 = new BufferedReader(errorFile);\n\t\t\t\t\twhile ((line = bufferReader1.readLine()) != null) {\n\t\t\t\t\t\tif (line.contains(\"crashed\")) {\n\t\t\t\t\t\t\tcrashedCount++;\n\t\t\t\t\t\t} else if (line.contains(\"hanged\")) {\n\t\t\t\t\t\t\thangedCount++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbufferReader1.close();\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// count the fault injection runs (trace files are not runs)\n\t\t\t\tfor (int i = 0; i < resultFileNameLists.size(); i++) {\n\t\t\t\t\tif (!resultFileNameLists.get(i).contains(\"trace\")) {\n\t\t\t\t\t\tfaultCount++;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} \n\n\t\t\tString[] params = { \"Crashed\", \"Hanged\", \"SDC\" };\n\n\t\t\t// Rebuild the bar chart from the counts computed above.\n\t\t\tresultSummary.getData().clear();\n\n\t\t\txAxis.setLabel(\"Parameters\");\n\t\t\tyAxis.setLabel(\"Total No. of Fault Injections\");\n\t\t\txAxis.setCategories(FXCollections\n\t\t\t\t\t.<String> observableArrayList(Arrays.asList(params)));\n\t\t\txAxis.setAutoRanging(false);\n\t\t\txAxis.invalidateRange(Arrays.asList(params));\n\n\t\t\tyAxis.setAutoRanging(false);\n\t\t\tyAxis.setLowerBound(0);\n\t\t\t\n\t\t\tyAxis.setUpperBound(faultCount);\n\t\t\tyAxis.setTickUnit(1);\n\n\t\t\tseries = new XYChart.Series<Integer, String>();\n\n\t\t\tXYChart.Data<Integer, String> faultData = new XYChart.Data<Integer, String>(\n\t\t\t\t\tcrashedCount, \"Crashed\");\n\t\t\tseries.getData().add(faultData);\n\n\t\t\tfaultData = new XYChart.Data<Integer, String>(hangedCount, \"Hanged\");\n\t\t\tseries.getData().add(faultData);\n\n\t\t\tfaultData = new XYChart.Data<Integer, String>(sdcCount, \"SDC\");\n\t\t\tseries.getData().add(faultData);\n\n\t\t\tresultSummary.getData().add(series);\n\n\t\t} catch (IOException e) {\n\t\t\tSystem.err.println(\"ERROR: cannot generate Fault Summary!\");\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n
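\n\t// The four listFilesFor* helpers below share one pattern: walk a folder\n\t// tree and collect the plain file names into a member list. Each public\n\t// entry point resets its own list and then delegates to collectFileNames()\n\t// so that recursing into subdirectories cannot clobber a half-built list.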
\n\tpublic void listFilesForErrorFolder(final File folder) {\n\t\tresultErrorFileNameLists = new ArrayList<String>();\n\t\tcollectFileNames(folder, resultErrorFileNameLists);\n\t}\n\n\tpublic void listFilesForOtputFolder(final File folder) {\n\t\tresultOutputFileNameLists = new ArrayList<String>();\n\t\tcollectFileNames(folder, resultOutputFileNameLists);\n\t}\n\n\tpublic void FileListofTraceReportFolder(final File folder) {\n\t\tTraceDiffReportFileNameLists = new ArrayList<String>();\n\t\tcollectFileNames(folder, TraceDiffReportFileNameLists);\n\t}\n\n\tpublic void listFilesForFolder(final File folder) {\n\t\tresultFileNameLists = new ArrayList<String>();\n\t\t// sort the top-level listing so the fault injection runs appear in\n\t\t// natural (run-number) order\n\t\tFile[] files = folder.listFiles();\n\t\tArrays.sort(files, new FileNameComparator());\n\t\tfor (final File fileEntry : files) {\n\t\t\tif (fileEntry.isDirectory()) {\n\t\t\t\tcollectFileNames(fileEntry, resultFileNameLists);\n\t\t\t} else {\n\t\t\t\tresultFileNameLists.add(fileEntry.getName());\n\t\t\t}\n\t\t}\n\t}\n\n\t// Walks the folder tree rooted at 'folder' and appends every plain file\n\t// name to 'names'.\n\tprivate void collectFileNames(final File folder, final List<String> names) {\n\t\tfor (final File fileEntry : folder.listFiles()) {\n\t\t\tif (fileEntry.isDirectory()) {\n\t\t\t\tcollectFileNames(fileEntry, names);\n\t\t\t} else {\n\t\t\t\tnames.add(fileEntry.getName());\n\t\t\t}\n\t\t}\n\t}\n\n\tPattern splitter = Pattern.compile(\"(\\\\d+|\\\\D+)\");\n\t// Compares file names \"naturally\": runs of digits compare numerically, so\n\t// \"run2\" sorts before \"run10\".\n\tpublic class FileNameComparator implements Comparator\n\t{\n\t\tpublic int compare(Object o1, Object o2)\n\t\t{\n\t\t\tString s1 = o1.toString(), s2 = o2.toString();\n\t\t\t// Split each string into runs of digits and non-digits\n\t\t\tArrayList sa1 = split(s1);\n\t\t\tArrayList sa2 = split(s2);\n\t\t\t// Nothing to compare, or a different structure\n\t\t\tif (sa1.size() == 0 || sa1.size() != sa2.size())\n\t\t\t{\n\t\t\t\t// Just compare the original strings\n\t\t\t\treturn s1.compareTo(s2);\n\t\t\t}\n\t\t\tint i = 0;\n\t\t\tString si1 = \"\";\n\t\t\tString si2 = \"\";\n\t\t\t// Compare the beginning of the strings\n\t\t\tfor (; i < sa1.size(); i++)\n\t\t\t{\n\t\t\t\tsi1 = (String)sa1.get(i);\n\t\t\t\tsi2 = (String)sa2.get(i);\n\t\t\t\tif (!si1.equals(si2))\n\t\t\t\t\tbreak; // until we find a difference\n\t\t\t}\n\t\t\t// No difference found?\n\t\t\tif (i == sa1.size())\n\t\t\t\treturn 0; // Same strings!\n\n\t\t\t// Try to convert the differing runs of characters to numbers\n\t\t\tint val1, val2;\n\t\t\ttry\n\t\t\t{\n\t\t\t\tval1 = Integer.parseInt(si1);\n\t\t\t\tval2 = Integer.parseInt(si2);\n\t\t\t}\n\t\t\tcatch (NumberFormatException e)\n\t\t\t{\n\t\t\t\treturn s1.compareTo(s2); // Strings differ on a non-number\n\t\t\t}\n\n\t\t\t// Compare the remainder of the string\n\t\t\tfor (i++; i < sa1.size(); i++)\n\t\t\t{\n\t\t\t\tsi1 = (String)sa1.get(i);\n\t\t\t\tsi2 = (String)sa2.get(i);\n\t\t\t\tif (!si1.equals(si2))\n\t\t\t\t{\n\t\t\t\t\treturn s1.compareTo(s2); // Strings differ\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Here, the strings differ only on a number\n\t\t\treturn val1 < val2 ? 
-1 : 1;\n\t\t}\n\n\t\tArrayList split(String s)\n\t\t{\n\t\t\tArrayList r = new ArrayList();\n\t\t\tMatcher matcher = splitter.matcher(s);\n\t\t\twhile (matcher.find())\n\t\t\t{\n\t\t\t\tString m = matcher.group(1);\n\t\t\t\tr.add(m);\n\t\t\t}\n\t\t\treturn r;\n\t\t}\n\t}\n\n\t@FXML\n\tprivate void onClickCompileToIr(ActionEvent event){\n\t\tParent root;\n\t\ttry {\n\t\t\t// clear log tabs\n\t\t\tconsole.clear();\n\t\t\terrorString.clear();\n\t\t\t\n\t\t\t// change tab\n\t\t\ttabBottom.getSelectionModel().select(profilingTab);\n\n\t\t\t// Delete the old .ll file\n\t\t\tProcessBuilder deleteCmd = new ProcessBuilder(\"/bin/tcsh\", \"-c\", \"rm \" + currentProgramFolder + \"/\" + currentProgramFolder + \".ll\");\n\t\t\tdeleteCmd.start().waitFor();\n\n\t\t\t// call make\n\t\t\tString command = \"make\";\n\t\t\tconsole.add(\"./\" + currentProgramFolder + \"$ \" + command + \"\\n\");\n\t\t\tProcess p = Runtime.getRuntime().exec(command, null, new File(currentProgramFolder));\n\t\t\tp.waitFor();\n\n\t\t\t// get error messages\n\t\t\tBufferedReader in = new BufferedReader(new InputStreamReader(p.getErrorStream()));\n\t\t\twhile ((line = in.readLine()) != null) {\n\t\t\t\terrorString.add(line);\n\t\t\t}\n\t\t\tin.close();\n\t\t\t\n\t\t\t// get log messages\n\t\t\tin = new BufferedReader(new InputStreamReader(p.getInputStream()));\n\t\t\twhile ((line = in.readLine()) != null) {\n\t\t\t\tconsole.add(line);\n\t\t\t}\n\t\t\tin.close();\n\n\t\t\tif(errorString.isEmpty()) {\n\t\t\t\t// display success\n\t\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/compileToIR.fxml\"));\n\t\t\t\tStage stage = new Stage();\n\t\t\t\tstage.setTitle(\"Compiling To IR Result\");\n\t\t\t\tstage.setScene(new Scene(root, 500, 150));\n\t\t\t\tstage.show();\n\n\t\t\t\t// import .ll file and display it\n\t\t\t\tString fileName = currentProgramFolder + \".ll\";\n\t\t\t\timportFile(fileName);\n\t\t\t\tsetProgramTextArea(fileName);\n\t\t\t\t\n\t\t\t\tcs.changeStateTo(State.COMPILE_COMPLETED);\n\t\t\t} else {\n\t\t\t\t// show error\n\t\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/ErrorDisplay.fxml\"));\n\t\t\t\tStage stage = new Stage();\n\t\t\t\tstage.setTitle(\"Error\");\n\t\t\t\tstage.setScene(new Scene(root, 450, 100));\n\t\t\t\tstage.show();\n\t\t\t\treturn;\n\t\t\t}\n\t\t} catch (IOException | InterruptedException e) {\n\t\t\tSystem.err.println(\"ERROR: failed to generate .ll file\");\n\t\t\te.printStackTrace();\n\t\t}\n\n\t\tsoftwareFailureAutoScan();\n\t}\n\t\n\t/**\n\t * #SFIT Finds out which software faults are applicable (which one can be injected) \n\t * and dump it into \\<folder\\>/llfi.applicable.software.failures.txt\n\t */\n\tpublic static void softwareFailureAutoScan() {\n\t\tString cmd = Controller.llfibuildPath \n\t\t\t\t+ \"bin/SoftwareFailureAutoScan --no_input_yaml \" \n\t\t\t\t+ currentProgramFolder + \"/\" + currentProgramFolder + \".ll\";\n\t\tProcessBuilder softwareFailureAutoScan = new ProcessBuilder(\"/bin/tcsh\", \"-c\", cmd);\n\t\tProcess p;\n\t\tString line;\n\t\ttry {\n\t\t\tp = softwareFailureAutoScan.redirectErrorStream(true).start();\n\t\t\tp.waitFor();\n\t\t\t\n\t\t\t// route the output of the process to the GUI's console\n\t\t\tconsole.add(\"\\n$ \" + cmd + \"\\n\");\n\t\t\tBufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream()));\n\t\t\twhile ((line = in.readLine()) != null) {\n\t\t\t\tconsole.add(line);\n\t\t\t}\n\t\t\tin.close();\n\t\t} catch (IOException | InterruptedException e) 
{\n\t\t\tSystem.err.println(\"ERROR: SoftwareFailureAutoScan failed!\");\n\t\t\te.printStackTrace();\n\t\t} \n\t}\n\t\n\t@FXML\n\tprivate void onClickOkHandler(ActionEvent event){\n\t\tNode source = (Node) event.getSource(); \n\t\tStage stage = (Stage) source.getScene().getWindow();\n\t\tstage.close();\n\t}\n\t\n\t@FXML\n\tprivate void onClickInstrument(ActionEvent event) {\n\t\tParent root;\n\t\ttry {\n\t\t\tindexStates = false;\n\t\t\ttabBottom.getSelectionModel().select(profilingTab);\n\t\t\t\n\t\t\t// load the Instrument view and fetch its controller\n\t\t\tFXMLLoader loader = new FXMLLoader(getClass().getClassLoader().getResource(\"application/Instrument.fxml\"));\n\t\t\troot = loader.load();\n\t\t\tInstrumentController ic = loader.getController();\n\t\t\t\n\t\t\t// pass it fiResultDisplay\n\t\t\tic.initFiResultDisplay(fiResultDisplay);\n\t\t\t\n\t\t\tStage stage = new Stage();\n\t\t\tstage.setTitle(\"Instrument\");\n\t\t\tstage.setScene(new Scene(root, 742, 569));\n\t\t\tstage.show();\n\t\t} catch (IOException e) {\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\n\t@FXML\n\tprivate void onClickInjectFault(ActionEvent event) {\n\t\tParent root;\n\t\ttry {\n\t\t\ttabBottom.getSelectionModel().select(profilingTab);\n\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\"application/Profiling.fxml\"));\n\t\t\tStage stage = new Stage();\n\t\t\tstage.setTitle(\"Fault Injection\");\n\t\t\tstage.setScene(new Scene(root, 600, 500));\n\t\t\tstage.show();\n\t\t\tinjectfaultButton.setDisable(false);\n\t\t} catch (IOException e) {\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\n\t@FXML\n\tprivate void onClickOpenFile(ActionEvent event) {\n\t\tStage stage = new Stage();\n\t\tFileChooser fileChooser = new FileChooser();\n\t\tfileChooser.setTitle(\"Open Resource File\");\n\t\tconfigureFileChooser(fileChooser); \n\t\tFile f = fileChooser.showOpenDialog(stage);\n\t\tif (f != null) {\n\t\t\t// clear the previous file list\n\t\t\tfileNameLists.clear();\n\t\t\tfileSelecMap.clear();\n\t\t\t\n\t\t\topenFile(f);\n\t\t}\n\t}\n\n\t@FXML\n\tprivate void onClickOpenProject(ActionEvent event) {\n\t\tStage stage = new Stage();\n\t\tDirectoryChooser dirChooser = new DirectoryChooser();\n\t\tdirChooser.setTitle(\"Open Project Folder\");\n\t\tFile folder = dirChooser.showDialog(stage);\n\t\tif (folder != null) {\n\t\t\t// clear the previous file list\n\t\t\tfileNameLists.clear();\n\t\t\tfileSelecMap.clear();\n\n\t\t\topenDirectory(folder);\n\t\t}\n\t}\n\n\tprivate static void configureFileChooser(\n\t\t\tfinal FileChooser fileChooser) { \n\t\tfileChooser.getExtensionFilters().addAll(\n\t\t\t\tnew FileChooser.ExtensionFilter(\"C\", \"*.c\"),\n\t\t\t\tnew FileChooser.ExtensionFilter(\"CPP\", \"*.cpp\"),\n\t\t\t\tnew FileChooser.ExtensionFilter(\"ll\", \"*.ll\")\n\t\t\t\t);\n\t}\n\t\n\t/**\n\t 
* Delete a file or a directory (if it is a directory, the delete runs recursively)\n\t * @param file - file or directory to be deleted\n\t * @throws IOException\n\t */\n\tpublic static void delete(File file)\n\t\t\tthrows IOException{\n\t\t\n\t\tif(file.isDirectory()){\n\n\t\t\t//directory is empty, then delete it\n\t\t\tif(file.list().length==0){\n\n\t\t\t\tfile.delete();\n\n\n\t\t\t}else{\n\n\t\t\t\t//list all the directory contents\n\t\t\t\tString files[] = file.list();\n\n\t\t\t\tfor (String temp : files) {\n\t\t\t\t\t//construct the file structure\n\t\t\t\t\tFile fileDelete = new File(file, temp);\n\n\t\t\t\t\t//recursive delete\n\t\t\t\t\tdelete(fileDelete);\n\t\t\t\t}\n\n\t\t\t\t//check the directory again, if empty then delete it\n\t\t\t\tif(file.list().length==0){\n\t\t\t\t\tfile.delete();\n\n\t\t\t\t}\n\t\t\t}\n\n\t\t}else{\n\t\t\t//if file, then delete it\n\t\t\tfile.delete();\n\n\t\t}\n\t}\n\t\n\t\n\tprivate void addDirectory(File dir, boolean generateMake) throws IOException {\n\t\t// change currentProgramFolder to reflect a change in folder\n\t\tcurrentProgramFolder = dir.getName();\n\t\t// generate the makefile for the directory\n\t\tif (generateMake) {\n\t\t\tgenerateMakeFile(dir);\n\t\t\tcs.disableCompileButton(false);\n\t\t} else {\n\t\t\tcs.disableCompileButton(true);\n\t\t}\n\t\t\n\t\t// load all the relevant (Makefile, .c, .cpp) files into the gui and\n\t\t// display them\n\t\tString lastFile = null;\n\t\tfor (File f : dir.listFiles()) {\n\t\t\tString fileName = f.getName();\n\t\t\tif (fileName.equals(\"Makefile\") || fileName.endsWith(\".c\") || fileName.endsWith(\".cpp\") || fileName.endsWith(\".ll\")) {\n\t\t\t\timportFile(fileName);\n\t\t\t\tlastFile = fileName;\n\t\t\t}\n\t\t}\n\n\t\t// display the Makefile\n\t\tif (generateMake) {\n\t\t\tsetProgramTextArea(\"Makefile\");\n\t\t} else {\n\t\t\tsetProgramTextArea(lastFile);\n\t\t}\n\t\tcs.changeStateTo(State.IMPORT_FILE_COMPLETED);\n\t}\n\t\n\t/**\n\t * Opens a directory/project folder\n\t * @param directory\n\t */\n\tprivate void openDirectory(File directory) {\n\t\ttry {\n\t\t\t// check if the directory to be copied in is already in the working directory\n\t\t\tFile dir = new File(directory.getName());\n\t\t\tif (!dir.getCanonicalPath().equals(directory.getCanonicalPath())) {\n\t\t\t\t// delete old directory if exist and not the same as the new one\n\t\t\t\tif (dir.exists()) {\n\t\t\t\t\tdelete(dir);\n\t\t\t\t}\n\t\t\t\t// copy\n\t\t\t\tProcessBuilder p = new ProcessBuilder(\"/bin/tcsh\", \"-c\",\n\t\t\t\t\t\t\"cp -r '\" + directory.getAbsolutePath() + \"' '\"\n\t\t\t\t\t\t\t\t+ directory.getName() + \"'\");\n\t\t\t\tp.start().waitFor();\n\t\t\t}\n\n\t\t\taddDirectory(dir, true);\n\t\t\tFiles.deleteIfExists(Paths.get(dir.getAbsolutePath(), \"/llfi.applicable.software.failures.txt\"));\n\t\t} catch (IOException | InterruptedException e) {\n\t\t\tSystem.err.println(\"ERROR: cannot open/read/move file!\");\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\t/**\n\t * Opens a single file \n\t * @param file\n\t */\n\tprivate void openFile(File file) {\n\t\ttry{\n\t\t\tString fileName = file.getName();\n\t\t\tString fileNameNoExtension = fileName.split(\"\\\\.\")[0];\n\t\t\tString newFilePath = fileNameNoExtension + \"/\" + fileName;\n\t\t\t\n\t\t\t// check if the file to be copied in is already in the working directory\n\t\t\tFile dirFile = new File(newFilePath);\n\t\t\tFile dir = new File(fileNameNoExtension);\n\t\t\t\n\t\t\t// move in the file if it is in another location\n\t\t\tif 
(!dirFile.getCanonicalPath().equals(file.getCanonicalPath())) {\n\t\t\t\t// delete old directory if exist and not the same as the new one\n\t\t\t\tif (dir.exists()) {\n\t\t\t\t\tdelete(dir);\n\t\t\t\t}\n\t\t\t\t// mkdir\n\t\t\t\tProcessBuilder p = new ProcessBuilder(\"/bin/tcsh\", \"-c\",\n\t\t\t\t\t\t\"mkdir \" + fileNameNoExtension);\n\t\t\t\tp.start().waitFor();\n\t\t\t\t// copy\n\t\t\t\tp = new ProcessBuilder(\"/bin/tcsh\", \"-c\",\n\t\t\t\t\t\t\"cp -r '\" + file.getAbsolutePath() + \"' '\"\n\t\t\t\t\t\t\t\t+ newFilePath + \"'\");\n\t\t\t\tp.start().waitFor();\n\t\t\t}\n\t\t\t\n\t\t\tif (fileName.endsWith(\".ll\")) {\n\t\t\t\taddDirectory(dir, false);\n\t\t\t} else {\n\t\t\t\taddDirectory(dir, true);\n\t\t\t}\n\t\t} catch (IOException | InterruptedException e) {\n\t\t\tSystem.err.println(\"ERROR: cannot open/read/move file!\");\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\t/**\n\t * Generates the Makefile in the specified directory.\n\t * @param directory\n\t */\n\tprivate void generateMakeFile(File directory) {\n\t\tString command = llfibuildPath\n\t\t\t\t+ \"tools/GenerateMakefile --readable --all -o \"\n\t\t\t\t+ currentProgramFolder + \".ll\";\n\t\ttry {\n\t\t\tconsole.clear();\n\t\t\tconsole.add(\"./\" + currentProgramFolder + \"$ \" + command + \"\\n\");\n\t\t\tProcess p = Runtime.getRuntime().exec(command, null, directory);\n\t\t\tp.waitFor();\n\t\t} catch (Exception e) {\n\t\t\tSystem.err.println(\"ERROR: unable to generate makefile!\");\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\t/**\n\t * Opens a file f and loads it into a list of strings\n\t * @param f\n\t * @return\n\t * @throws IOException\n\t */\n\tprivate List<String> parseFile(File f) throws IOException {\n\t\tString line;\n\t\tArrayList<String> fileContent = new ArrayList<>();\n\t\tBufferedReader bufferReader = new BufferedReader(new FileReader(f));\n\t\twhile ((line = bufferReader.readLine()) != null) {\n\t\t\tfileContent.add(line + \"\\n\");\n\t\t}\n\t\tbufferReader.close();\n\t\treturn fileContent;\n\t}\n\t\n\t@FXML\n\tprivate void onFileSelection(MouseEvent event){\n\t\tString selectedFile = fileList.getSelectionModel().getSelectedItem();\n\t\tif (selectedFile != null) {\n\t\t\tsetProgramTextArea(selectedFile);\n\t\t}\n\t}\n\t\n\t@FXML\n\tprivate void onTabChange() {\n\t\tif (errorTab.isSelected()) {\n\t\t\terrorTextArea.clear();\n\t\t\tif (errorString.size() > 0) {\n\t\t\t\tfor (int i = 0; i < errorString.size(); i++) {\n\t\t\t\t\terrorTextArea.appendText(errorString.get(i) + \"\\n\");\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (consoleTab.isSelected()) {\n\t\t\tconsoleTextArea.clear();\n\t\t\tif (console.size() > 0) {\n\t\t\t\tfor (int i = 0; i < console.size(); i++) {\n\t\t\t\t\tconsoleTextArea.appendText(console.get(i) + \"\\n\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t@Override\n\tpublic void initialize(URL url, ResourceBundle rb) {\n\t\ttry{\n\t\t\tProcessBuilder p1 = new ProcessBuilder(\"/bin/tcsh\",\"-c\",\"echo $llfibuild\");\n\n\t\t\tp1.redirectErrorStream(true);\n\t\t\tProcess pr1 = p1.start();\n\t\t\tBufferedReader in = new BufferedReader(new InputStreamReader(pr1.getInputStream()));\n\t\t\tString line;\n\t\t\twhile ((line = in.readLine()) != null) {\n\n\t\t\t\tllfibuildPath = line;\n\t\t\t}\n\t\t\tpr1.waitFor();\n\t\t\tpr1.destroy();\n\t\t\tin.close();\n\n\n\n\t\t\tProcessBuilder p2 = new ProcessBuilder(\"/bin/tcsh\",\"-c\",\"echo $psViewer\");\n\n\t\t\tp2.redirectErrorStream(true);\n\t\t\tProcess pr2 = p2.start();\n\t\t\tBufferedReader in2 = new BufferedReader(new 
InputStreamReader(pr2.getInputStream()));\n\t\t\twhile ((line = in2.readLine()) != null) {\n\t\t\t\tpsViewer = line;\n\t\t\t}\n\t\t\tpr2.waitFor();\n\t\t\tpr2.destroy();\n\t\t\tin2.close();\n\t\t\t\n\t\t\tProcessBuilder p3 = new ProcessBuilder(\"/bin/tcsh\",\"-c\",\"echo $zgrviewer\");\n\t\t\tp3.redirectErrorStream(true);\n\t\t\tProcess pr3 = p3.start();\n\t\t\tBufferedReader in3 = new BufferedReader(new InputStreamReader(pr3.getInputStream()));\n\t\t\twhile ((line = in3.readLine()) != null) {\n\t\t\t\tzgrviewerPath = line;\n\t\t\t}\n\t\t\tpr3.waitFor();\n\t\t\tpr3.destroy();\n\t\t\tin3.close();\n\t\t}\n\t\tcatch(IOException e)\n\t\t{\n\t\t\tSystem.out.println(e);\n\t\t} catch(InterruptedException e)\n\t\t{\n\t\t\tSystem.out.println(e);\n\t\t}\n\t\t\n\t\tconfigReader = new ConfigReader();\n\t\tcs = new CurrentState(State.INITIAL);\n\n\t\t// #SFIT\n\t\tfiResultDisplay.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<String>(){\n\t\t\t@Override\n\t\t\tpublic void changed(ObservableValue<? extends String> observable,\n\t\t\t\t\tString oldValue, String newValue) {\n\t\t\t\tif (oldValue == null || newValue == null) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tif (!oldValue.equals(newValue) && cs.getCurrentState() == State.INJECT_FAULT_COMPLETED && isBatchMode) {\n\t\t\t\t\tgenerateInjectionResult();\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\t\n\t/**\n\t * Updates the 'Fault Injection Status' and the 'Fault Summary' tabs\n\t * with the output data from the injection.\n\t */\n\tpublic void generateInjectionResult() {\n\t\tonGeneratingResultTable();\n\t\tgenerateFaultSummaryGraph();\n\t}\n\t\n\tpublic class CurrentState {\n\t\tprivate State s;\n\t\t@SuppressWarnings(\"rawtypes\")\n\t\tprivate ObservableList emptyList = FXCollections.observableArrayList();\n\t\tprivate boolean disableCompileButton = false;\n\t\t\n\t\tpublic CurrentState(State s) {\n\t\t\tchangeStateTo(s);\n\t\t}\n\t\t\n\t\t/**\n\t\t * Disable the compileToIR button if the user has imported a .ll file.\n\t\t * @param value - true = disable\n\t\t */\n\t\tpublic void disableCompileButton(boolean value) {\n\t\t\tthis.disableCompileButton = value;\n\t\t}\n\t\t\n\t\tpublic State getCurrentState() {\n\t\t\treturn s;\n\t\t}\n\t\t\n\t\t@SuppressWarnings(\"unchecked\")\n\t\tpublic void changeStateTo(State s) {\n\t\t\tswitch (s) {\n\t\t\tcase INITIAL:\n\t\t\t\tthis.s = s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(true);\n\t\t\t\tinstrumentButton.setDisable(true);\n\t\t\t\tprofilingButton.setDisable(true);\n\t\t\t\truntimeButton.setDisable(true);\n\t\t\t\tinjectfaultButton.setDisable(true);\n\t\t\t\ttracegraphButton.setDisable(true);\n\t\t\t\t\n\t\t\t\t// clear previous results\n\t\t\t\tprofilingTable.setItems(emptyList);\n\t\t\t\tresultTable.setItems(emptyList);\n\t\t\t\tresultSummary.getData().clear();\n\t\t\t\t\n\t\t\t\t// hide the result selector so the user cannot interact with it\n\t\t\t\t// until batch mode has been selected\n\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\t\n\t\t\t\tbreak;\n\t\t\tcase IMPORT_FILE_COMPLETED:\n\t\t\t\tthis.s = 
s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(false);\n\t\t\t\tinstrumentButton.setDisable(true);\n\t\t\t\tprofilingButton.setDisable(true);\n\t\t\t\truntimeButton.setDisable(true);\n\t\t\t\tinjectfaultButton.setDisable(true);\n\t\t\t\ttracegraphButton.setDisable(true);\n\t\t\t\t\n\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\t\n\t\t\t\t// clear previous results\n\t\t\t\tprofilingTable.setItems(emptyList);\n\t\t\t\tresultTable.setItems(emptyList);\n\t\t\t\tresultSummary.getData().clear();\n\t\t\t\t\n\t\t\t\tboolean fileHasBeenCompiled = false;\n\t\t\t\tfor (String str : fileNameLists) {\n\t\t\t\t\tif (str.endsWith(currentProgramFolder + \".ll\")) {\n\t\t\t\t\t\tfileHasBeenCompiled = true;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (!fileHasBeenCompiled) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t// deliberate fall-through: the .ll file already exists, so the\n\t\t\t\t// GUI can move straight to the compiled state\n\t\t\tcase COMPILE_COMPLETED:\n\t\t\t\tthis.s = s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(false);\n\t\t\t\tinstrumentButton.setDisable(false);\n\t\t\t\tprofilingButton.setDisable(true);\n\t\t\t\truntimeButton.setDisable(true);\n\t\t\t\tinjectfaultButton.setDisable(true);\n\t\t\t\ttracegraphButton.setDisable(true);\n\t\t\t\t\n\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\t\n\t\t\t\t// clear previous results\n\t\t\t\tprofilingTable.setItems(emptyList);\n\t\t\t\tresultTable.setItems(emptyList);\n\t\t\t\tresultSummary.getData().clear();\n\t\t\t\t\n\t\t\t\tbreak;\n\t\t\tcase INSTRUMENT_COMPLETED:\n\t\t\t\tthis.s = s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(false);\n\t\t\t\tinstrumentButton.setDisable(false);\n\t\t\t\tprofilingButton.setDisable(false);\n\t\t\t\truntimeButton.setDisable(true);\n\t\t\t\tinjectfaultButton.setDisable(true);\n\t\t\t\ttracegraphButton.setDisable(true);\n\t\t\t\t\n\t\t\t\t// clear previous results\n\t\t\t\tprofilingTable.setItems(emptyList);\n\t\t\t\tresultTable.setItems(emptyList);\n\t\t\t\tresultSummary.getData().clear();\n\t\t\t\t\n\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\t\n\t\t\t\t// Display the index file in the text area\n\t\t\t\tString fileName = currentProgramFolder + \"-llfi_displayIndex.ll\";\n\t\t\t\ttry {\n\t\t\t\t\timportFile(fileName);\n\t\t\t\t} catch (IOException e) {\n\t\t\t\t\tSystem.err.println(\"ERROR: unable to import \" + fileName);\n\t\t\t\t\te.printStackTrace();\n\t\t\t\t}\n\t\t\t\tsetProgramTextArea(fileName);\n\t\t\t\t\n\t\t\t\tbreak;\n\t\t\tcase PROFILING_COMPLETED:\n\t\t\t\tthis.s = s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(false);\n\t\t\t\tinstrumentButton.setDisable(false);\n\t\t\t\tprofilingButton.setDisable(false);\n\t\t\t\truntimeButton.setDisable(false);\n\t\t\t\tinjectfaultButton.setDisable(true);\n\t\t\t\ttracegraphButton.setDisable(true);\n\t\t\t\t\n\t\t\t\t// clear previous results\n\t\t\t\tresultTable.setItems(emptyList);\n\t\t\t\tresultSummary.getData().clear();\n\t\t\t\t\n\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\t\n\t\t\t\tInputYaml input = new InputYaml();\n\t\t\t\tinput.load(new File(currentProgramFolder + \"/input.yaml\"));\n\t\t\t\tif (input.getRuntimeOptions().size() == 0) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t// deliberate fall-through: runtime options are already configured\n\t\t\tcase RUNTIME_OPTIONS_COMPLETED:\n\t\t\t\tthis.s = 
s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(false);\n\t\t\t\tinstrumentButton.setDisable(false);\n\t\t\t\tprofilingButton.setDisable(false);\n\t\t\t\truntimeButton.setDisable(false);\n\t\t\t\tinjectfaultButton.setDisable(false);\n\t\t\t\ttracegraphButton.setDisable(true);\n\t\t\t\t\n\t\t\t\t// clear previous results\n\t\t\t\tresultTable.setItems(emptyList);\n\t\t\t\tresultSummary.getData().clear();\n\t\t\t\t\n\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\tbreak;\n\t\t\tcase INJECT_FAULT_COMPLETED:\n\t\t\t\tthis.s = s;\n\t\t\t\t\n\t\t\t\tcompiletoIrButton.setDisable(false);\n\t\t\t\tinstrumentButton.setDisable(false);\n\t\t\t\tprofilingButton.setDisable(false);\n\t\t\t\truntimeButton.setDisable(false);\n\t\t\t\tinjectfaultButton.setDisable(false);\n\t\t\t\ttracegraphButton.setDisable(false);\n\t\t\t\t\n\t\t\t\tif (isBatchMode) {\n\t\t\t\t\tfiResultDisplay.setVisible(true);\n\t\t\t\t} else {\n\t\t\t\t\tfiResultDisplay.setVisible(false);\n\t\t\t\t}\n\t\t\t\tfiResultDisplay.setValue(\"All\");\n\t\t\t\t\n\t\t\t\tgenerateInjectionResult();\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t\n\t\t\t// remove glowing 'please select file' red text on first state change\n\t\t\tUploadLabel.setVisible(false);\n\t\t\t\n\t\t\tif (disableCompileButton) {\n\t\t\t\tcompiletoIrButton.setDisable(true);\n\t\t\t}\n\t\t}\n\t}\n\t\n\tpublic enum State {\n\t\tINITIAL,\n\t\tIMPORT_FILE_COMPLETED,\n\t\tCOMPILE_COMPLETED,\n\t\tINSTRUMENT_COMPLETED,\n\t\tPROFILING_COMPLETED,\n\t\tRUNTIME_OPTIONS_COMPLETED,\n\t\tINJECT_FAULT_COMPLETED;\n\t}\n}\n" }, { "alpha_fraction": 0.6987928152084351, "alphanum_fraction": 0.7011368274688721, "avg_line_length": 28.80264663696289, "blob_id": "b3cacba277f083b2d40623a42fb745159deefa32", "content_id": "52cc69ffe51e753ec305a49ca6252f392ad81504", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 25597, "license_type": "permissive", "max_line_length": 137, "num_lines": 831, "path": "/gui/application/InstrumentController.java", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "package application;\r\n\r\nimport java.io.BufferedReader;\r\nimport java.io.BufferedWriter;\r\nimport java.io.File;\r\nimport java.io.FileNotFoundException;\r\nimport java.io.FileReader;\r\nimport java.io.FileWriter;\r\nimport java.io.IOException;\r\nimport java.io.InputStreamReader;\r\nimport java.net.URL;\r\nimport java.nio.file.Files;\r\nimport java.nio.file.Paths;\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.Map;\r\nimport java.util.ResourceBundle;\r\n\r\nimport org.yaml.snakeyaml.Yaml;\r\n\r\nimport javafx.collections.FXCollections;\r\nimport javafx.collections.ObservableList;\r\nimport javafx.event.ActionEvent;\r\nimport javafx.fxml.FXML;\r\nimport javafx.fxml.FXMLLoader;\r\nimport javafx.fxml.Initializable;\r\nimport javafx.scene.Node;\r\nimport javafx.scene.Parent;\r\nimport javafx.scene.Scene;\r\nimport javafx.scene.control.Button;\r\nimport javafx.scene.control.CheckBox;\r\nimport javafx.scene.control.ComboBox;\r\nimport javafx.scene.control.Label;\r\nimport javafx.scene.control.ListView;\r\nimport javafx.scene.control.RadioButton;\r\nimport javafx.scene.control.SelectionMode;\r\nimport javafx.scene.control.TextField;\r\nimport javafx.stage.FileChooser;\r\nimport javafx.stage.Stage;\r\nimport application.Controller;\r\nimport application.Controller.State;\r\nimport 
application.InputYaml.InstrumentOption;\r\nimport application.InputYaml.RuntimeOption;\r\npublic class InstrumentController implements Initializable {\r\n\r\n\t@FXML\r\n\tprivate ListView<String> instExcludeListView;\r\n\t@FXML\r\n\tprivate ListView<String> instIncludeListView;\r\n\t@FXML\r\n\tprivate RadioButton instTypeRadio;\r\n\t@FXML\r\n\tprivate RadioButton customInstTypeRadio;\r\n\t@FXML\r\n\tprivate ComboBox<String> regCombo;\r\n\t@FXML\r\n\tprivate RadioButton regTypeRadio;\r\n\t@FXML\r\n\tprivate RadioButton customRegTypeRadio;\r\n\t@FXML\r\n\tprivate RadioButton noTraceRadio;\r\n\t@FXML\r\n\tprivate RadioButton fullTraceRadio;\r\n\t@FXML\r\n\tprivate RadioButton limitTraceRadio;\r\n\t@FXML\r\n\tprivate Label includeLabel;\r\n\t@FXML\r\n\tprivate Button instIncludeButton;\r\n\t@FXML\r\n\tprivate Button instExcludeButton;\r\n\t@FXML\r\n\tprivate Button regIncludeButton;\r\n\t@FXML\r\n\tprivate Button regExcludeButton;\r\n\t@FXML\r\n\tprivate CheckBox forwardCheckbox;\r\n\t@FXML\r\n\tprivate CheckBox backwardCheckbox;\r\n\t@FXML\r\n\tprivate Label regIncludeLabel;\r\n\t@FXML\r\n\tprivate TextField traceCountText;\r\n\t@FXML\r\n\tprivate Node traceCountLabel;\r\n\t@FXML\r\n\tprivate ComboBox<String> customInstCombo;\r\n\t@FXML\r\n\tprivate ComboBox<String> customRegCombo;\r\n\t@FXML\r\n\tprivate Button createNewProfileButton;\r\n\t@FXML\r\n\tprivate CheckBox allCheckBox;\r\n\r\n\r\n\tprivate List<String> fileContent;\r\n\tprivate List<String> includeInstList;\r\n\tprivate List<String> excludeInstList;\r\n\tprivate List<String> removeList;\r\n\tprivate List<String> registerList;\r\n\tprivate List<String> customInstList;\r\n\tprivate List<String> customRegList;\r\n\r\n\t@FXML\r\n\tObservableList<String> items;\r\n\t@FXML\r\n\tObservableList<String> includeItems;\r\n\t@FXML\r\n\tObservableList<String> tempItems=FXCollections.observableArrayList();\r\n\t@FXML\r\n\tObservableList<String> tempItems1=FXCollections.observableArrayList();\r\n\r\n\tprivate boolean errorFlag;\r\n\tpublic String folderName;\r\n\tpublic String fileName;\r\n\tFile theDirectory;\r\n\r\n\t// #SFIT\r\n\t// used for selection software injection\r\n\t@FXML\r\n\tprivate RadioButton software;\r\n\t@FXML\r\n\tprivate RadioButton hardware;\r\n\r\n\t@FXML\r\n\tprivate Node registerSelectionMethodLabel;\r\n\t@FXML\r\n\tprivate Node separator;\r\n\r\n\t// for determining if the previous runtime option should be kept\r\n\tprivate InstrumentOption previousInstrumentOption;\r\n\tprivate List<RuntimeOption> previousRuntimeOption;\r\n\r\n\t// fiResultDisplay from Controller\r\n\tprivate ComboBox<String> fiResultDisplay;\r\n\r\n\t@FXML\r\n\tprivate void onClickGenerateYamlFile(ActionEvent event) {\r\n\t\tParent root;\r\n\t\tController.console = new ArrayList<String>();\r\n\r\n\t\ttry {\r\n\t\t\t// delete old folders and files from last fault injection excluding llfi.applicable.software.failures.txt\r\n\t\t\tString cmd1 = \"rm -rf `find ./\" + Controller.currentProgramFolder + \" -name 'llfi*' ! 
-name 'llfi.applicable.software.failures.txt'`\";\r\n\t\t\tProcessBuilder p1 = new ProcessBuilder(\"/bin/tcsh\", \"-c\", cmd1);\r\n\t\t\tp1.start().waitFor();\r\n\r\n\t\t\tInstrumentOption option = new InstrumentOption();\r\n\r\n\t\t\t// see what type of injection we're doing\r\n\t\t\tif (Controller.isHardwareInjection) {\r\n\t\t\t\toption.isHardwareFault = true;\r\n\r\n\t\t\t\t// set regloc_OR_customRegSelector (it is ignored for software injection)\r\n\t\t\t\tif (regTypeRadio.isSelected()) {\r\n\t\t\t\t\toption.regloc_OR_customRegSelector = regCombo.getValue().toString().split(\"-\")[0];\r\n\t\t\t\t\toption.customRegister = false;\r\n\t\t\t\t} else {\r\n\t\t\t\t\toption.regloc_OR_customRegSelector = customRegCombo.getValue().toString();\r\n\t\t\t\t\toption.customRegister = true;\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\toption.isHardwareFault = false;\r\n\t\t\t}\r\n\r\n\t\t\t// see if we are doing custom injection\r\n\t\t\tif (instTypeRadio.isSelected()) {\r\n\t\t\t\toption.customInstruction = false;\r\n\r\n\t\t\t\tArrayList<String> formattedList = new ArrayList<String>();\r\n\t\t\t\tfor (String s: instIncludeListView.getItems()) {\r\n\t\t\t\t\tformattedList.add(s.split(\"-\")[0]);\r\n\t\t\t\t}\r\n\r\n\t\t\t\toption.includedInstruction = formattedList;\r\n\t\t\t} else {\r\n\t\t\t\toption.customInstruction = true;\r\n\t\t\t\toption.includedInstruction = new ArrayList<String>();\r\n\t\t\t\toption.includedInstruction.add(customInstCombo.getValue().toString().split(\"-\")[0]);\r\n\t\t\t}\r\n\r\n\t\t\t// set tracing options\r\n\t\t\tif (fullTraceRadio.isSelected() || limitTraceRadio.isSelected()) {\r\n\t\t\t\toption.tracingEnabled = true;\r\n\t\t\t\toption.forwardTrace = forwardCheckbox.isSelected();\r\n\t\t\t\toption.backwardTrace = backwardCheckbox.isSelected();\r\n\t\t\t} else {\r\n\t\t\t\toption.tracingEnabled = false;\r\n\t\t\t}\r\n\t\t\tif (limitTraceRadio.isSelected()) {\r\n\t\t\t\toption.maxTrace = Integer.parseInt(traceCountText.getText());\r\n\t\t\t} else {\r\n\t\t\t\toption.maxTrace = null;\r\n\t\t\t}\r\n\r\n\t\t\t// #SFIT\r\n\t\t\t// call batchInstrument when the user selected more than one software fault\r\n\t\t\tString scriptToCall;\r\n\t\t\tObservableList<String> numFaultTypes;\r\n\t\t\tif (instTypeRadio.isSelected()) {\r\n\t\t\t\tnumFaultTypes = instIncludeListView.getItems();\r\n\t\t\t} else {\r\n\t\t\t\t// custom instruction is selected\r\n\t\t\t\tnumFaultTypes = FXCollections.observableArrayList(customInstCombo.getValue().toString());\r\n\t\t\t}\r\n\t\t\tController.selectedSoftwareFailures = numFaultTypes;\r\n\r\n\t\t\tif (Controller.isHardwareInjection || numFaultTypes.size() == 1) {\r\n\t\t\t\tscriptToCall = \"bin/instrument -lpthread --readable \";\r\n\t\t\t\tController.isBatchMode = false;\r\n\t\t\t} else {\r\n\t\t\t\t// run batchInstrument instead\r\n\t\t\t\tscriptToCall = \"bin/batchInstrument --readable \";\r\n\t\t\t\tController.isBatchMode = true;\r\n\t\t\t}\r\n\r\n\t\t\tInputYaml input = new InputYaml();\r\n\r\n\t\t\t// if the injection mode did not change, keep the same runtime options\r\n\t\t\tif (!injectionModeChanged() && previousRuntimeOption.size() != 0) {\r\n\t\t\t\tinput.setRuntimeOption(previousRuntimeOption);\r\n\t\t\t}\r\n\r\n\t\t\tinput.setInstrumentOption(option);\r\n\t\t\tinput.writeChanges(Controller.currentProgramFolder + \"/input.yaml\");\r\n
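\r\n\t\t\t// In batch mode one llfi-<fault> folder is produced per selected\r\n\t\t\t// failure, so the result selector is seeded with the fault names plus\r\n\t\t\t// an extra \"All\" entry that aggregates the per-fault views.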
\r\n\t\t\t// populate the ComboBox so the user can select which result to\r\n\t\t\t// display once fault injection has completed\r\n\t\t\tif (Controller.isBatchMode) {\r\n\t\t\t\tObservableList<String> displayedFaultResult = FXCollections.observableArrayList(numFaultTypes);\r\n\t\t\t\tdisplayedFaultResult.add(0, \"All\");\r\n\t\t\t\tfiResultDisplay.setItems(displayedFaultResult);\r\n\t\t\t}\r\n\r\n\t\t\tString cmd = Controller.llfibuildPath\r\n\t\t\t\t\t+ scriptToCall + folderName + \"/\"\r\n\t\t\t\t\t+ folderName + \".ll\";\r\n\r\n\t\t\tProcessBuilder p = new ProcessBuilder(\"/bin/tcsh\", \"-c\", cmd);\r\n\t\t\tController.console.add(\"$ \" + cmd + \"\\n\");\r\n\r\n\t\t\tp.redirectErrorStream(true);\r\n\t\t\tProcess pr = p.start();\r\n\t\t\tBufferedReader in1 = new BufferedReader(new InputStreamReader(\r\n\t\t\t\t\tpr.getInputStream()));\r\n\r\n\t\t\tController.errorString = new ArrayList<>();\r\n\t\t\tString line1;\r\n\t\t\tboolean success = false;\r\n\r\n\t\t\twhile ((line1 = in1.readLine()) != null) {\r\n\t\t\t\tController.console.add(line1);\r\n\t\t\t\tController.errorString.add(line1);\r\n\r\n\t\t\t\tif (line1.contains(\"error\") || line1.contains(\"Error\")\r\n\t\t\t\t\t\t|| line1.contains(\"ERROR\"))\r\n\t\t\t\t\terrorFlag = true;\r\n\r\n\t\t\t\t// a C++ program is compiled with clang++, but the script tries\r\n\t\t\t\t// clang first, which outputs some errors even on a successful run\r\n\t\t\t\tif (line1.contains(\"Success\") || line1.contains(\"success\")) {\r\n\t\t\t\t\tsuccess = true;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tpr.waitFor();\r\n\t\t\tin1.close();\r\n\r\n\t\t\tif (errorFlag && !success) {\r\n\t\t\t\terrorFlag = false;\r\n\t\t\t\tNode source = (Node) event.getSource();\r\n\t\t\t\tStage stage = (Stage) source.getScene().getWindow();\r\n\t\t\t\tstage.close();\r\n\r\n\t\t\t\troot = FXMLLoader.load(getClass().getClassLoader().getResource(\r\n\t\t\t\t\t\t\"application/ErrorDisplay.fxml\"));\r\n\t\t\t\tstage = new Stage();\r\n\t\t\t\tstage.setTitle(\"Error\");\r\n\t\t\t\tstage.setScene(new Scene(root, 450, 100));\r\n\t\t\t\tstage.show();\r\n\r\n\t\t\t} else {\r\n\t\t\t\t// Generate the LLFI .ll file with indices labelled,\r\n\t\t\t\t// used for index-based injection.\r\n\r\n\t\t\t\t// #SFIT\r\n\t\t\t\t// in batch mode we need to go into one of the generated folders\r\n\t\t\t\t// to find the indexed .ll file\r\n\t\t\t\tfileContent = new ArrayList<>();\r\n\t\t\t\tString line;\r\n\t\t\t\tFileReader inputIndexFile;\r\n\t\t\t\tFile outputIndexFile = new File(\r\n\t\t\t\t\t\tController.currentProgramFolder + \"/\"\r\n\t\t\t\t\t\t+ Controller.currentProgramFolder\r\n\t\t\t\t\t\t+ \"-llfi_displayIndex.ll\");\r\n\t\t\t\tif (!Controller.isBatchMode) {\r\n\t\t\t\t\tinputIndexFile = new FileReader(\r\n\t\t\t\t\t\t\tController.currentProgramFolder + \"/llfi/\"\r\n\t\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\r\n\t\t\t\t\t\t\t\t\t+ \"-llfi_index.ll\");\r\n\t\t\t\t} else {\r\n\t\t\t\t\t// in software batch mode the files live in per-fault subfolders,\r\n\t\t\t\t\t// so read the indexed .ll from there and copy the shared files\r\n\t\t\t\t\t// up to the program folder\r\n\t\t\t\t\tinputIndexFile = new FileReader(\r\n\t\t\t\t\t\t\tController.currentProgramFolder + \"/llfi-\"\r\n\t\t\t\t\t\t\t\t\t+ numFaultTypes.get(0) + \"/llfi/\"\r\n\t\t\t\t\t\t\t\t\t+ Controller.currentProgramFolder\r\n\t\t\t\t\t\t\t\t\t+ \"-llfi_index.ll\");\r\n\t\t\t\t\t// also copy the llfi.stat.totalindex.txt file out of (one of) the inner folders\r\n\t\t\t\t\tFiles.copy(\r\n\t\t\t\t\t\t\tPaths.get(Controller.currentProgramFolder + \"/llfi-\" + numFaultTypes.get(0) + 
\"/llfi.stat.totalindex.txt\"),\r\n\t\t\t\t\t\t\tPaths.get(Controller.currentProgramFolder + \"/llfi.stat.totalindex.txt\"));\r\n\t\t\t\t\t// also the dot file for batch trace graph\r\n\t\t\t\t\tFiles.copy(\r\n\t\t\t\t\t\t\tPaths.get(Controller.currentProgramFolder + \"/llfi-\" + numFaultTypes.get(0) + \"/llfi.stat.graph.dot\"),\r\n\t\t\t\t\t\t\tPaths.get(Controller.currentProgramFolder + \"/llfi.stat.graph.dot\"));\r\n\t\t\t\t}\r\n\r\n\t\t\t\t// reads llfi/<program>-llfi_index.ll\r\n\t\t\t\tBufferedReader bufferReader = new BufferedReader(inputIndexFile);\r\n\t\t\t\twhile ((line = bufferReader.readLine()) != null) {\r\n\t\t\t\t\tfileContent.add(line + \"\\n\");\r\n\t\t\t\t}\r\n\t\t\t\tbufferReader.close();\r\n\r\n\t\t\t\t// writes a modified, easily readable file (<program>-llfi_displayIndex.ll)\r\n\t\t\t\tBufferedWriter outputFile = new BufferedWriter(new FileWriter(outputIndexFile));\r\n\t\t\t\tlong index = 1;\r\n\t\t\t\tfor (int i = 0; i < fileContent.size(); i++) {\r\n\t\t\t\t\tString l = fileContent.get(i);\r\n\r\n\t\t\t\t\tif (l.contains(\"!llfi_index !\")) {\r\n\t\t\t\t\t\toutputFile.write(index++ + \"\\t\\t\" + l.substring(0, l.indexOf(\"!llfi_index !\")) + \"\\n\");\r\n\t\t\t\t\t} else if (!l.contains(\"= metadata !\")) {\r\n\t\t\t\t\t\toutputFile.write(\"\\t\\t\" + l);\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\toutputFile.close();\r\n\r\n\t\t\t\tController.cs.changeStateTo(State.INSTRUMENT_COMPLETED);\r\n\t\t\t\tController.errorString.clear();\r\n\r\n\t\t\t\tNode source = (Node) event.getSource();\r\n\t\t\t\tStage stage = (Stage) source.getScene().getWindow();\r\n\t\t\t\tstage.close();\r\n\t\t\t}\r\n\t\t} catch (IOException e) {\r\n\t\t\tSystem.err.println(\"ERROR: instrumentation failed!\");\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (InterruptedException e) {\r\n\t\t\tSystem.err.println(\"ERROR: instrumentation failed!\");\r\n\t\t\te.printStackTrace();\r\n\r\n\t\t}\r\n\t}\r\n\r\n\tprivate static enum InjectionType {\r\n\t\tHARDWARE, SOFTWARE, SOFTWARE_BATCH;\r\n\t}\r\n\r\n\tprivate boolean injectionModeChanged() {\r\n\t\tInjectionType current, previous;\r\n\r\n\t\t// first time doing instrument\r\n\t\tif (previousInstrumentOption == null) {\r\n\t\t\treturn true;\r\n\t\t}\r\n\r\n\t\t// compute current mode\r\n\t\tif (Controller.isHardwareInjection) {\r\n\t\t\tcurrent = InjectionType.HARDWARE;\r\n\t\t} else if (Controller.isBatchMode) {\r\n\t\t\tcurrent = InjectionType.SOFTWARE_BATCH;\r\n\t\t} else {\r\n\t\t\tcurrent = InjectionType.SOFTWARE;\r\n\t\t}\r\n\r\n\t\t// compute previous mode\r\n\t\tif (previousInstrumentOption.isHardwareFault) {\r\n\t\t\tprevious = InjectionType.HARDWARE;\r\n\t\t} else {\r\n\t\t\tif (previousInstrumentOption.includedInstruction.size() > 1) {\r\n\t\t\t\tprevious = InjectionType.SOFTWARE_BATCH;\r\n\t\t\t} else {\r\n\t\t\t\tprevious = InjectionType.SOFTWARE;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tif (current == previous) {\r\n\t\t\treturn false;\r\n\t\t} else {\r\n\t\t\treturn true;\r\n\t\t}\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void onClickInstructionInclude(ActionEvent event) {\r\n\r\n\t\tincludeInstList = new ArrayList<>();\r\n\t\tif(instIncludeListView.getItems().size() > 0)\r\n\t\t{\r\n\t\t\tfor(int i = 0; i < instIncludeListView.getItems().size();i++ )\r\n\t\t\t{\r\n\t\t\t\tincludeInstList.add(instIncludeListView.getItems().get(i).toString());\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t\tremoveList = new ArrayList<>();\r\n\t\tif(instExcludeListView.getSelectionModel().getSelectedItems().size() > 0)\r\n\t\t{\r\n\t\t\tfor(int i = 0; i < 
\r\n\t\titems = FXCollections.observableArrayList(includeInstList);\r\n\t\tinstIncludeListView.setItems(items);\r\n\t\tfor (String removed : removeList) {\r\n\t\t\tinstExcludeListView.getItems().remove(removed);\r\n\t\t}\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void onClickInstructionExclude(ActionEvent event) {\r\n\t\t// mirror of onClickInstructionInclude, moving items the other way\r\n\t\texcludeInstList = new ArrayList<>(instExcludeListView.getItems());\r\n\t\tremoveList = new ArrayList<>(instIncludeListView.getSelectionModel().getSelectedItems());\r\n\t\texcludeInstList.addAll(removeList);\r\n\r\n\t\titems = FXCollections.observableArrayList(excludeInstList);\r\n\t\tinstExcludeListView.setItems(items);\r\n\t\tfor (String removed : removeList) {\r\n\t\t\tinstIncludeListView.getItems().remove(removed);\r\n\t\t}\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void enableIncludeMultipleSelection(javafx.scene.input.MouseEvent event){\r\n\t\tinstIncludeListView.getSelectionModel().setSelectionMode(SelectionMode.MULTIPLE);\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void enableExcludeMultipleSelection(javafx.scene.input.MouseEvent event){\r\n\t\tinstExcludeListView.getSelectionModel().setSelectionMode(SelectionMode.MULTIPLE);\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void onSelectInstSelectRadio(ActionEvent event){\r\n\t\tif (customInstTypeRadio.isSelected()) {\r\n\t\t\tcustomInstCombo.setDisable(false);\r\n\t\t\tincludeLabel.setDisable(true);\r\n\r\n\t\t\tinstIncludeListView.setDisable(true);\r\n\t\t\tinstExcludeListView.setDisable(true);\r\n\t\t\tinstIncludeButton.setDisable(true);\r\n\t\t\tinstExcludeButton.setDisable(true);\r\n\r\n\t\t\tallCheckBox.setDisable(true);\r\n\t\t} else {\r\n\t\t\tcustomInstCombo.setDisable(true);\r\n\t\t\tincludeLabel.setDisable(false);\r\n\r\n\t\t\tinstIncludeListView.setDisable(false);\r\n\t\t\tinstExcludeListView.setDisable(false);\r\n\t\t\tinstIncludeButton.setDisable(false);\r\n\t\t\tinstExcludeButton.setDisable(false);\r\n\r\n\t\t\tallCheckBox.setDisable(false);\r\n\t\t}\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void onSelectRegSelectRadio(ActionEvent event) {\r\n\t\tif (customRegTypeRadio.isSelected()) {\r\n\t\t\tregCombo.setDisable(true);\r\n\t\t\tcustomRegCombo.setDisable(false);\r\n\t\t} else {\r\n\t\t\tregCombo.setDisable(false);\r\n\t\t\tcustomRegCombo.setDisable(true);\r\n\t\t}\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void onClickTraceOption(ActionEvent event) {\r\n\t\ttraceCountText.clear();\r\n
\t\tif (noTraceRadio.isSelected()) {\r\n\t\t\tforwardCheckbox.setDisable(true);\r\n\t\t\tbackwardCheckbox.setDisable(true);\r\n\t\t\ttraceCountText.setDisable(true);\r\n\t\t\ttraceCountLabel.setDisable(true);\r\n\t\t} else if (limitTraceRadio.isSelected()) {\r\n\t\t\tforwardCheckbox.setDisable(false);\r\n\t\t\tbackwardCheckbox.setDisable(false);\r\n\t\t\ttraceCountText.setDisable(false);\r\n\t\t\ttraceCountLabel.setDisable(false);\r\n\t\t} else { // full trace\r\n\t\t\tforwardCheckbox.setDisable(false);\r\n\t\t\tbackwardCheckbox.setDisable(false);\r\n\t\t\ttraceCountText.setDisable(true);\r\n\t\t\ttraceCountLabel.setDisable(true);\r\n\t\t}\r\n\t}\r\n\r\n\t@FXML\r\n\tprivate void onClickSelectProfile(ActionEvent event){\r\n\t\tStage stage = new Stage();\r\n\r\n\t\tFileChooser fileChooser = new FileChooser();\r\n\t\tfileChooser.setTitle(\"Open an input.yaml file\");\r\n\t\tfileChooser.getExtensionFilters().addAll(new FileChooser.ExtensionFilter(\"YAML\", \"*.yaml\"));\r\n\r\n\t\tFile file = fileChooser.showOpenDialog(stage);\r\n\t\tif (file != null) {\r\n\t\t\tloadProfile(file);\r\n\t\t}\r\n\t}\r\n\r\n\tprivate void loadProfile(File file) {\r\n\t\tInputYaml parser = new InputYaml();\r\n\t\tparser.load(file);\r\n\t\tInstrumentOption option = parser.getInstrumentOption();\r\n\r\n\t\t// the runtime options stay the same if the injection mode did not change\r\n\t\tpreviousInstrumentOption = option;\r\n\t\tpreviousRuntimeOption = parser.getRuntimeOptions();\r\n\r\n\t\t// record whether this profile injects hardware faults\r\n\t\tController.isHardwareInjection = option.isHardwareFault;\r\n\r\n\t\t// reset the options to the correct ones\r\n\t\tresetAllOptions();\r\n\r\n\t\tif (option.isHardwareFault) {\r\n\t\t\t// restore the register selection\r\n\t\t\tif (option.customRegister) {\r\n\t\t\t\tcustomRegTypeRadio.setSelected(true);\r\n\t\t\t\tcustomRegCombo.setValue(option.regloc_OR_customRegSelector);\r\n\t\t\t} else {\r\n\t\t\t\tregTypeRadio.setSelected(true);\r\n\t\t\t\tregCombo.setValue(option.regloc_OR_customRegSelector);\r\n\t\t\t}\r\n\t\t\tonSelectRegSelectRadio(null);\r\n\t\t}\r\n\r\n\t\t// restore the selected instruction(s)\r\n\t\tif (option.customInstruction) {\r\n\t\t\tcustomInstTypeRadio.setSelected(true);\r\n\t\t\tcustomInstCombo.setValue(option.includedInstruction.get(0));\r\n\t\t} else {\r\n\t\t\tinstTypeRadio.setSelected(true);\r\n\r\n\t\t\t// work out the new excluded list\r\n\t\t\tList<String> newExcludedList;\r\n\t\t\tList<String> newIncludedList = new ArrayList<String>();\r\n\t\t\tif (Controller.isHardwareInjection) {\r\n\t\t\t\tnewExcludedList = Controller.configReader.getInstruction();\r\n\t\t\t} else {\r\n\t\t\t\tnewExcludedList = getApplicableSoftwareFailures();\r\n\t\t\t}\r\n\t\t\tfor (String exclude: instExcludeListView.getItems()) {\r\n\t\t\t\tfor (String include: option.includedInstruction) {\r\n\t\t\t\t\tif (include.equals(exclude.split(\"-\")[0])) {\r\n\t\t\t\t\t\tnewExcludedList.remove(exclude);\r\n\t\t\t\t\t\tnewIncludedList.add(exclude);\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\t// set the excluded list\r\n\t\t\tinstExcludeListView.setItems(FXCollections.observableArrayList(newExcludedList));\r\n\t\t\t// set the included list\r\n\t\t\tinstIncludeListView.setItems(FXCollections.observableArrayList(newIncludedList));\r\n\t\t}\r\n\t\tonSelectInstSelectRadio(null);\r\n
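\r\n\t\t// resetAllOptions() above rebuilt every control with the defaults for\r\n\t\t// the loaded injection type; the lines below only re-apply the\r\n\t\t// remaining values stored in input.yaml on top of those defaults.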
\r\n\t\t// clear the 'select all' checkbox (setSelected alone does not fire\r\n\t\t// the onClickAll handler)\r\n\t\tallCheckBox.setSelected(false);\r\n\r\n\t\t// restore the trace state\r\n\t\tif (option.maxTrace != null) {\r\n\t\t\tlimitTraceRadio.setSelected(true);\r\n\t\t} else if (option.tracingEnabled) {\r\n\t\t\tfullTraceRadio.setSelected(true);\r\n\t\t} else {\r\n\t\t\tnoTraceRadio.setSelected(true);\r\n\t\t}\r\n\t\tonClickTraceOption(null);\r\n\r\n\t\t// set the trace count if one exists\r\n\t\tif (option.maxTrace != null) {\r\n\t\t\ttraceCountText.setText(\"\" + option.maxTrace.intValue());\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Resets the profile: deletes the folder's input.yaml and restores the\r\n\t * default instrument options.\r\n\t * @param event\r\n\t */\r\n\t@FXML\r\n\tprivate void onClickCreateNewProfile(ActionEvent event)\r\n\t{\r\n\t\tFile yFile = new File(folderName+\"/input.yaml\");\r\n\t\tyFile.delete();\r\n\t\tresetAllOptions();\r\n\t\tController.cs.changeStateTo(State.COMPILE_COMPLETED);\r\n\t}\r\n\r\n\t@FXML\r\n\tpublic void onClickAll(ActionEvent e) {\r\n\t\tresetInstList(allCheckBox.isSelected());\r\n\t}\r\n\r\n\t/**\r\n\t * Resets the include/exclude boxes to every applicable hardware or\r\n\t * software fault.\r\n\t * @param isSelectAll - when true, all faults start in the include list;\r\n\t * otherwise they all start in the exclude list\r\n\t */\r\n\tprivate void resetInstList(boolean isSelectAll) {\r\n\t\tList<String> allApplicableFaults;\r\n\t\tif (Controller.isHardwareInjection) {\r\n\t\t\tallApplicableFaults = Controller.configReader.getInstruction();\r\n\t\t} else {\r\n\t\t\tallApplicableFaults = getApplicableSoftwareFailures();\r\n\t\t}\r\n\t\titems = FXCollections.observableArrayList(allApplicableFaults);\r\n\t\tObservableList<String> blankList = FXCollections\r\n\t\t\t\t.observableArrayList(new ArrayList<String>());\r\n\r\n\t\tif (isSelectAll) {\r\n\t\t\tinstExcludeListView.setItems(blankList);\r\n\t\t\tinstIncludeListView.setItems(items);\r\n\t\t} else {\r\n\t\t\tinstIncludeListView.setItems(blankList);\r\n\t\t\tinstExcludeListView.setItems(items);\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Reset the InstrumentOption to reflect hardware or software injection.\r\n\t */\r\n\tprivate void resetAllOptions() {\r\n\t\t// RESETS / REREADS ALL FILES\r\n\r\n\t\t// reset the register list\r\n\t\tregisterList = Controller.configReader.getRegister();\r\n\t\tregCombo.setItems(FXCollections.observableArrayList(registerList));\r\n\t\tregCombo.setPromptText(\"-- Select --\");\r\n\r\n\t\t// reset the custom register list\r\n\t\tcustomRegList = Controller.configReader.getCustomRegister();\r\n\t\tcustomRegCombo.setItems(FXCollections.observableArrayList(customRegList));\r\n\t\tcustomRegCombo.setPromptText(\"-- Select --\");\r\n\r\n\t\t// reset the included/excluded instruction list\r\n\t\tresetInstList(false);\r\n\r\n\t\t// reset the custom instruction list\r\n\t\tif (Controller.isHardwareInjection) {\r\n\t\t\tcustomInstList = Controller.configReader.getCustomInstruction();\r\n\t\t} else {\r\n\t\t\tcustomInstList = new ArrayList<String>();\r\n\t\t}\r\n\t\tcustomInstCombo.setItems(FXCollections.observableArrayList(customInstList));\r\n\t\tcustomInstCombo.setPromptText(\"-- Select --\");\r\n\r\n\t\t// RESET STATES\r\n\r\n\t\t// reset the 'select all' checkbox\r\n\t\tallCheckBox.setSelected(false);\r\n\r\n\t\t// reset included / excluded inst\r\n\t\tinstTypeRadio.setSelected(true);\r\n\t\tonSelectInstSelectRadio(null);\r\n\r\n\t\t// reset the register selection state\r\n\t\tregTypeRadio.setSelected(true);\r\n\t\tonSelectRegSelectRadio(null);\r\n\r\n\t\t// reset the trace state\r\n\t\tfullTraceRadio.setSelected(true);\r\n\t\tonClickTraceOption(null);\r\n\r\n\t\t// switch display between hardware/software 
injection\r\n\t\tchangeInjectionDisplay(Controller.isHardwareInjection);\r\n\t}\r\n\r\n\t/**\r\n\t * #SFIT opens llfi.applicable.software.failures.txt and reads it into a list.\r\n\t * If the file does not exist, it will generate one.\r\n\t * @return - the list of applicable software failures\r\n\t */\r\n\t@SuppressWarnings(\"unchecked\")\r\n\tprivate List<String> getApplicableSoftwareFailures() {\r\n\t\tFileReader applicableSoftwareFailure = null;\r\n\t\tString inputLocation = folderName + \"/llfi.applicable.software.failures.txt\";\r\n\t\ttry {\r\n\t\t\tapplicableSoftwareFailure = new FileReader(inputLocation);\r\n\t\t} catch (FileNotFoundException e) {\r\n\t\t\t// file not found, generate one\r\n\t\t\tController.softwareFailureAutoScan();\r\n\t\t\ttry {\r\n\t\t\t\tapplicableSoftwareFailure = new FileReader(inputLocation);\r\n\t\t\t} catch (FileNotFoundException e1) {\r\n\t\t\t\tSystem.err.println(\"ERROR: Unable to read \" + inputLocation);\r\n\t\t\t\te1.printStackTrace();\r\n\t\t\t\treturn null;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tYaml y = new Yaml();\r\n\t\tMap<String, Object> config = (Map<String, Object>) y.load(applicableSoftwareFailure);\r\n\r\n\t\tif (config.get(\"instSelMethod\") == null) {\r\n\t\t\treturn new ArrayList<String>();\r\n\t\t} else {\r\n\t\t\treturn (List<String>) config.get(\"instSelMethod\");\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Switch the GUI between hardware and software fault injection.\r\n\t * @param e\r\n\t */\r\n\t@FXML\r\n\tprivate void onClickChangeInjectionType(ActionEvent e) {\r\n\t\tif (hardware.isSelected()) {\r\n\t\t\tController.isHardwareInjection = true;\r\n\t\t} else {\r\n\t\t\tController.isHardwareInjection = false;\r\n\t\t}\r\n\t\tresetAllOptions();\r\n\t}\r\n\r\n\tprivate void changeInjectionDisplay(boolean isHardwareInjection) {\r\n\t\tif (!isHardwareInjection) {\r\n\t\t\tsoftware.setSelected(true);\r\n\r\n\t\t\t// hide the controls that only apply to hardware injection\r\n\t\t\tcustomRegTypeRadio.setVisible(false);\r\n\t\t\tregisterSelectionMethodLabel.setVisible(false);\r\n\t\t\tseparator.setVisible(false);\r\n\t\t\tregTypeRadio.setVisible(false);\r\n\t\t\tregCombo.setVisible(false);\r\n\t\t\tcustomRegCombo.setVisible(false);\r\n\r\n\t\t\tinstTypeRadio.setVisible(false);\r\n\t\t\tcustomInstCombo.setVisible(false);\r\n\t\t\tcustomInstTypeRadio.setVisible(false);\r\n\t\t} else {\r\n\t\t\thardware.setSelected(true);\r\n\r\n\t\t\t// show the controls that only apply to hardware injection\r\n\t\t\tcustomRegTypeRadio.setVisible(true);\r\n\t\t\tregisterSelectionMethodLabel.setVisible(true);\r\n\t\t\tseparator.setVisible(true);\r\n\t\t\tregTypeRadio.setVisible(true);\r\n\t\t\tregCombo.setVisible(true);\r\n\t\t\tcustomRegCombo.setVisible(true);\r\n\r\n\t\t\tinstTypeRadio.setVisible(true);\r\n\t\t\tcustomInstCombo.setVisible(true);\r\n\t\t\tcustomInstTypeRadio.setVisible(true);\r\n\t\t}\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void initialize(URL url, ResourceBundle rb) {\r\n\t\tfolderName = Controller.currentProgramFolder;\r\n\r\n\t\t// load the profile if one exists (user is re-instrumenting)\r\n\t\tFile f = new File(Controller.currentProgramFolder + \"/input.yaml\");\r\n\r\n\t\tif(f.exists()) {\r\n\t\t\tcreateNewProfileButton.setDisable(false);\r\n\t\t\tfileContent = new ArrayList<>();\r\n\r\n\t\t\tloadProfile(f);\r\n\t\t} else {\r\n\t\t\tresetAllOptions();\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * fiResultDisplay from Controller.class needs to be passed to this controller\r\n\t * after initialization. 
See http://stackoverflow.com/questions/14187963/passing-parameters-javafx-fxml\r\n\t * @param fiResultDisplay\r\n\t */\r\n\tpublic void initFiResultDisplay(ComboBox<String> fiResultDisplay) {\r\n\t\tthis.fiResultDisplay = fiResultDisplay;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.7442799210548401, "alphanum_fraction": 0.7483176589012146, "avg_line_length": 29.95833396911621, "blob_id": "fd1fca16de2086840eb1cc1f827a4211a4c28284", "content_id": "4028b584fba3a1650ce639526cf46c06a6facfbc", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 743, "license_type": "permissive", "max_line_length": 83, "num_lines": 24, "path": "/config/llvm_passes.cmake", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "# TODO: support installed LLVM and LLFI installation\n\ncmake_minimum_required(VERSION 2.8)\n\ninclude(../config/llvm_paths.cmake)\nset(LLVM_ON_UNIX 1)\n\nif (NOT EXISTS ${LLVM_DST_ROOT}/include/llvm)\n message(FATAL_ERROR \"LLVM_DST_ROOT (${LLVM_DST_ROOT}) is not a valid LLVM build\")\nendif()\n\nset(LLVM_INCLUDE_DIRS_LLFI ${LLVM_DST_ROOT}/include ${LLVM_SRC_ROOT}/include)\nset(LLVM_LIBRARY_DIRS_LLFI ${LLVM_DST_ROOT}/lib)\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} \"${LLVM_DST_ROOT}/share/llvm/cmake\")\n\ninclude(LLVMConfig)\ninclude(AddLLVM)\n\nadd_definitions(-D__STDC_CONSTANT_MACROS)\nadd_definitions(-D__STDC_LIMIT_MACROS)\n\nadd_definitions(${LLVM_DEFINITIONS})\ninclude_directories(${LLVM_INCLUDE_DIRS_LLFI})\nlink_directories(${LLVM_LIBRARY_DIRS_LLFI})\n" }, { "alpha_fraction": 0.4714912176132202, "alphanum_fraction": 0.5438596606254578, "avg_line_length": 25.823530197143555, "blob_id": "d66a0ffec323ed08017fb1feecbf7022c3a7b9b2", "content_id": "70fdbfd8c6a7cce7abdd43fa35fb87937c6d954a", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 912, "license_type": "permissive", "max_line_length": 75, "num_lines": 34, "path": "/test_suite/PROGRAMS/mcf/pbla.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/**************************************************************************\nPBLA.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. 
\nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:22:19 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: pbla.h,v 1.10 2005/02/17 19:42:21 bzfloebe Exp $ */\n\n\n\n#ifndef _PBLA_H\n#define _PBLA_H\n\n\n#include \"defines.h\"\n\n\nextern node_t *primal_iminus _PROTO_(( flow_t *, long *, node_t *, \n node_t *, node_t ** ));\n\n\n#endif\n" }, { "alpha_fraction": 0.6335460543632507, "alphanum_fraction": 0.6360984444618225, "avg_line_length": 34.3870964050293, "blob_id": "abd28c028a31d23bb3fbc2976bef4e163d9aab77", "content_id": "7c05100f1f114625ee4609ef2f1879fe0aa16291", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5485, "license_type": "permissive", "max_line_length": 207, "num_lines": 155, "path": "/llvm_passes/core/ProfilingPass.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "//===- profilingpass.cpp - Dynamic Instruction Profiling Pass -==//\n//\n// LLFI Distribution\n//\n// This file is distributed under the University of Illinois Open Source\n// License. See LICENSE.TXT for details.\n//\n//===----------------------------------------------------------------------===//\n// The trace function is a C function which increments the count \n// when the function is executed\n// See profiling_lib.c doProfiling() function for more details. This function \n// definition is linked to the instrumented bitcode file (after this pass). \n//===----------------------------------------------------------------------===//\n\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/Support/raw_ostream.h\"\n//BEHROOZ:\n#include \"llvm/Support/CommandLine.h\"\n\n\n#include <list>\n#include <map>\n#include <vector>\n\n#include \"ProfilingPass.h\"\n#include \"Controller.h\"\n#include \"Utils.h\"\n\nusing namespace llvm;\n\nnamespace llfi {\n\n//BEHROOZ: \nextern cl::opt< std::string > llfilogfile;\n\nbool ProfilingPass::runOnModule(Module &M) {\n\tLLVMContext &context = M.getContext();\n\n std::map<Instruction*, std::list< int >* > *fi_inst_regs_map;\n Controller *ctrl = Controller::getInstance(M);\n ctrl->getFIInstRegsMap(&fi_inst_regs_map);\n //BEHROOZ: \n std::string err;\n raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::F_Append);\n\n for (std::map<Instruction*, std::list< int >* >::const_iterator \n inst_reg_it = fi_inst_regs_map->begin(); \n inst_reg_it != fi_inst_regs_map->end(); ++inst_reg_it) {\n Instruction *fi_inst = inst_reg_it->first;\n std::list<int > *fi_regs = inst_reg_it->second;\n /*BEHROOZ: This section makes sure that we do not instrument the intrinsic functions*/ \n if(isa<CallInst>(fi_inst)){\n bool continue_flag=false;\n for (std::list<int>::iterator reg_pos_it_mem = fi_regs->begin();\n (reg_pos_it_mem != fi_regs->end()) && (*reg_pos_it_mem != DST_REG_POS); ++reg_pos_it_mem) {\n std::string reg_mem = fi_inst->getOperand(*reg_pos_it_mem)->getName();\n if ((reg_mem.find(\"memcpy\") != std::string::npos) || (reg_mem.find(\"memset\") != std::string::npos) || (reg_mem.find(\"expect\") != std::string::npos) || (reg_mem.find(\"memmove\") != std::string::npos)){\n logFile << \"LLFI cannot instrument \" << reg_mem << \" intrinsic function\"<< \"\\n\";\n continue_flag=true;\n break;\n }\n }\n if(continue_flag)\n 
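// one of the operands matched an intrinsic (memcpy/memset/expect/memmove) above, so skip this call\n        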
continue;\n }\n /*BEHROOZ: This is to make sure we do not instrument landingpad instructions.*/\n std::string current_opcode = fi_inst->getOpcodeName();\n if(current_opcode.find(\"landingpad\") != std::string::npos){\n logFile << \"LLFI cannot instrument \" << current_opcode << \" instruction\" << \"\\n\";\n continue;\n }\n\n\n Value *fi_reg = *(fi_regs->begin())==DST_REG_POS ? fi_inst : (fi_inst->getOperand(*(fi_regs->begin())));\n Instruction *insertptr = getInsertPtrforRegsofInst(fi_reg, fi_inst);\n \n // function declaration\n Constant* profilingfunc = getLLFILibProfilingFunc(M);\n\n // prepare for the calling argument and call the profiling function\n std::vector<Value*> profilingarg(1);\n const IntegerType* itype = IntegerType::get(context, 32);\n\n //LLVM 3.3 Upgrading\n IntegerType* itype_non_const = const_cast<IntegerType*>(itype);\n Value* opcode = ConstantInt::get(itype_non_const, fi_inst->getOpcode());\n profilingarg[0] = opcode; \n ArrayRef<Value*> profilingarg_array_ref(profilingarg);\n\n CallInst::Create(profilingfunc, profilingarg_array_ref,\n \"\", insertptr);\n }\n\n //BEHROOZ: \n logFile.close();\n\n addEndProfilingFuncCall(M);\n return true;\n}\n\n\nvoid ProfilingPass::addEndProfilingFuncCall(Module &M) {\n Function* mainfunc = M.getFunction(\"main\");\n if (mainfunc != NULL) {\n Constant *endprofilefunc = getLLFILibEndProfilingFunc(M);\n\n // function call\n std::set<Instruction*> exitinsts;\n getProgramExitInsts(M, exitinsts);\n assert (exitinsts.size() != 0 \n && \"Program does not have explicit exit point\");\n\n for (std::set<Instruction*>::iterator it = exitinsts.begin();\n it != exitinsts.end(); ++it) {\n Instruction *term = *it;\n CallInst::Create(endprofilefunc, \"\", term);\n }\n } else {\n errs() << \"ERROR: Function main does not exist, \" << \n \"which is required by LLFI\\n\";\n exit(1);\n }\n}\n\nConstant *ProfilingPass::getLLFILibProfilingFunc(Module &M) {\n\tLLVMContext& context = M.getContext();\n std::vector<Type*> paramtypes(1);\n paramtypes[0] = Type::getInt32Ty(context);\n\n // LLVM 3.3 Upgrading\n ArrayRef<Type*> paramtypes_array_ref(paramtypes);\n\n FunctionType* profilingfunctype = FunctionType::get(\n Type::getVoidTy(context), paramtypes_array_ref, false);\n Constant *profilingfunc = M.getOrInsertFunction(\n \"doProfiling\", profilingfunctype);\n return profilingfunc;\n}\n\nConstant *ProfilingPass::getLLFILibEndProfilingFunc(Module &M) {\n LLVMContext& context = M.getContext();\n FunctionType* endprofilingfunctype = FunctionType::get(\n Type::getVoidTy(context), false);\n Constant *endprofilefunc = M.getOrInsertFunction(\"endProfiling\", \n endprofilingfunctype);\n return endprofilefunc;\n}\n\nstatic RegisterPass<ProfilingPass> X(\"profilingpass\", \n \"Profiling pass\", false, false);\n}\n" }, { "alpha_fraction": 0.635456383228302, "alphanum_fraction": 0.6396298408508301, "avg_line_length": 35.01960754394531, "blob_id": "f477546ca2e927c6fe4fb905e53df51ae81e837b", "content_id": "c264564ad0b492da3146e795e5d8a7a6789e5ab1", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5511, "license_type": "permissive", "max_line_length": 175, "num_lines": 153, "path": "/bin/SoftwareFailureAutoScan.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n\n\"\"\"\n%(prog)s takes a single IR file as input and scans all instructions to find potential applicable target points for fault injection, and to create a list of applicable failure modes.\n\nUsage: %(prog)s [OPTIONS] <source IR file>\n\nList of options:\n\n-outputfilename=<filename>: set the name of the file that stores the list of applicable software failures (default: llfi.applicable.software.failures.txt)\n    Note: If <filename> is a relative path instead of an absolute path, the base path of <filename> will be the path of the targeting IR file instead of the calling path.\n    \n-numOfRuns <number of runs>: set the number of runs for each found failure mode (default: 1)\n--enable_tracing: enable tracing\n--enable_forward_injection: enable injection on the forward slice of the selected injection point\n--enable_backward_injection: enable injection on the backward slice of the selected injection point\n--no_input_yaml: do not generate a master input.yaml automatically.\n--help: print this message.\n\n\"\"\"\n\n\nimport os\nimport subprocess\nimport sys\nfrom subprocess import call\nimport yaml\n\nscript_path = os.path.realpath(os.path.dirname(__file__))\nsys.path.append(os.path.join(script_path, '../config'))\nimport llvm_paths\n\n\noptbin = os.path.join(llvm_paths.LLVM_DST_ROOT, \"bin/opt\")\nllcbin = os.path.join(llvm_paths.LLVM_DST_ROOT, \"bin/llc\")\nllfipasses = os.path.join(script_path, \"../llvm_passes/llfi-passes.so\")\nllfilinklib = os.path.join(script_path, \"../runtime_lib\")\nprog = os.path.basename(sys.argv[0])\n# option list for AutoScan pass\noptions = []\n# output file name of AutoScan pass\nfilename = \"llfi.applicable.software.failures.txt\"\n# directory of the target IR\nbasedir = \"\"\n# input.yaml generation\nrun_num_dict = {'numOfRuns': 1}\ntracing_dict = {'tracingPropagation':False, 'tracingPropagationOption':{'generateCDFG':False}}\ntrace_injection_dict = {'includeInjectionTrace':[]}\n\nno_input_yaml_flag = False\n\ndef parseArgs(args):\n  global basedir\n  global options\n  global filename\n  global no_input_yaml_flag\n  \n  cwd = os.getcwd()\n  for i, arg in enumerate(args):\n    option = arg\n    if os.path.isfile(arg):\n      basedir = os.path.realpath(os.path.dirname(arg))\n      option = os.path.basename(arg)\n      options.append(option)\n    elif arg.startswith('-outputfilename='):\n      filename = arg.split('-outputfilename=')[-1]\n      options.append('-softwarescan_outputfilename='+filename)\n    elif arg == \"-numOfRuns\":\n      run_num_dict['numOfRuns'] = int(args[i+1])\n    elif arg == \"--enable_tracing\":\n      tracing_dict['tracingPropagation'] = True\n      tracing_dict['tracingPropagationOption']['generateCDFG'] = True\n    elif arg == \"--enable_backward_injection\":\n      trace_injection_dict['includeInjectionTrace'].append('backward')\n    elif arg == \"--enable_forward_injection\":\n      trace_injection_dict['includeInjectionTrace'].append('forward')\n    elif arg == \"--no_input_yaml\":\n      no_input_yaml_flag = True\n  os.chdir(basedir)\n\ndef usage(msg = None):\n  retval = 0\n  if msg is not None:\n    retval = 1\n    msg = \"ERROR: \" + msg\n  print(msg, file=sys.stderr)\n  print(__doc__ % globals(), file=sys.stderr)\n  sys.exit(retval)\n\ndef runAutoScan(args):\n  global filename\n  execlist = [optbin , \"-load\", llfipasses, \"-genllfiindexpass\", \"-SoftwareFailureAutoScanPass\", \"-analyze\"]\n  execlist.extend(args)\n  print(' '.join(execlist))\n  p = subprocess.Popen(execlist)\n  p.wait()\n  if p.returncode != 0:\n    print(\"ERROR: Software auto scan pass return code != 0\\n\")\n    exit(p.returncode)\n  elif 
os.path.isfile(os.path.join(basedir, filename)) == False:\n    print(\"ERROR: No output file found at: \"+os.path.join(basedir, filename)+\"!\\n\")\n    exit(1)\n  return 0\n\ndef generateInputYaml():\n  global filename\n  selector_list = []\n  with open(os.path.join(basedir, filename)) as f:\n    for line in f.readlines()[1:]:\n      selector_list.append(line.split('-')[-1].strip())\n  customInstselector_dict = {'customInstselector':{'include':selector_list}}\n  yaml_dict = {\n    'compileOption':{\n      'instSelMethod':[customInstselector_dict],\n      'regSelMethod':'customregselector',\n      'customRegSelector':'Automatic',\n    },\n    'runOption':[{\n      'run':{\n        'fi_type':'AutoInjection'\n      }\n    }]\n  }\n  yaml_dict['compileOption'].update(tracing_dict)\n  yaml_dict['compileOption'].update(trace_injection_dict)\n  yaml_dict['runOption'][0]['run'].update(run_num_dict)\n  yaml_text = yaml.dump(yaml_dict, default_flow_style=False)\n  with open(os.path.join(basedir, 'input.yaml'), 'w') as f:\n    f.write(yaml_text)\n  return 0\n\ndef cleanDir():\n  global basedir\n  stale_config_file_path = os.path.join(basedir, 'llfi.config.compiletime.txt')\n  if os.path.isfile(stale_config_file_path):\n    os.remove(stale_config_file_path)\n\ndef main(args):\n  global no_input_yaml_flag\n\n  parseArgs(args)\n  r = runAutoScan(options)\n  if no_input_yaml_flag == False:\n    s = generateInputYaml()\n  cleanDir()\n  return 0\n\nif __name__ == \"__main__\":\n  if len(sys.argv[1:]) < 1 or sys.argv[1] == '--help' or sys.argv[1] == '-h':\n    usage()\n    sys.exit(0)\n  r = main(sys.argv[1:])\n  sys.exit(r)\n" }, { "alpha_fraction": 0.7442799210548401, "alphanum_fraction": 0.7483176589012146, "avg_line_length": 42.27777862548828, "blob_id": "08fee5d49d06d0982c144ec92dddf46296905fc6", "content_id": "02562cad012e4862349eea476f32b63190d4c445", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 782, "license_type": "permissive", "max_line_length": 127, "num_lines": 18, "path": "/web-app/README.MD", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "Dependencies:\nNode.js\nWebpack\n\nSteps to set up the development environment: \n1: Download this project from Git \n2: Download Node.js \n3: Install libraries: Go to the web-app directory and run \"npm install\" \n4: Install Webpack: In the same directory as step 3, run \"sudo npm install -g webpack\" \n5: Configure the LLFI root path for the server: \nBy default, the program uses the environment variable $llfibuild as the path of the LLFI build directory. \nYou can set the environment variable llfibuild in your system to point it to the LLFI build directory on your local machine. 
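\nFor example, with tcsh (the path below is only an illustration): setenv llfibuild $HOME/LLFI/llfi/ 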
\n\nStart the server: \nGo to the /web-app/server folder and run \"node server.js\" \n\nStart the front-end dev tool: \nGo to the web-app directory and run \"webpack\" or \"webpack -w\" \n" }, { "alpha_fraction": 0.6761633157730103, "alphanum_fraction": 0.6780626773834229, "avg_line_length": 30, "blob_id": "6f06c68d1e695d5ff34ba869df72153574deb8c5", "content_id": "e20be57a1e3bb0c5ff885095d2826aa77e43313c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1053, "license_type": "permissive", "max_line_length": 96, "num_lines": 34, "path": "/web-app/views/src/js/stores/fileUploadStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar fileUploadActions = require(\"./../actions/fileUploadActions\");\nvar fileList = [];\nvar fileName ='';\nvar fileUploadStore = Reflux.createStore({\n\tlistenables: [fileUploadActions],\n\t// Adding a single file to store\n\tonAddFile: function(file) {\n\t\tfileList.push({\n\t\t\tfileName: file.fileName,\n\t\t\tfileContent: file.fileContent\n\t\t});\n\t\tthis.trigger(fileList);\n\t},\n\t// Adding an array of files to store\n\tonAddFiles: function(files) {\n\t\tfor (var i = 0; i < files.length; i++) {\n\t\t\tif (fileList.findIndex(file => file.fileName == files[i].fileName) >= 0) {\n\t\t\t\t// Replace the existing file if a file is already loaded\n\t\t\t\tvar existingIndex = fileList.findIndex(file => file.fileName == files[i].fileName);\n\t\t\t\tfileList[existingIndex] = { fileName: files[i].fileName, fileContent: files[i].fileContent};\n\t\t\t} else {\n\t\t\t\t// Add the new file to the list\n\t\t\t\tfileList.push({\n\t\t\t\t\tfileName: files[i].fileName,\n\t\t\t\t\tfileContent: files[i].fileContent\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t\tthis.trigger(fileList);\n\t}\n});\n\nmodule.exports = fileUploadStore;" }, { "alpha_fraction": 0.6684160232543945, "alphanum_fraction": 0.6684160232543945, "avg_line_length": 40.939998626708984, "blob_id": "47ce05dc51691cb155b3b6186e9b61fa78b5af9d", "content_id": "5584280b4e456810649ee448aea8bcd7cd63bdf5", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2096, "license_type": "permissive", "max_line_length": 139, "num_lines": 50, "path": "/web-app/views/src/js/components/mainWindow/bottomPannel/outputSummary.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar ProfilingStatus = require(\"./outputTabs/profilingStatus\");\nvar FaultInjectionStatus = require(\"./outputTabs/faultInjectionStatus\");\nvar FaultSummary = require(\"./outputTabs/faultSummary\");\nvar ErrorDisplay = require(\"./outputTabs/errorDisplay\");\nvar Console = require(\"./outputTabs/console\");\n\n\nvar OutputSummary = React.createClass({\n\tgetInitialState: function(){\n\t\treturn {\n\t\t\tselectedTab: 'profilingStatus'\n\t\t};\n\t},\n\trender: function() {\n\t\treturn (\n\t\t\t<div class=\"outputSummaryWindow\">\n\t\t\t\t<ul class=\"nav nav-tabs outputTabs\">\n\t\t\t\t\t<li class={this.state.selectedTab === \"profilingStatus\" ? \"active\" : ''} onClick={()=> this.onTabChange(\"profilingStatus\")}>\n\t\t\t\t\t\t<a>Profiling Status</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li class={this.state.selectedTab === \"faultInjectionStatus\" ? 
\"active\" : ''} onClick={()=> this.onTabChange(\"faultInjectionStatus\")}>\n\t\t\t\t\t\t<a>Fault Injection Status</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li class={this.state.selectedTab === \"faultSummary\" ? \"active\" : ''} onClick={()=> this.onTabChange(\"faultSummary\")}>\n\t\t\t\t\t\t<a>Fault Summary</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li class={this.state.selectedTab === \"errorDisplay\" ? \"active\" : ''} onClick={()=> this.onTabChange(\"errorDisplay\")}>\n\t\t\t\t\t\t<a>Error Display</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li class={this.state.selectedTab === \"console\" ? \"active\" : ''} onClick={()=> this.onTabChange(\"console\")}>\n\t\t\t\t\t\t<a>Console</a>\n\t\t\t\t\t</li>\n\t\t\t\t</ul>\n\t\t\t\t<ProfilingStatus shouldDisplay={this.state.selectedTab === \"profilingStatus\" ? true : false}></ProfilingStatus>\n\t\t\t\t<FaultInjectionStatus shouldDisplay={this.state.selectedTab === \"faultInjectionStatus\" ? true : false}></FaultInjectionStatus>\n\t\t\t\t<FaultSummary shouldDisplay={this.state.selectedTab === \"faultSummary\" ? true : false}></FaultSummary>\n\t\t\t\t<ErrorDisplay shouldDisplay={this.state.selectedTab === \"errorDisplay\" ? true : false}></ErrorDisplay>\n\t\t\t\t<Console shouldDisplay={this.state.selectedTab === \"console\" ? true : false}></Console>\n\t\t\t</div>\n\t\t);\n\t},\n\tonTabChange: function(tabName) {\n\t\tthis.setState({\n\t\t\tselectedTab: tabName\n\t\t});\n\t},\n});\n\nmodule.exports = OutputSummary;" }, { "alpha_fraction": 0.5883466005325317, "alphanum_fraction": 0.5891032814979553, "avg_line_length": 30.464284896850586, "blob_id": "483c646f6f21dd0f86605a475035717edf1657d2", "content_id": "d6627cfffdbbe19a7c9648f1c92b220a220f11b2", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2643, "license_type": "permissive", "max_line_length": 100, "num_lines": 84, "path": "/llvm_passes/core/FIRegSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/Type.h\"\n#include \"llvm/Support/Debug.h\"\n#include \"llvm/Support/CommandLine.h\"\n#include \"llvm/Support/raw_ostream.h\"\n\n#include \"FIRegSelector.h\"\n\nusing namespace llvm;\n\nnamespace llfi {\n\nextern cl::opt< std::string > llfilogfile;\n\nvoid FIRegSelector::getFIInstRegMap(\n const std::set< Instruction* > *instset, \n std::map<Instruction*, std::list< int >* > *instregmap) {\n std::string err;\n raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::F_Append);\n\n for (std::set<Instruction*>::const_iterator inst_it = instset->begin();\n inst_it != instset->end(); ++inst_it) {\n Instruction *inst = *inst_it;\n std::list<int> *reglist = new std::list<int>();\n // dstination register\n if (isRegofInstFITarget(inst, inst)) {\n if (isRegofInstInjectable(inst, inst))\n reglist->push_back(DST_REG_POS);\n else if (err == \"\") {\n logFile << \"LLFI cannot inject faults in destination reg of \" << *inst\n << \"\\n\";\n }\n }\n // source register\n int pos = 0;\n for (User::op_iterator op_it = inst->op_begin(); op_it != inst->op_end();\n ++op_it, ++pos) {\n Value *src = *op_it;\n if (isRegofInstFITarget(src, inst, pos)) {\n if (isRegofInstInjectable(src, inst)) {\n reglist->push_back(pos);\n //dbgs()<<\"srcreg \"<<\" inst:\"<<*inst<<\" reg:\"<<*inst->getOperand(pos)<<\" pos:\"<<pos<<\"\\n\";\n } else if (err == \"\") {\n logFile << \"LLFI cannot inject faults in source reg \";\n if (isa<BasicBlock>(src)) \n logFile << 
src->getName();\n          else\n            logFile << *src;\n          logFile << \" of instruction \" << *inst << \"\\n\";\n        }\n      }\n    }\n    \n    if (reglist->size() != 0) {\n      instregmap->insert(\n          std::pair<Instruction*, std::list< int >* >(inst, reglist));\n    } else if (err == \"\") {\n      logFile << \"The selected instruction \" << *inst << \n          \" does not have any valid registers for fault injection\\n\";\n    }\n  }\n  logFile.close();\n}\n\nbool FIRegSelector::isRegofInstInjectable(Value *reg, Instruction *inst) {\n  // TODO: keep updating\n  // if we find anything that can be covered, remove them from the checks\n  // if we find new cases that we cannot handle, add them to the checks\n  if (reg == inst) {\n    if (inst->getType()->isVoidTy() || isa<TerminatorInst>(inst)) {\n      return false;\n    }\n  } else {\n    if (isa<BasicBlock>(reg) || isa<PHINode>(inst))\n      return false;\n  }\n  return true;\n}\n\nbool FIRegSelector::isRegofInstFITarget(Value* reg, Instruction* inst, int pos){\n  return isRegofInstFITarget(reg, inst);\n}\n\n}\n" }, { "alpha_fraction": 0.640141487121582, "alphanum_fraction": 0.6560565829277039, "avg_line_length": 30.41666603088379, "blob_id": "b1aa0c61143ed031ea792daf87ee8c1682ed694f", "content_id": "3f09091107f4b777fc4bca7a137cdc98ff8262e0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1131, "license_type": "permissive", "max_line_length": 75, "num_lines": 36, "path": "/runtime_lib/CommonFaultInjectors.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"FaultInjector.h\"\n#include \"FaultInjectorManager.h\"\n\nclass BitFlipFI: public HardwareFaultInjector {\n public:\n  virtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,\n                           char *buf) {\n    unsigned fi_bytepos = fi_bit / 8;\n    unsigned fi_bitpos = fi_bit % 8;\n    buf[fi_bytepos] ^= 0x1 << fi_bitpos;\n  }\n};\n\nclass StuckAt0FI: public HardwareFaultInjector {\n public:\n  virtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,\n                           char *buf) {\n    unsigned fi_bytepos = fi_bit / 8;\n    unsigned fi_bitpos = fi_bit % 8;\n    buf[fi_bytepos] &= ~(0x1 << fi_bitpos);\n  }\n};\n\nclass StuckAt1FI: public HardwareFaultInjector {\n public:\n  virtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,\n                           char *buf) {\n    unsigned fi_bytepos = fi_bit / 8;\n    unsigned fi_bitpos = fi_bit % 8;\n    buf[fi_bytepos] |= 0x1 << fi_bitpos;\n  }\n};\n\nstatic RegisterFaultInjector X(\"bitflip\", new BitFlipFI());\nstatic RegisterFaultInjector Y(\"stuck_at_0\", new StuckAt0FI());\nstatic RegisterFaultInjector Z(\"stuck_at_1\", new StuckAt1FI());\n" }, { "alpha_fraction": 0.6476510167121887, "alphanum_fraction": 0.6510066986083984, "avg_line_length": 25.95652198791504, "blob_id": "3da115e43d4ddb618c746c4cef1a6fca0c45086a", "content_id": "132f60d0254df607d3074cbf71662918b5a67701", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3295, "license_type": "permissive", "max_line_length": 72, "num_lines": 123, "path": "/gui/application/MyThread.java", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "package application;\n\nimport java.io.BufferedReader;\nimport java.io.Console;\nimport java.io.IOException;\nimport java.io.InputStreamReader;\nimport java.util.ArrayList;\n\n//import com.sun.glass.ui.Platform;\n\nimport javafx.fxml.FXMLLoader;\nimport javafx.scene.Node;\nimport 
javafx.scene.Parent;\nimport javafx.scene.Scene;\nimport javafx.stage.Stage;\n\n/**\n * The Fault Injection Thread. Called by the ProgressBarController class\n */\n@Deprecated\npublic class MyThread extends Thread {\n\n\tprivate int startIdx, nThreads, maxIdx;\n\tprivate boolean errorFlag;\n\n\tpublic MyThread() {\n\t}\n\n\t//\n\t@Override\n\tpublic void run() {\n\n\t\tParent root;\n\t\ttry {\n\t\t\tController.console = new ArrayList<String>();\n\n\t\t\t// #SFIT\n\t\t\t// changes how injection is done if we are in batch mode\n\t\t\tString execName;\n\t\t\tif (!Controller.isBatchMode) {\n\t\t\t\texecName = \"bin/injectfault \"\n\t\t\t\t\t\t+ Controller.currentProgramFolder + \"/llfi/\"\n\t\t\t\t\t\t+ Controller.currentProgramFolder + \"-faultinjection.exe \"\n\t\t\t\t\t\t+ Controller.inputString;\n\t\t\t} else {\n\t\t\t\texecName = \"bin/batchInjectfault \"\n\t\t\t\t\t\t+ Controller.currentProgramFolder + \"/\"\n\t\t\t\t\t\t+ Controller.currentProgramFolder + \".ll \"\n\t\t\t\t\t\t+ Controller.inputString;\n\t\t\t}\n\t\t\t\n\t\t\tProcessBuilder p = new ProcessBuilder(\"/bin/tcsh\", \"-c\",\n\t\t\t\t\tController.llfibuildPath + execName);\n\t\t\t// add the log to the GUI console\n\t\t\tController.console.add(\"$ \" + Controller.llfibuildPath + execName);\n\t\t\tp.redirectErrorStream(true);\n\t\t\tProcess pr = p.start();\n\n\t\t\tBufferedReader in1 = new BufferedReader(new InputStreamReader(\n\t\t\t\t\tpr.getInputStream()));\n\t\t\tString line1;\n\t\t\twhile ((line1 = in1.readLine()) != null) {\n\t\t\t\tController.console.add(line1);\n\t\t\t\tController.errorString.add(line1);\n\t\t\t\tif (line1.contains(\"error\") || line1.contains(\"Error\")\n\t\t\t\t\t\t|| line1.contains(\"ERROR\"))\n\t\t\t\t\terrorFlag = true;\n\n\t\t\t}\n\t\t\tpr.waitFor();\n\t\t\tin1.close();\n\t\t\tpr.destroy();\n\t\t\tjavafx.application.Platform.runLater(new Runnable() {\n\t\t\t\t@Override\n\t\t\t\tpublic void run() {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tif (errorFlag == true) {\n\t\t\t\t\t\t\t// progressBar.visibleProperty().unbind();\n\t\t\t\t\t\t\t// progressBar.setVisible(false);\n\t\t\t\t\t\t\t// indicator.setVisible(false);\n\t\t\t\t\t\t\terrorFlag = false;\n\n\t\t\t\t\t\t\tParent root = FXMLLoader.load(getClass()\n\t\t\t\t\t\t\t\t\t.getClassLoader().getResource(\n\t\t\t\t\t\t\t\t\t\t\t\"application/ErrorDisplay.fxml\"));\n\t\t\t\t\t\t\tStage stage = new Stage();\n\t\t\t\t\t\t\tstage.setTitle(\"Error\");\n\t\t\t\t\t\t\tstage.setScene(new Scene(root, 450, 100));\n\t\t\t\t\t\t\tstage.show();\n\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tController.errorString = new ArrayList<>();\n\t\t\t\t\t\t\t// progressBar.visibleProperty().unbind();\n\t\t\t\t\t\t\t// progressBar.setVisible(false);\n\t\t\t\t\t\t\t// indicator.setVisible(false);\n\t\t\t\t\t\t\t// onGeneratingResultTable();\n\t\t\t\t\t\t\t// generateFaultSummaryGraph();\n\n\t\t\t\t\t\t\t// tabBottom.getSelectionModel().select(faultStatus);\n\t\t\t\t\t\t\t// Node source = (Node) event.getSource();\n\t\t\t\t\t\t\t// Stage stage = (Stage)\n\t\t\t\t\t\t\t// source.getScene().getWindow();\n\t\t\t\t\t\t\t// stage.close();\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (IOException e) {\n\t\t\t\t\t\tSystem.err\n\t\t\t\t\t\t\t\t.println(\"Problem writing to the file statsTest.txt\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\t\t\tThread.sleep(1000);\n\n\t\t} catch (IOException e) {\n\t\t\tSystem.err.println(\"Problem writing to the file statsTest.txt\");\n\t\t} catch (InterruptedException e) {\n\t\t\tSystem.out.println(e);\n\t\t\t// TODO Auto-generated catch 
block\n\t\t\te.printStackTrace();\n\t\t}\n\n\t}\n\n}\n" }, { "alpha_fraction": 0.6172919869422913, "alphanum_fraction": 0.6348904371261597, "avg_line_length": 38.87963104248047, "blob_id": "372d20984ea9642ecaece41addda4f4c4ab4e439", "content_id": "b4a8c8fe59253bdc6a12605d1d1e90ef51582d39", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21536, "license_type": "permissive", "max_line_length": 149, "num_lines": 540, "path": "/installer/InstallLLFI.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "from __future__ import ( division, absolute_import, print_function, unicode_literals )\nimport sys, os, tempfile, logging\nif sys.version_info >= (3,):\n import urllib.request as urllib2\n import urllib.parse as urlparse\nelse:\n import urllib2\n import urlparse\nimport subprocess\nimport hashlib\nimport imp\nimport site\nimport xml.etree.ElementTree as ET\nimport argparse\n\n#Update These to change download targets\nLLVM34DOWNLOAD = {'URL':\"http://llvm.org/releases/3.4/llvm-3.4.src.tar.gz\",\n 'FILENAME':\"llvm-3.4.src.tar.gz\",\n 'MD5':\"46ed668a1ce38985120dbf6344cf6116\",\n 'EXTRACTPATH':\"llvmsrc\",\n 'EXTRACTEDNAME':'llvm-3.4',\n 'ARCHIVETYPE':'.tar.gz',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\nCLANG34DOWNLOAD = {'URL':\"http://llvm.org/releases/3.4/clang-3.4.src.tar.gz\",\n 'FILENAME':\"clang-3.4.src.tar.gz\",\n 'MD5':\"b378f1e2c424e03289effc75268d3d2c\",\n 'EXTRACTPATH':\"llvmsrc/tools/clang\",\n 'EXTRACTEDNAME':'clang-3.4',\n 'ARCHIVETYPE':'.tar.gz',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\nPYAML311DOWNLOAD = {'URL':\"http://pyyaml.org/download/pyyaml/PyYAML-3.11.tar.gz\",\n 'FILENAME':\"PyYAML-3.11.tar.gz\",\n 'MD5':\"f50e08ef0fe55178479d3a618efe21db\",\n 'EXTRACTPATH':\"pyyamlsrc\",\n 'EXTRACTEDNAME':'PyYAML-3.11',\n 'ARCHIVETYPE':'.tar.gz',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\nLLFIDOWNLOAD = {'URL':'https://github.com/scoult3r/LLFI/archive/master.zip', #\"https://github.com/DependableSystemsLab/LLFI/archive/master.zip\",\n 'FILENAME':\"master.zip\",\n 'MD5':\"fc3ba3cfea7ae3236bf027b847058105\", #\"c9a8c3ffcbd033a4d3cf1dc9a25de09c\" #You will have to change this outside of the git repo\n 'EXTRACTPATH':\"llfisrc\", #If you change this md5 within the repo, the md5 of the\n 'EXTRACTEDNAME':'LLFI-master', #repo will change\n 'ARCHIVETYPE':'.zip',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\n\n#LLVM33 Targets:\nLLVM33DOWNLOAD = {'URL':\"http://llvm.org/releases/3.3/llvm-3.3.src.tar.gz\",\n 'FILENAME':\"llvm-3.3.src.tar.gz\",\n 'MD5':\"40564e1dc390f9844f1711c08b08e391\",\n 'EXTRACTPATH':\"llvmsrc\",\n 'EXTRACTEDNAME':'llvm-3.3.src',\n 'ARCHIVETYPE':'.tar.gz',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\nCLANG33DOWNLOAD = {'URL':\"http://llvm.org/releases/3.3/cfe-3.3.src.tar.gz\",\n 'FILENAME':\"cfe-3.3.src.tar.gz\",\n 'MD5':\"8284891e3e311829b8e44ac813d0c9ef\",\n 'EXTRACTPATH':\"llvmsrc/tools/clang\",\n 'EXTRACTEDNAME':'cfe-3.3.src',\n 'ARCHIVETYPE':'.tar.gz',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\n#Primary Repository LLFI\nLLFIPUBLICDOWNLOAD = {'URL':'https://github.com/DependableSystemsLab/LLFI/archive/master.zip',\n 'FILENAME':\"master.zip\",\n 'MD5':\"04fcd2c0dc23b97f72eaf6b76e021821\",\n 'EXTRACTPATH':\"llfisrc\",\n 'EXTRACTEDNAME':'LLFI-master',\n 'ARCHIVETYPE':'.zip',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\n\nLLFIMERGEDOWNLOAD = {'URL':'https://github.com/DependableSystemsLab/LLFI/archive/merge.zip',\n 
'FILENAME':\"merge.zip\",\n 'MD5':\"04fcd2c0dc23b97f72eaf6b76e021821\",\n 'EXTRACTPATH':\"llfisrc\",\n 'EXTRACTEDNAME':'LLFI-merge',\n 'ARCHIVETYPE':'.zip',\n 'EXTRACTFLAG':True,\n 'DOWNLOADFLAG':True}\n\nDOWNLOADTARGETS = [LLVM34DOWNLOAD, CLANG34DOWNLOAD, PYAML311DOWNLOAD, LLFIPUBLICDOWNLOAD]\nDOWNLOADSDIRECTORY = \"./downloads/\"\nLLFIROOTDIRECTORY = \".\"\n\n\ndef checkDep(name, execName, versionArg, printParseFunc, parseFunc, minVersion, msg):\n try:\n which = subprocess.check_output(['which', execName])\n print(\"Success: \" + name + \" Found at: \" + str(which.strip()).lstrip(\"b\\'\").rstrip(\"\\'\"))\n version = str(subprocess.check_output([execName, versionArg], stderr=subprocess.STDOUT).strip())\n version = version.lstrip(\"b'\").rstrip('\\'').replace('\\\\n',' ')\n #print(\"v\", version)\n try:\n printVersion = str(printParseFunc(version))\n #print(\"pv\", printVersion)\n version = parseFunc(str(version).strip())\n #print(\"cv\", version)\n properVersion = True\n\n if int(version[0]) < minVersion[0]:\n properVersion = False\n elif (int(version[0]) == minVersion[0]) and (int(version[1]) < minVersion[1]):\n properVersion = False\n if properVersion:\n print(\"Success: \" + name + \"(\" + printVersion + \") is at or above version \" + \".\".join([str(x) for x in minVersion]))\n return True\n else:\n print(\"Error: \" + name + \"(\" + printVersion + \") is below version \" + \".\".join([str(x) for x in minVersion]))\n print(msg)\n return False\n except:\n print(\"Warning, \" + name + \" detected on path, but unable to parse version info.\")\n print(\"Please ensure that \" + name + \" is at least of version: \" + '.'.join([str(x) for x in minVersion]))\n return True \n except(subprocess.CalledProcessError):\n print(\"Error: \" + name + \" (\" + execName + \") not found on path\")\n print(\" Pease ensure \" + name + \" is installed and is available on the path\")\n print(msg)\n return False\n\n\ndef python3PrintParse(version):\n return version.split()[1]\n\ndef python3Parse(version):\n return version.split()[1].split('.')[:2]\n\npython3Msg = \"Error: Python 3 (python3) not found on path\" + \\\n \" Pease ensure python3 is installed and is available on the path\" + \\\n \" The latest version of Python3 can be downloaded from:\" + \\\n \" https://www.python.org/downloads/\"\n\ndef CmakePrintParse(version):\n return version.split()[2]\n\ndef CmakeParse(version):\n return version.split()[2].split('.')[:2]\n\ncmakeMsg = \"\\tCmake 2.8+ cant be downloaded from:\\n\\thttp://www.cmake.org/cmake/resources/software.html\" \n\ndef JavaPrintParse(version):\n return version.split()[2][1:-1]\n \ndef JavaParse(version):\n return version.split()[2][1:-1].split('.')[:2]\n\njavaMsg = (\"\\tThe latest version of the Oracle Java Development Kit (JDK) can be downloaded from\\n\"\n \"\\thttp://www.oracle.com/technetwork/java/javase/downloads/index.html\\n\"\n \"\\tPlease ensure you install the JDK, not only the Java Runtime Environment (JRE)\")\n\ndef JavaCPrintParse(version):\n return version.split()[1]\n\ndef JavaCParse(version):\n return version.split()[1].split('.')[:2]\n\njavacMsg = javaMsg\n\ndef AntPrintParse(version):\n return version.split()[3]\n\ndef AntParse(version):\n return [int(x) for x in version.split()[3].split('.')[:2]]\n\nantMsg = (\"\\tThe latest versino of Apache Ant can be downloaded from\\n\"\n \"\\thttp://ant.apache.org/bindownload.cgi\")\n\n\n#tcsh 6.18.01 (Astron) 2012-02-14 (x86_64-unknown-linux) options wide,nls,dl,al,kan,rh,nd,color,filec\n\ndef tcshPrintParse(version):\n 
return version.split()[1]\n\ndef tcshParse(version):\n return version.split()[1].split(\".\")[:2]\n\ntcshMsg = (\"\\ttcsh can be downloaded from: http://www.tcsh.org/MostRecentRelease\\n\"\n \"\\tor from your system package manager.\")\n\ndef checkDependencies(checkJava=True):\n hasAll = True\n hasAll = checkDep(\"Python 3\", \"python3\", \"--version\", python3PrintParse, python3Parse, [3,2], python3Msg) and hasAll\n hasAll = checkDep(\"Cmake\",\"cmake\",\"--version\", CmakePrintParse, CmakeParse, [2,8], cmakeMsg) and hasAll\n hasAll = checkDep(\"tcsh\", \"tcsh\", \"--version\", tcshPrintParse, tcshParse,[6,0], tcshMsg) and hasAll\n \n if checkJava:\n hasAll = checkDep(\"Java\", \"java\", \"-version\", JavaPrintParse, JavaParse, [1,7], javaMsg) and hasAll\n hasAll = checkDep(\"JavaC\", \"javac\", \"-version\", JavaCPrintParse, JavaCParse, [1,7], javacMsg) and hasAll\n #hasAll = checkDep(\"Ant\", \"ant\", \"-version\", AntPrintParse, AntParse, [1,7], antMsg) and hasAll\n\n return hasAll\n\n\n\ndef Touch(path):\n with open(path, 'a'):\n os.utime(path, None)\n\ndef DownloadSources(targets, downloadDirectory):\n FullDownloadsPath = os.path.abspath(downloadDirectory)\n\n CheckAndCreateDir(FullDownloadsPath)\n for target in targets:\n if target[\"DOWNLOADFLAG\"] == True:\n CheckAndDownload(target['FILENAME'], target['MD5'], target['URL'])\n\ndef CheckAndDownload(filename, md5, url):\n md5new = \"\"\n filepath = os.path.abspath(\"./downloads/\" + filename)\n if os.path.isfile(filepath):\n print(\"Download target \" + filename + \" already exists.\")\n with open(filepath, 'rb') as check:\n data = check.read()\n md5new = hashlib.md5(data).hexdigest()\n if md5 == md5new:\n print(\"MD5 Verified\")\n return True\n else:\n print(\"MD5 does not match! Deleting File.\")\n subprocess.call([\"rm\", filepath])\n DownloadFile(url, \"./downloads/\")\n return True\n\ndef CheckAndCreateDir(dir):\n FullPath = os.path.abspath(dir)\n if (os.path.exists(FullPath)):\n if (os.path.isdir(FullPath)):\n print(\"%s directory exists.\" % (dir))\n return True\n else:\n print(\"%s path occupied by file, deleting...\" % (dir))\n subprocess.call([\"rm\", FullPath])\n print(\"Creating %s directory.\" % (dir))\n subprocess.call([\"mkdir\", dir])\n return False\n\ndef DownloadFile(url, destinationDirectory, desc=None):\n u = urllib2.urlopen(url)\n\n scheme, netloc, path, query, fragment = urlparse.urlsplit(url)\n filename = os.path.basename(path)\n if not filename:\n filename = 'downloaded.file'\n if desc:\n filename = os.path.join(desc, filename)\n\n with open(os.path.join(destinationDirectory, filename), 'wb') as f:\n meta = u.info()\n meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all\n meta_length = meta_func(\"Content-Length\")\n file_size = None\n if meta_length:\n file_size = int(meta_length[0])\n print(\"Downloading: {0} Bytes: {1}\".format(url, file_size))\n\n file_size_dl = 0\n block_sz = 8192\n while True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n\n file_size_dl += len(buffer)\n f.write(buffer)\n\n status = \"{0:16}\".format(file_size_dl)\n if file_size:\n status += \" [{0:6.2f}%]\".format(file_size_dl * 100 / file_size)\n status += chr(13)\n print(status, end=\"\")\n print()\n\n return filename\n\ndef ExtractSources(targets, downloadsDirectory, extractionDirectory):\n fullDownloadsPath = os.path.abspath(downloadsDirectory)\n fullExtractionPath = os.path.abspath(extractionDirectory)\n CheckAndCreateDir(extractionDirectory)\n print(\"Moving to extraction root directory.\")\n 
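# unpack every archive from inside the extraction root so each relative EXTRACTPATH resolves against it\n    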
os.chdir(extractionDirectory)\n for target in targets:\n if target[\"EXTRACTFLAG\"] == True:\n path = target['EXTRACTPATH']\n dirName = target['EXTRACTEDNAME']\n print(\"Extracting \" + target['FILENAME'])\n archivePath = os.path.join(fullDownloadsPath, target['FILENAME'])\n if os.path.isfile(archivePath):\n ExtractArchive(target[\"ARCHIVETYPE\"], archivePath)\n print(\"Renaming \" + dirName + \" to \" + path)\n CheckAndCreateDir(path)\n subprocess.call(\"cp -R \" + dirName+\"/* \" + path, shell=True)\n subprocess.call([\"rm\", \"-rf\", dirName])\n os.chdir(fullExtractionPath)\n\ndef ExtractArchive(archiveType, archivePath):\n if archiveType == \".tar.gz\":\n subprocess.call([\"tar\", \"-xf\", archivePath])\n if archiveType == \".zip\":\n archivePath = archivePath[:-4]\n subprocess.call([\"unzip\", \"-q\", archivePath])\n\ndef UpdateFlags(targets, key, value):\n newList = []\n for target in targets:\n target[key] = value\n newList.append(target)\n return newList\n\ndef build(buildLLVM, forceMakeLLVM, noGUI):\n #Build LLVM\n if buildLLVM:\n CheckAndCreateDir(\"llvm\")\n os.chdir(\"llvm\")\n if (not os.path.exists(\"CMAKESUCCESS\")) or forceMakeLLVM:\n print(\"Running cmake for LLVM:\")\n p = subprocess.call([\"cmake\", \"../llvmsrc\", \"-DLLVM_REQUIRES_RTTI=1\", \"-DCMAKE_BUILD_TYPE=Release\"])\n if p != 0:\n sys.exit(p)\n Touch(\"CMAKESUCCESS\")\n\n if (not os.path.exists(\"MAKESUCCESS\")) or forceMakeLLVM:\n print(\"Running make for LLVM\")\n p = subprocess.call(\"make\")\n if p != 0:\n sys.exit(p)\n Touch(\"MAKESUCCESS\")\n\n os.chdir(\"..\")\n\n script_path = os.getcwd()\n\n #Configure and Build LLFI\n\n \"\"\"\n llvm_paths_cmake = os.path.join(script_path, \"llfisrc/config/llvm_paths.cmake\")\n llvm_paths_py = os.path.join(script_path, \"llfisrc/config/llvm_paths.py\")\n\n cmake_File = open(llvm_paths_cmake, \"w\")\n LLVM_DST_ROOT = os.path.realpath(\"llvm\")\n LLVM_SRC_ROOT = os.path.realpath(\"llvmsrc\")\n LLVM_GXX_BIN_DIR = os.path.realpath(\"llvm/bin\")\n\n cmake_File.write(\"set(LLVM_DST_ROOT \" + LLVM_DST_ROOT + \")\\n\")\n cmake_File.write(\"set(LLVM_SRC_ROOT \" + LLVM_SRC_ROOT + \")\\n\")\n cmake_File.close()\n\n py_File = open(llvm_paths_py, \"w\")\n py_File.write(\"LLVM_DST_ROOT = \" + '\"' + LLVM_DST_ROOT + '\"\\n')\n py_File.write(\"LLVM_SRC_ROOT = \" + '\"' + LLVM_SRC_ROOT + '\"\\n')\n py_File.write(\"LLVM_GXX_BIN_DIR = \" + '\"' + LLVM_GXX_BIN_DIR + '\"\\n')\n py_File.close()\n \"\"\"\n\n print(\"Running ./setup for LLFI:\")\n os.chdir(\"llfisrc\")\n setup = [\"./setup\", \"-LLVM_DST_ROOT\", \"../llvm\", \"-LLVM_SRC_ROOT\", \"../llvmsrc\", \"-LLFI_BUILD_ROOT\", \"../llfi\", \"-LLVM_GXX_BIN_DIR\", \"../llvm/bin\"]\n if noGUI:\n setup.append(\"--no_gui\")\n p = subprocess.call(setup)\n if p != 0:\n sys.exit(p)\n os.chdir(\"..\")\n\ndef buildGUI():\n #Build LLFI GUI\n updateGUIXMLBuildPath(getJavaFXLibLocation())\n currPath = os.getcwd()\n antPath = os.path.join(currPath, \"llfisrc/Gui_sourceCode/build.xml\")\n binPath = os.path.join(currPath, \"llfisrc/Gui_sourceCode/bin\")\n jarPath = os.path.join(currPath, \"llfisrc/LLFI-GUI/llfi_gui.jar\")\n CheckAndCreateDir(\"llfisrc/LLFI-GUI\")\n p = subprocess.call(\"cp llfisrc/LLFI-GUI/* llfi/LLFI-GUI/\", shell=True)\n p = subprocess.call([\"rm\", \"-rf\", jarPath])\n p = subprocess.call([\"rm\", \"-rf\", binPath])\n p = subprocess.call([\"ant\", \"-f\", antPath ], env=os.environ)\n p = subprocess.call([\"ant\", \"-f\", antPath, \"jar\" ], env=os.environ)\n\ndef buildPyYaml(forceBuild):\n script_path = os.getcwd()\n pyyaml_path = 
os.path.join(script_path,\"pyyaml\")\n os.chdir(\"pyyamlsrc\")\n\n if (not os.path.exists('YAMLBUILDSUCCESS')) or forceBuild:\n p = subprocess.call([\"python3\",\"setup.py\",\"install\",\"--prefix=\"+pyyaml_path])\n if p != 0:\n sys.exit(p)\n Touch(\"YAMLBUILDSUCCESS\")\n\n os.chdir(\"..\")\n\ndef updateGUIXMLBuildPath(newPath):\n print(\"Modifying LLFI-GUI build.xml\")\n tree = ET.parse('llfisrc/Gui_sourceCode/build.xml')\n root = tree.getroot()\n pathnode = root.findall(\"./path[@id='JavaFX SDK.libraryclasspath']/pathelement\")\n\n for path in root.iter('path'):\n if path.get('id') == \"JavaFX SDK.libraryclasspath\":\n pathelement = path.find('./pathelement[@location]')\n pathelement.set(\"location\", newPath + \"jfxrt.jar\")\n \n for path in root.iter('target'):\n if path.get('name') == \"jar\":\n buildelement = path.find('./jar[@destfile]')\n buildelement.set(\"destfile\", \"../../llfi/LLFI-GUI/llfi_gui.jar\")\n\n for target in root.iter('target'):\n if target.get('name') == \"jar\":\n element = target.find(\"./jar/zipfileset[@includes='jfxrt.jar']\")\n element.set(\"dir\", newPath)\n tree.write('llfisrc/Gui_sourceCode/build.xml') \n\n\ndef getJavaFXLibLocation():\n uname = subprocess.check_output(\"uname\").strip()\n javaLibPath = None\n if 'Darwin' in str(uname):\n javahome = subprocess.check_output([\"/usr/libexec/java_home\"], universal_newlines=True).strip()\n javaLibPath = javahome+\"/jre/lib/\"\n else:\n javaBinPath = subprocess.check_output(\"readlink -f $(which java)\", shell=True, universal_newlines=True)\n javaBinPath = javaBinPath.strip()\n pathSplit = javaBinPath.split(\"/\")\n if (str('jre') in [str(x) for x in pathSplit]):\n javaLibPath = javaBinPath[:-9] + \"/lib/\"\n else:\n javaLibPath = javaBinPath[:-9] + \"/jre/lib/\"\n \n if (os.path.exists(os.path.join(javaLibPath, \"ext/jfxrt.jar\"))):\n javaLibPath = os.path.join(javaLibPath, \"ext/\")\n print(\"Detecting JFX Lib at \" + str(javaLibPath))\n return javaLibPath\n\ndef addEnvs():\n scriptPath = os.path.dirname(os.path.realpath(__file__))\n llfibuildPath = os.path.join(scriptPath, \"llfi/\")\n\n versionString = subprocess.check_output([\"python3\", \"--version\"], stderr=subprocess.STDOUT)\n versionString = versionString.strip()\n versionSplit = versionString.split()\n versionSplit = str(versionSplit[1]).split('.')\n\n majorVer = versionSplit[0]\n minorVer = versionSplit[1]\n\n pyVersion = str(majorVer) + \".\" + str(minorVer)\n pyPath = os.path.join(scriptPath, \"pyyaml/lib/python\"+pyVersion.strip(\"b'\")+\"/site-packages/\")\n\n homePath = os.environ['HOME']\n tcshPath = os.path.join(homePath, \".tcshrc\")\n\n with open(tcshPath, \"a\") as rcFile:\n rcFile.write(\"setenv PYTHONPATH \" + pyPath + \"\\n\")\n rcFile.write(\"setenv llfibuild \" + llfibuildPath + \"\\n\")\n rcFile.write(\"setenv zgrviewer \" + llfibuildPath + \"tools/zgrviewer/\" + \"\\n\")\n\nparser = argparse.ArgumentParser(\n description=(\"Installer for UBC DependableSystemsLab's LLFI\"),\n epilog=\"More information available at www.github.com/DependableSystemsLab/LLFI\",\n usage='%(prog)s [options]')\nparser.add_argument(\"-v\", \"--version\", action='version', version=\"LLFI Installer v0.1, May 17th 2014\")\nparser.add_argument(\"-sDC\", \"--skipDependencyCheck\", action='store_true', help=\"Skip Dependency Checking\")\nparser.add_argument(\"-cD\", \"--cleanDownloads\", action='store_true', help=\"Clean (rm) already downloaded files before installing\")\nparser.add_argument(\"-cS\", \"--cleanSources\", action='store_true', help=\"Clean (rm) 
already extracted files before installing\")\nparser.add_argument(\"-nD\", \"--noDownload\", action='store_true', help=\"Do not download any files\")\nparser.add_argument(\"-nE\", \"--noExtract\", action='store_true', help=\"Do not extract the archives before installing\")\nparser.add_argument(\"-nB\", \"--noBuild\", action='store_true', help=\"Do not perform installation, only downloading + extracting\")\nparser.add_argument(\"-nGUI\", \"--noGUI\", action='store_true', help=\"Do not build the Java LLFI GUI\")\nparser.add_argument(\"-nBLLVM\", \"--noBuildLLVM\", action='store_true', help=\"Do not compile the LLVM\")\nparser.add_argument(\"-fBLLVM\", \"--forceBuildLLVM\", action='store_true', help=\"Force recompilation of LLVM\")\nparser.add_argument(\"-fBPyYaml\", \"--forceBuildPyYaml\", action='store_true', help=\"Force recompilation of PyYaml\")\nparser.add_argument(\"-rT\", \"--runTests\", action='store_true', help=\"Run all regression tests for LLFI after installation\")\nparser.add_argument(\"-tF\", \"--testFeature\", action='store_true', help=\"LLFI installer development use only\")\n\n\ndef testFeature():\n print(\"Testing Experimental Installer Feature\")\n updateGUIXMLBuildPath(getJavaFXLibLocation())\n currPath = os.getcwd()\n antPath = os.path.join(currPath, \"llfisrc/Gui_sourceCode/build.xml\")\n binPath = os.path.join(currPath, \"llfisrc/Gui_sourceCode/bin\")\n jarPath = os.path.join(currPath, \"llfisrc/LLFI-GUI/llfi_gui.jar\")\n CheckAndCreateDir(\"llfisrc/LLFI-GUI\")\n p = subprocess.call(\"cp llfisrc/LLFI-GUI/* llfi/LLFI-GUI/\", shell=True)\n p = subprocess.call([\"rm\", \"-rf\", jarPath])\n p = subprocess.call([\"rm\", \"-rf\", binPath])\n p = subprocess.call([\"ant\", \"-f\", antPath ], env=os.environ)\n p = subprocess.call([\"ant\", \"-f\", antPath, \"jar\" ], env=os.environ)\n\ndef runTests():\n LLFI_BUILD_DIR = os.path.dirname(os.path.realpath(__file__))\n subprocess.call([\"python3\", LLFI_BUILD_DIR + \"/llfi/test_suite/SCRIPTS/llfi_test\", \"--all\", \"--threads\", \"2\", \"--verbose\"])\n\nif __name__ == \"__main__\":\n args = parser.parse_args(sys.argv[1:])\n if args.testFeature:\n testFeature()\n sys.exit(0)\n if not args.skipDependencyCheck:\n print(\"Checking LLFI Pre-Requisites and Dependencies\")\n makeGUI = True\n if args.noGUI:\n makeGUI = False\n deps = checkDependencies(makeGUI)\n if not deps:\n print(\"Some LLFI Pre-Requisites are missing!\")\n print(\"Please see Errors above, and install the missing dependencies\")\n print(\"Exiting Installer...\")\n sys.exit(-1)\n if args.cleanDownloads:\n print(\"Cleaning downloads...\")\n subprocess.call([\"rm\", \"-rf\", DOWNLOADSDIRECTORY])\n print(\"Done.\")\n if args.cleanSources:\n print(\"Cleaning extracted sources...\")\n currPath = os.getcwd()\n if os.path.isdir(LLFIROOTDIRECTORY):\n os.chdir(LLFIROOTDIRECTORY)\n for target in DOWNLOADTARGETS: \n subprocess.call([\"rm\", \"-rf\", target['EXTRACTPATH']])\n print(\"Done.\")\n os.chdir(currPath) \n print(\"Installing LLFI to: \" + os.path.abspath(LLFIROOTDIRECTORY))\n if not args.noDownload:\n DownloadSources(DOWNLOADTARGETS, DOWNLOADSDIRECTORY)\n if not args.noExtract:\n ExtractSources(DOWNLOADTARGETS, DOWNLOADSDIRECTORY, LLFIROOTDIRECTORY)\n if not args.noBuild:\n build(not args.noBuildLLVM, args.forceBuildLLVM, args.noGUI)\n #if not args.noGUI:\n # buildGUI()\n addEnvs() #setenv...\n buildPyYaml(args.forceBuildPyYaml)\n if args.runTests:\n runTests()\n\n" }, { "alpha_fraction": 0.7694370150566101, "alphanum_fraction": 0.7694370150566101, "avg_line_length": 
27.769229888916016, "blob_id": "c9b6cd5950a5932d8e701ef34e8dbf6d7e314108", "content_id": "4030712d0d9b9afcafd7eaec223819718d3db037", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 373, "license_type": "permissive", "max_line_length": 76, "num_lines": 13, "path": "/web-app/views/src/js/stores/profilingStatusStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar profilingStatusActions = require(\"./../actions/profilingStatusActions\");\nvar profilingStatus = [];\nvar profilingStatusStore = Reflux.createStore({\n\tlistenables: [profilingStatusActions],\n\n\tonUpdateProfilingStatus: function(data) {\n\t\tprofilingStatus = data;\n\t\tthis.trigger(profilingStatus);\n\t},\n});\n\nmodule.exports = profilingStatusStore;" }, { "alpha_fraction": 0.6476510167121887, "alphanum_fraction": 0.6510066986083984, "avg_line_length": 24.95652198791504, "blob_id": "3da115e43d4ddb618c746c4cef1a6fca0c45086a", "content_id": "132f60d0254df607d3074cbf71662918b5a67701", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 596, "license_type": "permissive", "max_line_length": 104, "num_lines": 23, "path": "/web-app/views/src/js/components/mainWindow/bottomPannel/outputTabs/console.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar consoleLogStore = require(\"./../../../../stores/consoleLogStore\");\nvar Reflux = require(\"reflux\");\n\nvar Console = React.createClass({\n\tmixins: [Reflux.connect(consoleLogStore,\"consoleLog\")],\n\tgetInitialState: function() {\n\t\treturn {\n\t\t\tconsoleLog: ''\n\t\t};\n\t},\n\trender: function() {\n\t\tvar className = \"console\" + (this.props.shouldDisplay ? 
\"\" : \" hide\");\n\t\treturn (\n\t\t\t<div class={className}>\n\t\t\t\t<textarea value={this.state.consoleLog} class=\"consoleBox disabled\" id=\"console\" rows=\"10\" readOnly>\n\t\t\t\t</textarea>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = Console;" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 19, "blob_id": "0d166fe77be3a825f41f4faf6c61bb8dd46a10ce", "content_id": "9cd1ef7dccbaf8311d908282e5999eb581a71331", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "permissive", "max_line_length": 19, "num_lines": 2, "path": "/sample_programs/bfs/readme.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "run with parameter:\n\t-i graph_input.dat\n" }, { "alpha_fraction": 0.5110593438148499, "alphanum_fraction": 0.5878928899765015, "avg_line_length": 25.030303955078125, "blob_id": "8e8d7b62bb2e417577beeddd99eca9ffb2b05dea", "content_id": "becb4430fa452b0947cd7a378778305e1c8e50c9", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 859, "license_type": "permissive", "max_line_length": 75, "num_lines": 33, "path": "/test_suite/MakefileGeneration/readable_IR/pstart.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/**************************************************************************\nPSTART.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. \nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:22:58 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: pstart.h,v 1.10 2005/02/17 19:42:21 bzfloebe Exp $ */\n\n\n\n#ifndef _PSTART_H\n#define _PSTART_H\n\n\n#include \"defines.h\"\n\n\nextern long primal_start_artificial _PROTO_(( network_t * ));\n\n\n#endif\n" }, { "alpha_fraction": 0.6413692831993103, "alphanum_fraction": 0.646859347820282, "avg_line_length": 26.89639663696289, "blob_id": "26cfd59b2afcdd82c21726f8428879449108af2c", "content_id": "9c572dbdca90c1519014cf487743a84fe263fc9d", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6193, "license_type": "permissive", "max_line_length": 115, "num_lines": 222, "path": "/tools/FIDL/tests/test_FIDL.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n\"\"\"\nThis script is DEPRECATED! This was used to initially test if the 38/39 software failures\nconverted from .cpp format to the FIDL yaml format was working properly.\n\nTests FIDL-Algorithm.py.\n\nUsage: %(prog)s [OPTIONS]\n\n-a: adds FIDL test cases\n-r: removes FIDL test cases\n-t: executes tests\n-h: shows help\n\nSteps:\n1. Execute with -a in LLFI_SRC\n2. Build LLFI\n3. Execute this script in LLFI_DST with -t\n4. 
Execute -r in LLFI_SRC and rebuild LLFI to get rid of test cases\n\nRun this file *after* successfully executing the llfi regression test\nThis test compares FIDL-Algorithm.py's generated faultinjection.ll and profiling.ll\nagainst the actual faultinjection.ll and profiling.ll from the real selector cpp file.\n\"\"\"\n\nimport sys, os, subprocess, shutil\nimport yaml\nfrom distutils.dir_util import copy_tree\n\nscript_path = os.path.realpath(os.path.dirname(__file__))\nprog = os.path.basename(sys.argv[0])\n\nllfiroot = os.path.dirname(os.path.dirname(os.path.dirname(script_path)))\n\ntest_config_path = os.path.join(script_path, 'test_config.yaml')\nfidl_al_path = os.path.join(script_path, '../FIDL-Algorithm.py')\n\nprograms_dir = os.path.join(llfiroot, 'test_suite/PROGRAMS/')\nfidl_config_dir = os.path.join(script_path, 'fidl_config/')\nfidl_tests_dir = os.path.join(script_path, 'fidl_test/')\n\nbin_path = os.path.join(script_path, '../../../bin')\ninstrument_path = os.path.join(bin_path, 'instrument')\nprofile_path = os.path.join(bin_path, 'profile')\ninjectfault_path = os.path.join(bin_path, 'injectfault')\n\nir_ext = '.ll'\nexpected = 'Expected'\noutput = 'Output'\n\ndef ir_file_equals(ir1_path, ir2_path):\n    with open(ir1_path) as f:\n        lines1 = f.read().splitlines()\n    with open(ir2_path) as f:\n        lines2 = f.read().splitlines()\n    \n    if len(lines1) != len(lines2):\n        return False\n    \n    # check the file content (ignoring the first line, where the path\n    # of the files will be different)\n    for i in range(1, len(lines1)):\n        if lines1[i] != lines2[i]:\n            return False\n    \n    return True\n    \ndef is_same_result(test_dir_path, program_name):\n    file_list = ['llfi/%s-faultinjection%s' % (program_name, ir_ext), 'llfi/%s-profiling%s' % (program_name, ir_ext)]\n\n    # check if the generated ir files are equal\n    for n in file_list:\n        if not ir_file_equals(os.path.join(test_dir_path, expected, n), os.path.join(test_dir_path, output, n)):\n            return False\n\n    return True\n    \ndef execute_tests():\n    global doc\n    \n    # delete and create a new tests folder\n    del_mkdir(fidl_tests_dir)\n    \n    # create folder for each test case\n    for n in doc['tests']:\n        dir_name, name = extract_names(n)\n        dir_path = os.path.join(fidl_tests_dir, dir_name)\n        os.makedirs(dir_path)\n        print('Testing %s' % dir_name)\n        \n        program_name = n['config']['program']\n        \n        l = [[expected, n['config']['simulate']], [output, name]]\n        for i in l:\n            # create inner directory and cd to it\n            inner_dir_path = os.path.join(dir_path, i[0])\n            os.makedirs(inner_dir_path)\n            os.chdir(inner_dir_path)\n            \n            # creates the input.yaml\n            dump_yaml(os.path.join(inner_dir_path, 'input.yaml'), create_input_yaml(n, i[1]))\n            \n            # copy in the program\n            copy_tree(os.path.join(programs_dir, program_name), inner_dir_path)\n            \n            # instrument\n            execlist = [instrument_path, '--readable', '-lpthread', program_name + ir_ext]\n            ret_val = subprocess.call(execlist, stdout = open(os.devnull, 'wb'), stderr = open(os.devnull, 'wb'))\n            if (ret_val != 0):\n                print('Error: Instrument failed!')\n                exit(1)\n        \n        # check results\n        if is_same_result(dir_path, program_name):\n            print('Success: %s' % dir_name)\n        else:\n            print('Error: %s' % dir_name)\n        \ndef create_input_yaml(test, selector):\n    global doc\n    \n    template = doc['inputTemplate'].copy()\n    template['compileOption']['instSelMethod'][0]['customInstselector']['include'] = [selector]\n    \n    return template\n\ndef del_mkdir(dir_path):\n    if os.path.exists(dir_path):\n        shutil.rmtree(dir_path)\n    os.makedirs(dir_path)\n    \ndef extract_names(test):\n    
f_mode = test['FIDL']['Failure_Mode']\n f_class = test['FIDL']['Failure_Class']\n \n filename = '_%s_%s' % (f_class, f_mode)\n name = '%s(%s)' % (f_mode, f_class)\n \n return (filename, name)\n \ndef dump_yaml(path, yaml_object):\n f = open(path, 'w')\n f.write(yaml.dump(yaml_object))\n f.close()\n\ndef run_fidl_algorithm(add):\n global doc\n \n # delete and create a new fidl script config(s) folder\n if add:\n del_mkdir(fidl_config_dir)\n \n for n in doc['tests']:\n filename, name = extract_names(n)\n filename = filename + '.yaml'\n \n # create new fidl script from ones specified in test_config.yaml\n filename_path = os.path.join(fidl_config_dir, filename)\n dump_yaml(filename_path, n['FIDL'])\n \n if add:\n option = [fidl_al_path, '-a', filename_path]\n else:\n option = [fidl_al_path, '-r', name]\n \n #TODO what is the proper way for this?\n # redirect error and such...\n \n # executes the fidl algorithm on the script\n retVal = subprocess.call(option)\n if retVal != 0:\n print('Error: %s is not a valid fidl script!' % filename)\n exit(1)\n \n # delete fidl script config(s) folder if removing\n if os.path.exists(fidl_config_dir) and not add:\n shutil.rmtree(fidl_config_dir)\n \ndef remove_tests():\n run_fidl_algorithm(False)\n\ndef add_tests():\n run_fidl_algorithm(True)\n\ndef usage(msg = None):\n retval = 0\n if msg is not None:\n retval = 1\n msg = 'ERROR: ' + msg\n print(msg, file = sys.stderr)\n print(__doc__ % globals(), file = sys.stderr)\n sys.exit(retval)\n\ndef parse_args(args):\n option = args[0]\n if option == '-a':\n add_tests()\n elif option == '-r':\n remove_tests()\n elif option == '-t':\n execute_tests()\n elif option == '-h':\n usage()\n else:\n usage('Invalid Argument: ' + option)\n\ndef read_yaml():\n global doc\n f = open(test_config_path)\n doc = yaml.load(f)\n f.close()\n\ndef main(args):\n print('This script is DEPRECATED! 
See -h')\n    read_yaml()\n    parse_args(args)\n\nif __name__ == '__main__':\n    if len(sys.argv) == 1:\n        usage('Please specify an option.')\n    main(sys.argv[1:])\n" }, { "alpha_fraction": 0.6827333569526672, "alphanum_fraction": 0.6930881142616272, "avg_line_length": 25.159574508666992, "blob_id": "418aeb14d16e36072ce414ee72e48e2ab6a2f7d8", "content_id": "d4e3663462187907d7c03bda2ce899086b4f68ee", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4731, "license_type": "permissive", "max_line_length": 94, "num_lines": 188, "path": "/runtime_lib/_SoftwareFaultInjectors.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"FaultInjector.h\"\n#include \"FaultInjectorManager.h\"\n#include <fstream>\n#include <iostream>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <pthread.h>\n#include <unistd.h>\n\n//2^25 == 32MB\n#define MEM_EXHAUSTION_UNIT 33554432\n\nclass BitCorruptionInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tunsigned int fi_bytepos = fi_bit/8;\n\t\tunsigned int fi_bitpos = fi_bit%8;\n\t\tbuf[fi_bytepos] ^= 0x1 << fi_bitpos;\n\t\treturn;\n\t}\n\t\n\tstatic BitCorruptionInjector* getBitCorruptionInjector(){\n\t\tstatic BitCorruptionInjector* injector_ptr = NULL;\n\t\tif(injector_ptr == NULL){\n\t\t\tinjector_ptr = new BitCorruptionInjector();\n\t\t\treturn injector_ptr;\n\t\t}else\treturn injector_ptr;\n\t}\n};\n\nclass MemoryLeakInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tvoid* fake_p = malloc(1024 * sizeof(char));\n\t\tvoid** newbuf = (void**) buf;\n\t\t*newbuf = fake_p;\n\t\treturn;\n\t}\n};\n\nclass HangInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\twhile(1);\n\t\treturn;\n\t}\n};\n\nclass SleepInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tsleep(3);\n\t\treturn;\n\t}\n};\n\nstatic RegisterFaultInjector DA(\"HighFrequentEvent(Timing)\", new SleepInjector());\n\nclass ChangeValueInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tif(is_replace == false){\n\t\t\tint* newbuf = (int*) buf;\n\t\t\t*newbuf = *newbuf + add_val;\n\t\t}\n\t\telse{\n\t\t\tint* newbuf = (int*) buf;\n\t\t\t*newbuf = rep_val;\n\t\t}\n\t\treturn;\n\t}\n\t\n\tChangeValueInjector(int val, bool replace):add_val(val), rep_val(val), is_replace(replace){};\n\t\n\tprivate:\n\tint add_val;\n\tint rep_val;\n\tbool is_replace;\n};\n\nclass InappropriateCloseInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tif(add_close){\n\t\t\tFILE** newbuf = (FILE**) buf;\n\t\t\tfclose(*newbuf);\n\t\t}else{\n\t\t\tFILE* fp = fopen(\"fake_file.txt\", \"w\");\n\t\t\tFILE** newbuf = (FILE**) buf;\n\t\t\t*newbuf = fp;\n\t\t}\n\t\treturn;\n\t}\n\tInappropriateCloseInjector(bool addclose):add_close(addclose){};\n\t\n\tprivate:\n\tbool add_close;\n};\n\nclass StalePointerInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char 
*buf){\n\t\tvoid** newbuf = (void**) buf;\n\t\tfree(*newbuf);\n\t}\n};\n\nclass MemoryExhaustionInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tvoid* p = NULL;\n\t\tvoid* left_space = NULL;\n\t\tdo{\n\t\t\tp = malloc(MEM_EXHAUSTION_UNIT);\n\t\t\tif(p == NULL)\tp = malloc(MEM_EXHAUSTION_UNIT>>4);\n\t\t\tif(p == NULL)\tp = malloc(MEM_EXHAUSTION_UNIT>>8);\n\t\t\tif(p == NULL)\tp = malloc(MEM_EXHAUSTION_UNIT>>12);\n\t\t\tif(p != NULL)\tleft_space = p;\n\t\t}while(p != NULL);\n\t\tif(non_left_space){\n\t\t\tvoid** newbuf = (void**) buf;\n\t\t\t*newbuf = p;\n\t\t}else{\n\t\t\tvoid** newbuf = (void**) buf;\n\t\t\t*newbuf = left_space;\n\t\t}\n\t\treturn;\n\t}\n\t\n\tMemoryExhaustionInjector(bool nonleftspace):non_left_space(nonleftspace) {};\n\tprivate:\n\tbool non_left_space;\n};\n\nclass WrongFormatInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tswitch(*buf){\n\t\t\tcase 1:\n\t\t\t\t*buf = 2; break;\n\t\t\tcase 2:\n\t\t\t\t*buf = 4; break;\n\t\t\tcase 4:\n\t\t\t\t*buf = 8; break;\n\t\t\tcase 8:\n\t\t\t\t*buf = 4; break;\n\t\t\tcase 10:\n\t\t\t\t*buf = 4; break;\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t}\n\t\treturn;\n\t}\n};\n\nclass PthreadDeadLockInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tpthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;\n\t\tpthread_mutex_lock(&mutex1);\n\t\tpthread_t thread1 = pthread_t(*buf);\n\t\tpthread_join(thread1, NULL);\n\t\tpthread_mutex_lock(&mutex1);\n\t\treturn;\n\t}\n};\n\nclass PthreadThreadKillerInjector: public SoftwareFaultInjector {\n\tpublic:\n\tvirtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf){\n\t\tpthread_t t = pthread_t(*buf);\n\t\tusleep(20000);\n\t\tpthread_cancel(t);\n\t\treturn;\n\t}\n};\n\nclass PthreadRaceConditionInjector: public SoftwareFaultInjector {\n  public:\n  virtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,char *buf) {\n    pthread_mutex_t *fake_mutex = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t));\n    pthread_mutex_init(fake_mutex, NULL);\n    pthread_mutex_t **newbuf = (pthread_mutex_t**) buf;\n    *newbuf = fake_mutex;\n    return;\n  }\n};\n\n" }, { "alpha_fraction": 0.5464426875114441, "alphanum_fraction": 0.5533596873283386, "avg_line_length": 19.653060913085938, "blob_id": "4e5d5d0836a09e43d5b8e1e00c081a73842e214e", "content_id": "2dfd10a9f507f4b1528277c5d82f258066ee6233", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1012, "license_type": "permissive", "max_line_length": 75, "num_lines": 49, "path": "/test_suite/PROGRAMS/memcpy1/memcpy1.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\nint main ()\n{\n  int size=50;\n  char* src= malloc (size);\n  if (src==NULL)\n  { \n    printf (\" src can't be created; the system has run out of memory:\\n\" );\n    return (0);\n  }\n  \n  char* dest= malloc (size);\n  if (dest==NULL)\n  { \n    printf (\" dest can't be created; the system has run out of memory:\\n\" );\n    return (0);\n  }\n  \n  FILE *fp;\n  fp=fopen(\"sample.txt\",\"r\");\n  if (fp == NULL)\n    printf(\"ERROR!\");\n  fread(src,sizeof(char),size ,fp);\n  fclose (fp);\n  \n  printf(\"The content of input file is: %s\\n\", 
src);\n  int i;\n  memmove(dest, src,50); \n  printf(\"The content of buffer is: %s\\n\", dest);\n  fp=fopen(\"output.txt\",\"w\");\n  if (fp != NULL)\n    fwrite(dest,sizeof(char),size ,fp);\n  fclose (fp);\n  int ch;\n  fp = fopen(\"output.txt\",\"r\");\n  printf (\" the content of OUTPUT file is :\\n\" );\n  ch = fgetc(fp);\n  while(ch != EOF)\n  {\n    printf(\"%c\", ch);\n    ch = fgetc(fp);\n  }\n  fclose (fp);\n  free(src);\n  free(dest); \n  return(0);\n}\n" }, { "alpha_fraction": 0.6725146174430847, "alphanum_fraction": 0.6725146174430847, "avg_line_length": 18.11111068725586, "blob_id": "af1486dc319b8651f09fcdf893492dc9badc7c23", "content_id": "2d8129152dba84262c4bf7d7851999f865d650ec", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 171, "license_type": "permissive", "max_line_length": 46, "num_lines": 9, "path": "/web-app/views/src/js/actions/fileUploadActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar fileUploadActions = Reflux.createActions([\n    'addFile',\n    'addFiles',\n    'fetchFile'\n    ]);\n\nmodule.exports = fileUploadActions;" }, { "alpha_fraction": 0.674141526222229, "alphanum_fraction": 0.6772950291633606, "avg_line_length": 27.549999237060547, "blob_id": "5a1d5a9a674ae29a3feda1ccd31245847e8d0ddd", "content_id": "50cc499d94ea6507775c03c767bb76e6ce45fa86", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2854, "license_type": "permissive", "max_line_length": 91, "num_lines": 100, "path": "/test_suite/SCRIPTS/test_generate_makefile.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\nimport subprocess\n\ngenerate_makefile_script = ''\nllvm_interpreter_bin = ''\n\ndef callGenerateMakefile(work_dir, resources):\n\tglobal generate_makefile_script\n\n\texeclist = [generate_makefile_script, '--dir', work_dir]\n\texeclist.extend(resources['makefile_generation_args'].split(' '))\n\tprint(' '.join(execlist))\n\tp = subprocess.Popen(execlist)\n\tp.wait()\n\tif p.returncode != 0:\n\t\treturn (\"FAIL: ERROR in calling \" + generate_makefile_script + \" on \" + work_dir)\n\telse:\n\t\treturn (\"PASS\")\n\ndef callLLVMInterpreter(work_dir, resources):\n\tglobal llvm_interpreter_bin\n\n\tcwd = os.getcwd()\n\tos.chdir(work_dir)\n\tos.system('make clean')\n\tos.system('make')\n\tprog = resources['prog']\n\tif 'readable' in os.path.basename(work_dir):\n\t\tprog = prog + '.ll'\n\telse:\n\t\tprog = prog + '.bc'\n\n\tprog = os.path.join(work_dir, prog)\n\texeclist = [llvm_interpreter_bin, prog]\n\texeclist.extend(resources['inputs'].split(' '))\n\tprint(' '.join(execlist))\n\tp = subprocess.Popen(execlist)\n\tp.wait()\n\tos.chdir(cwd)\n\n\tif p.returncode != 0:\n\t\treturn (\"FAIL: ERROR in running on lli: \" + llvm_interpreter_bin + \", test: \" + work_dir)\n\telse:\n\t\treturn (\"PASS\")\n\n\ndef test_generate_makefile(*test_list):\n\tglobal generate_makefile_script\n\tglobal llvm_interpreter_bin\n\n\tr = 0\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\tllfi_tools_dir = os.path.join(script_dir, os.pardir, os.pardir, 'tools')\n\tgenerate_makefile_script = os.path.join(llfi_tools_dir, 'GenerateMakefile')\n\tsys.path.append(os.path.join(script_dir, os.pardir, os.pardir, 'config'))\n\timport llvm_paths\n\tllvm_interpreter_bin = os.path.join(llvm_paths.LLVM_DST_ROOT, \"bin/lli\")\n\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\twork_dict = {}\n\tfor test in suite[\"MakefileGeneration\"]:\n\t\tif len(test_list) == 0 or test in test_list or \"all\" in test_list:\n\t\t\twork_dict[\"./MakefileGeneration/\"+test] = suite[\"MakefileGeneration\"][test]\n\n\tresult_list = []\n\tfor test_path in work_dict:\n\t\tprint (\"MSG: Testing GenerateMakefile on:\", test_path)\n\t\twork_dir = os.path.abspath(os.path.join(testsuite_dir, test_path))\n\t\tresult = callGenerateMakefile(work_dir, work_dict[test_path])\n\t\tif result != 'PASS':\n\t\t\tr += 1\n\t\telse:\n\t\t\tresult = callLLVMInterpreter(work_dir, work_dict[test_path])\n\t\t\tif result != 'PASS':\n\t\t\t\tr += 1\n\t\trecord = {\"name\": test_path, \"result\": result}\n\t\tresult_list.append(record)\n\n\treturn r, result_list\n\nif __name__ == \"__main__\":\n\tr, result_list = test_generate_makefile(*sys.argv[1:])\n\tprint (\"=============== Result ===============\")\n\tfor record in result_list:\n\t\tprint(record[\"name\"], \"\\t\\t\", record[\"result\"])\n\n\tsys.exit(r)" }, { "alpha_fraction": 0.44212964177131653, "alphanum_fraction": 0.4861111044883728, "avg_line_length": 14.896552085876465, "blob_id": "8951a4618638b3cd7fb54ba7caa9ae80c1b2ce82", "content_id": "f2b55b10631ff42967056b94a453a1188880bcb8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 432, "license_type": "permissive", "max_line_length": 34, 
"num_lines": 29, "path": "/sample_programs/fib/fib.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n\n\nint main (int argc, char **argv) {\n\t\n\tunsigned int fib0 = 0, fib1 = 1;\n\tunsigned int n = atoi(argv[1]);\n\tunsigned int result;\t\n\n\tif (n == 0) {\n\t\tresult = 0;\n\t} else if (n == 1) {\n\t\tresult = 1;\n\t} else {\n\t\tint i;\n\t\tfor (i = 0; i < n - 1; ++i) {\n\t\t\tif (i % 2 == 0) {\n\t\t\t\tfib0 += fib1;\n\t\t\t\tresult = fib0;\n\t\t\t} else {\n\t\t\t\tfib1 += fib0;\n\t\t\t\tresult = fib1;\n\t\t\t}\n\t\t}\n\t}\n\n\tprintf(\"%u\\n\", result);\n}\n" }, { "alpha_fraction": 0.5421686768531799, "alphanum_fraction": 0.5602409839630127, "avg_line_length": 10.928571701049805, "blob_id": "31ef67319887824a61fae5e4f21b3b93995751b2", "content_id": "d7aace89a8091bf6fb8fef95dca55c6b786c7cb1", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 166, "license_type": "permissive", "max_line_length": 20, "num_lines": 14, "path": "/test_suite/PROGRAMS/factorial/factorial.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include<stdio.h>\nmain(argc, argv)\nint argc;\nchar *argv[];\n{\nint i,fact, n;\nn = atoi(argv[1]);\nfact = 1;\nfor(i=1;i<=n;i++)\n{\nfact = fact * i;\n}\nprintf(\"%d\\n\",fact);\n}" }, { "alpha_fraction": 0.6827333569526672, "alphanum_fraction": 0.6851738691329956, "avg_line_length": 31.799999237060547, "blob_id": "8e4ebf8bd6fccdc1a2cdf8e8b5324699147961e2", "content_id": "904ec22d20bd5d7ff4408bf6dfb271d7f6dd866f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1639, "license_type": "permissive", "max_line_length": 118, "num_lines": 50, "path": "/web-app/server/preInstrument.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var fs = require('fs');\nvar readline = require('readline');\nvar LLFI_BUILD_ROOT = require('./utils/config').LLFI_BUILD_ROOT;\nvar execPromise = require('./utils/execPromise').execPromise;\n\n// Do a hardware and software auto scan, send the applicable injection types back to the client\nexports.processPreInstrument = function (req, res) {\n\tvar errorStatus = false;\n\tvar fileName = req.body.fileName;\n\n\t// Extract filename without extension\n\tfileName = fileName.replace(/\\.[^/.]+$/, \"\");\n\n\tvar cdDirCmd = \"cd ./uploads/\" + req.ip +\"/\";\n\n\tvar softwareFailureAutoScanCmd = LLFI_BUILD_ROOT + \"bin/SoftwareFailureAutoScan --no_input_yaml \" + fileName + \".ll\";\n\tvar commands = [];\n\n\tcommands.push(cdDirCmd + \" && \" + softwareFailureAutoScanCmd);\n\n\tvar softwareInjectionTypes = [];\n\n\t// Execute the auto scan script\n\tcommands.reduce(function(p, cmd) {\n\t\treturn p.then(function(results) {\n\t\t\treturn execPromise(cmd).then(function(stdout) {\n\t\t\t\tresults.push(stdout);\n\t\t\t\treturn results;\n\t\t\t});\n\t\t});\n\t}, Promise.resolve([])).then(function(results) {\n\t\t// Read software injection types\n\t\tvar softwareFile = \"./uploads/\" + req.ip +\"/llfi.applicable.software.failures.txt\";\n\t\tfs.readFileSync(softwareFile).toString().split('\\n').forEach(function (line) {\n\t\t\tif (line.includes(\"- \")) {\n\t\t\t\tvar injectionType = line.substring(line.indexOf(\"- \")+ 2);\n\t\t\t\tsoftwareInjectionTypes.push(injectionType);\n\t\t\t}\n\t\t});\n\t}, function(err) 
{\n\t\t// error here\n\t\tres.status(500);\n\t\tres.send({error: err});\n\t\tconsole.log(\"err in preInstrument process\", err);\n\t\terrorStatus = true;\n\t}).then(function(){\n\t\tif (errorStatus) return;\n\t\tres.send(softwareInjectionTypes);\n\t});\n};" }, { "alpha_fraction": 0.731249988079071, "alphanum_fraction": 0.731249988079071, "avg_line_length": 23.69230842590332, "blob_id": "e785bff3535560594e3c0bae7acc0a8731d44986", "content_id": "c7310e573eea307457e08d1b6b9f0580f275fb6f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 320, "license_type": "permissive", "max_line_length": 62, "num_lines": 13, "path": "/web-app/views/src/js/stores/errorLogStore.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\nvar errorLogActions = require(\"./../actions/errorLogActions\");\nvar errorLog = \"\";\nvar errorLogStore = Reflux.createStore({\n\tlistenables: [errorLogActions],\n\n\tonUpdateErrorLog: function(errorLogs) {\n\t\terrorLog = errorLogs;\n\t\tthis.trigger(errorLog);\n\t},\n});\n\nmodule.exports = errorLogStore;" }, { "alpha_fraction": 0.7040677070617676, "alphanum_fraction": 0.7095277309417725, "avg_line_length": 32.30908966064453, "blob_id": "6f3161c742232a78a51f0c6ba12890064b5cb11b", "content_id": "165dbeb4e4a07601cbfe3f1dd0afe5be288a661f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3663, "license_type": "permissive", "max_line_length": 349, "num_lines": 110, "path": "/bin/batchInjectfault.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n\"\"\"\n\n%(prog)s is a wrapper for the LLFI injectfault command to run injectfault through all the work directories generated by \\'batchInstrument\\'. Each work directory should have an input.yaml file which only contains one software failure model defined. All the software failure modes should be defined in the master input.yaml under the current (base) directory.\n\nUsage: %(prog)s <source IR file> <arguments>\n\nPrerequisite:\nYou need to run \\'batchInstrument\\' first, then run %(prog)s under the same directory, the directory that contains multiple sub directories for different software faults. 
Same as \\'batchInstrument\\', %(prog)s is only applicable when multiple software failure modes are defined in input.yaml.\n\"\"\"\n\nimport sys, os, shutil\nimport yaml\nimport subprocess\n\nprog = os.path.basename(sys.argv[0])\nscript_path = os.path.realpath(os.path.dirname(__file__))\nsys.path.append(os.path.join(script_path, '../config'))\nimport llvm_paths\n\ninjectfault_script = os.path.join(script_path, 'injectfault')\n# basedir and options are assigned in parseArgs(args)\nbasedir = \"\"\noptions = []\n\ndef parseArgs(args):\n\tglobal basedir\n\tglobal options\n\tcwd = os.getcwd()\n\tfor arg in args:\n\t\toption = arg\n\t\tif os.path.isfile(arg):\n\t\t\tbasedir = os.path.realpath(os.path.dirname(arg))\n\t\t\toption = os.path.basename(arg)\n\t\toptions.append(option)\n\tos.chdir(basedir)\n\ndef usage(msg = None):\n  retval = 0\n  if msg is not None:\n    retval = -1\n    msg = \"ERROR: \" + msg\n    print(msg, file=sys.stderr)\n  print(__doc__ % globals(), file=sys.stderr)\n  sys.exit(retval)\n\ndef parseMasterYaml():\n\tmaster_yaml_dict = {}\n\tmodel_list = []\n\ttry:\n\t\twith open('input.yaml', 'r') as master_yaml_file:\n\t\t\tmaster_yaml_dict = yaml.load(master_yaml_file)\n\texcept:\n\t\tprint (\"ERROR: Unable to find input.yaml or load the input.yaml under the current directory\")\n\t\tprint (basedir)\n\t\tsys.exit(-1)\n\ttry:\n\t\tmodel_list = list(master_yaml_dict['compileOption']['instSelMethod'][0]['customInstselector']['include'])\n\texcept:\n\t\tprint (\"ERROR: this wrapper script is not applicable to the input.yaml under the current directory. Please note this script is only applicable to input.yaml files with multiple software failure models defined.\")\n\t\tprint (basedir)\n\t\tsys.exit(-1)\n\treturn master_yaml_dict, model_list\n\ndef callInjectfault(model_list, *argv):\n\tnum_failed = 0\n\tfor model in model_list:\n\t\tworkdir = os.path.join(basedir, \"llfi-\"+model)\n\t\ttry:\n\t\t\tos.chdir(workdir)\n\t\texcept:\n\t\t\tprint (\"ERROR: Unable to change to directory:\", workdir)\n\t\t\tsys.exit(-1)\n\t\tfaultinjection_exe_name = argv[0]\n\t\tif faultinjection_exe_name.endswith('.ll'):\n\t\t\tfaultinjection_exe_name = faultinjection_exe_name.split('.ll')[0]\n\t\telif faultinjection_exe_name.endswith('.bc'):\n\t\t\tfaultinjection_exe_name = faultinjection_exe_name.split('.bc')[0]\n\t\tfaultinjection_exe_name = faultinjection_exe_name + '-faultinjection.exe'\n\t\tcommand = [injectfault_script]\n\t\tcommand.extend(['./llfi/'+faultinjection_exe_name])\n\t\tcommand.extend(argv[1:])\n\t\tprint (\"\\nRun injectfault command:\", ' '.join(command))\n\t\ttry:\n\t\t\to = subprocess.check_output(command, stderr=sys.stderr)\n\t\texcept subprocess.CalledProcessError:\n\t\t\tprint (\"injectfault:\", model, \" failed!\")\n\t\t\tnum_failed += 1\n\t\telse:\n\t\t\tprint (o.decode())\n\t\t\tprint (\"injectfault:\", model, \" succeeded!\")\n\t\tos.chdir(basedir)\n\treturn num_failed\n\ndef main(*argv):\n\tglobal options\n\tparseArgs(argv)\n\tmaster_yaml_dict, model_list = parseMasterYaml()\n\tr = callInjectfault(model_list, *options)\n\treturn r\n\nif __name__ == \"__main__\":\n\tif len(sys.argv[1:]) < 1 or sys.argv[1] == '--help' or sys.argv[1] == '-h':\n\t\tusage()\n\t\tsys.exit(0)\n\telse:\n\t\targv = sys.argv[1:]\n\tr = main(*argv)\n\tsys.exit(r)" }, { "alpha_fraction": 0.6477611660957336, "alphanum_fraction": 0.6529850959777832, "avg_line_length": 24.788461685180664, "blob_id": "0272427fa82cc51859222553958e0518c2aa12fb", "content_id": "aa1766d3133343feb4816af8cfbf877f763fab93", "detected_licenses": [ 
"LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1340, "license_type": "permissive", "max_line_length": 89, "num_lines": 52, "path": "/test_suite/SCRIPTS/build_prog.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\nimport subprocess\n\ndef build_prog(*prog_list):\n\tr = 0\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\tif len(prog_list) == 0:\n\t\t## build all programs\n\t\tcwd = os.path.abspath(os.path.curdir)\n\t\tos.chdir(os.path.join(testsuite_dir, \"PROGRAMS\"))\n\t\tp = subprocess.Popen([\"make\"])\n\t\tp.wait()\n\t\tif p.returncode != 0:\n\t\t\tprint (\"ERROR: Failed in building all programs\\n\", file=sys.stderr)\n\t\t\tr = p.returncode\n\t\tos.chdir(cwd)\n\telse:\n\t\tcwd = os.path.abspath(os.path.curdir)\n\t\tfor prog in prog_list:\n\t\t\t## build prog\n\t\t\tif prog not in list(suite[\"PROGRAMS\"].keys()):\n\t\t\t\tprint(\"WARNING: program:\", prog, \"not defined in test_suite.yaml\\n\", file=sys.stderr)\n\t\t\t\tcontinue\n\t\t\tpd = os.path.join(testsuite_dir, \"PROGRAMS\", prog)\n\t\t\tos.chdir(pd)\n\t\t\tp = subprocess.Popen([\"make\"])\n\t\t\tp.wait()\n\t\t\tif p.returncode != 0:\n\t\t\t\tprint (\"ERROR: Failed in building program:\", prog, file=sys.stderr)\n\t\t\t\tr = p.returncode\n\t\t\n\t\tos.chdir(cwd)\n\n\treturn r\n\nif __name__ == \"__main__\":\n\tr = build_prog(*sys.argv[1:])\n\tsys.exit(r)" }, { "alpha_fraction": 0.6763884425163269, "alphanum_fraction": 0.6802994608879089, "avg_line_length": 31.07526969909668, "blob_id": "4c95773d5db48baa2590e3bf96cd52a9b87efb15", "content_id": "926654c323f0415b56d0e641e0d8324ab1c38cbf", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8949, "license_type": "permissive", "max_line_length": 136, "num_lines": 279, "path": "/test_suite/SCRIPTS/llfi_test.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n\"\"\"\n\n%(prog)s is a test suite driver script to run all the steps of LLFI regression test. 
\n\nUsage: %(prog)s [OPTIONS]\n\nList of options:\n\n--threads <number of threads to use>: number of threads to be used for fault injections, default value: 1.\n--all: Test all the test cases of LLFI test suite, including fault injection tests, trace analysis tests and make file generation tests.\n--all_fault_injections: Test all the test cases of fault injections, including HardwareFaults, SoftwareFaults and BatchMode tests.\n--all_software_faults: Test all the test cases of SoftwareFaults.\n--all_hardware_faults: Test all the test cases of HardwareFaults.\n--all_batchmode: Test all the test cases of BatchMode fault injections.\n--all_trace_tools_tests: Test all the tests for trace analysis tools.\n--all_makefile_generation: Test all the tests for makefile generation script.\n--test_cases [test case names]: Test only specified test case.\n--clean_after_test: Clean all the generate files after testing.\n\n--verbose: Show verbose information\n--help(-h): Show help information\n\n\"\"\"\n\nimport sys\nimport os\nimport subprocess\nimport time\n\noptions = {\n\t'all':False,\n\t'all_fault_injections':False,\n\t'all_software_faults':False,\n\t'all_hardware_faults':False,\n\t'all_batchmode':False,\n\t'all_trace_tools_tests':False,\n\t'all_makefile_generation':False,\n\t'test_cases':[],\n\t'threads':1,\n\t'clean_after_test':False,\n}\n\nprog = os.path.basename(sys.argv[0])\nverbose = False\n\ndef verbosePrint(msg):\n\tglobal verbose\n\tif verbose:\n\t\tprint(msg)\n\ndef usage(msg = None):\n\tretval = 0\n\tif msg is not None:\n\t\tretval = 1\n\t\tmsg = \"ERROR: \" + msg\n\t\tprint(msg, file=sys.stderr)\n\tprint(__doc__ % globals(), file=sys.stderr)\n\tsys.exit(retval)\n\ndef parseArgs(args):\n\tglobal options\n\tglobal verbose\n\targid = 0\n\twhile argid < len(args):\n\t\targ = args[argid]\n\t\t\n\t\tif arg == \"--all\":\n\t\t\toptions['all'] = True\n\t\t\t\n\t\telif arg == \"--all_fault_injections\":\n\t\t\toptions['all_fault_injections'] = True\n\t\t\t\n\t\telif arg == \"--all_software_faults\":\n\t\t\toptions['all_software_faults'] = True\n\t\t\t\n\t\telif arg == \"--all_hardware_faults\":\n\t\t\toptions['all_hardware_faults'] = True\n\t\t\t\n\t\telif arg == \"--all_batchmode\":\n\t\t\toptions['all_batchmode'] = True\n\t\t\t\n\t\telif arg == \"--test_cases\":\n\t\t\targid += 1\n\t\t\twhile(argid < len(args) and args[argid][0] != '-'):\n\t\t\t\toptions['test_cases'].append(str(args[argid]))\n\t\t\t\targid += 1\n\n\t\telif arg == \"--threads\":\n\t\t\targid += 1\n\t\t\toptions['threads'] = int(args[argid])\n\n\t\telif arg == \"--all_trace_tools_tests\":\n\t\t\toptions['all_trace_tools_tests'] = True\n\n\t\telif arg == \"--all_makefile_generation\":\n\t\t\toptions['all_makefile_generation'] = True\n\n\t\telif arg == \"--clean_after_test\":\n\t\t\toptions['clean_after_test'] = True\n\n\t\telif arg == \"--help\" or arg == \"-h\":\n\t\t\tusage()\n\n\t\telif arg == \"--verbose\":\n\t\t\tverbose = True\n\n\t\targid += 1\n\ndef startTestRoutine():\n\tglobal options\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\tsys.path.append(script_dir)\n\tbuild_prog_script = os.path.join(script_dir, 'build_prog.py')\n\tdeploy_prog_script = os.path.join(script_dir, 'deploy_prog.py')\n\tinject_prog_script = os.path.join(script_dir, 'inject_prog.py')\n\tcheck_injection_script = os.path.join(script_dir, 'check_injection.py')\n\ttest_trace_tools_script = os.path.join(script_dir, 'test_trace_tools.py')\n\ttest_generate_makefile_script = os.path.join(script_dir, 
'test_generate_makefile.py')\n\tclear_all_script = os.path.join(script_dir, 'clear_all.py')\n\n\tinjection_result_list = []\n\ttrace_result_list = []\n\tgenerate_makefile_result_list = []\n\n\tif options['all'] or options['all_batchmode'] or options['all_hardware_faults']\\\n\tor options['all_software_faults'] or options['all_fault_injections']\\\n\tor options['test_cases'] != []:\n\t\t## build all the test program\n\t\texeclist = ['python3', '-u', build_prog_script]\n\t\tverbosePrint(' '.join(execlist))\n\t\tp = subprocess.Popen(execlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tp.wait()\n\t\tr = p.returncode\n\t\tif r != 0:\n\t\t\tprint(\"ERROR: Failed to build all test programs\")\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint(\"Built test programs successfully.\")\n\t\n\t\t## deploy programs\n\t\texeclist = ['python3', '-u', deploy_prog_script]\n\t\tif options['all_batchmode']:\n\t\t\texeclist.append('BatchMode')\n\t\telif options['all_software_faults']:\n\t\t\texeclist.append('SoftwareFaults')\n\t\telif options['all_hardware_faults']:\n\t\t\texeclist.append('HardwareFaults')\n\t\telif options['test_cases'] != []:\n\t\t\texeclist.extend(options['test_cases'])\n\t\telif options['all'] or options['all_fault_injections']:\n\t\t\tpass\n\t\tverbosePrint(' '.join(execlist))\n\t\tp = subprocess.Popen(execlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tp.wait()\n\t\tr = p.returncode\n\t\tif r != 0:\n\t\t\tprint(\"ERROR: Failed to deploy test programs\")\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint(\"Deployed test programs successfully.\")\n\n\t\t## start fault injection\n\t\texeclist = ['python3', '-u', inject_prog_script, str(options['threads'])]\n\t\tif options['all_batchmode']:\n\t\t\texeclist.append('BatchMode')\n\t\telif options['all_software_faults']:\n\t\t\texeclist.append('SoftwareFaults')\n\t\telif options['all_hardware_faults']:\n\t\t\texeclist.append('HardwareFaults')\n\t\telif options['test_cases'] != []:\n\t\t\texeclist.extend(options['test_cases'])\n\t\telif options['all'] or options['all_fault_injections']:\n\t\t\tpass\n\t\tverbosePrint(' '.join(execlist))\n\t\tp = subprocess.Popen(execlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tp.wait()\n\t\tr = p.returncode\n\t\tif r != 0:\n\t\t\tprint(\"WARNING: error occurred during fault injection. 
Continue on testing.\")\n\t\telse:\n\t\t\tprint(\"Fault injection ends normally.\")\n\n\t\t## check the injection\n\t\timport check_injection\n\t\tprog_list = []\n\t\tif options['all_batchmode']:\n\t\t\tprog_list.append('BatchMode')\n\t\telif options['all_software_faults']:\n\t\t\tprog_list.append('SoftwareFaults')\n\t\telif options['all_hardware_faults']:\n\t\t\tprog_list.append('HardwareFaults')\n\t\telif options['test_cases'] != []:\n\t\t\tprog_list.extend(options['test_cases'])\n\t\telif options['all'] or options['all_fault_injections']:\n\t\t\tpass\n\t\tverbosePrint('Calling: check_injection.check_injection(' + ' '.join(prog_list) + ')')\n\t\tcheck_injection_returncode, injection_result_list = check_injection.check_injection(*prog_list)\n\n\t## run trace tools's tests\n\tif options['all_trace_tools_tests'] or options['all'] or options['test_cases'] != []:\n\t\timport test_trace_tools\n\t\tprog_list = []\n\t\tif options['test_cases'] != []:\n\t\t\tprog_list.extend(options['test_cases'])\n\t\telif options['all_trace_tools_tests'] or options['all']:\n\t\t\tpass\n\t\tverbosePrint('Calling: test_trace_tools.test_trace_tools(' + ' '.join(prog_list) + ')')\n\t\ttest_trace_tools_returncode, trace_result_list = test_trace_tools.test_trace_tools(*prog_list)\n\n\t## run MakefileGeneration tests\n\tif options['all_makefile_generation'] or options['all'] or options['test_cases'] != []:\n\t\timport test_generate_makefile\n\t\tprog_list = []\n\t\tif options['test_cases'] != []:\n\t\t\tprog_list.extend(options['test_cases'])\n\t\telif options['all_makefile_generation'] or options['all']:\n\t\t\tpass\n\t\tverbosePrint('Calling: test_generate_makefile.test_generate_makefile(' + ' '.join(prog_list) + ')')\n\t\ttest_generate_makefile_returncode, generate_makefile_result_list = test_generate_makefile.test_generate_makefile(*prog_list)\n\n\t## collect the results\n\ttotal = 0\n\tpassed = 0\n\tif len(injection_result_list) > 0:\n\t\tprint (\"==== Check Injection Result ====\")\n\t\tfor record in injection_result_list:\n\t\t\tprint(record[\"name\"], \"\\t\\t\", record[\"result\"])\n\t\t\ttotal += 1\n\t\t\tif record['result'] == 'PASS':\n\t\t\t\tpassed += 1\n\tif len(trace_result_list) > 0:\n\t\tprint (\"==== Test Trace Tools Result ====\")\n\t\tfor record in trace_result_list:\n\t\t\tprint(record[\"name\"], \"\\t\\t\", record[\"result\"])\n\t\t\ttotal += 1\n\t\t\tif record['result'] == 'PASS':\n\t\t\t\tpassed += 1\n\n\tif len(generate_makefile_result_list) > 0:\n\t\tprint(\"==== Test MakefileGeneration Tool Result ====\")\n\t\tfor record in generate_makefile_result_list:\n\t\t\tprint(record[\"name\"], '\\t\\t', record[\"result\"])\n\t\t\ttotal += 1\n\t\t\tif record['result'] == 'PASS':\n\t\t\t\tpassed += 1\n\n\tprint(\"=== Overall Counts ====\")\n\tprint(\"Total tests:\\t\", total)\n\tprint(\"Passed tests:\\t\", passed)\n\tprint(\"Failed tests:\\t\", total - passed)\n\n\tif options['clean_after_test']:\n\t\texeclist = ['python3', '-u', clear_all_script]\n\t\tverbosePrint(' '.join(execlist))\n\t\tp = subprocess.Popen(execlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tp.wait()\n\t\tos.chdir(os.path.join(script_dir, os.pardir, 'PROGRAMS'))\n\t\tos.system('make clean')\n\t\tdirs = [d for d in os.listdir(os.path.join(script_dir, os.pardir, 'MakefileGeneration')) \n\t\tif os.path.isdir(os.path.join(script_dir, os.pardir, 'MakefileGeneration',d))]\n\t\tprint(dirs)\n\t\tfor d in dirs:\n\t\t\tp = os.path.join(script_dir, os.pardir, 'MakefileGeneration', d)\n\t\t\tos.chdir(p)\n\t\t\tos.system('make 
clean')\n\n\n\treturn 0\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) == 1:\n\t\tusage()\n\tparseArgs(sys.argv[1:])\n\tprint(\"Tests Start on: \", time.ctime())\n\tr = startTestRoutine()\n\tprint(\"Tests End on: \", time.ctime())\n\tsys.exit(r)\n" }, { "alpha_fraction": 0.7323688864707947, "alphanum_fraction": 0.7323688864707947, "avg_line_length": 22.1200008392334, "blob_id": "647c839b5578a3a2ac41495f6462f95b300d615a", "content_id": "84265c01cce35ee0fc01bc0c2f99990c9e052776", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 553, "license_type": "permissive", "max_line_length": 74, "num_lines": 25, "path": "/llvm_passes/SampleFIRegSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Value.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Constants.h\"\n\n#include \"FIRegSelector.h\"\n#include \"FICustomSelectorManager.h\"\n\nnamespace llfi {\n\n/**\n * This sample register selector only selects constant int as target\n */\nclass SampleFIRegSelector: public HardwareFIRegSelector {\n  private:\n    virtual bool isRegofInstFITarget(Value *reg, Instruction *inst) {\n      if (isa<ConstantInt>(reg))\n        return true;\n      else\n        return false;\n    }\n};\n\nstatic RegisterFIRegSelector X(\"onlyconstint\", new SampleFIRegSelector());\n\n}\n" }, { "alpha_fraction": 0.7402666807174683, "alphanum_fraction": 0.7434666752815247, "avg_line_length": 43.117645263671875, "blob_id": "71214ead06de5602c8425e605297197367705bfc", "content_id": "7f2d4208126be36d816ac91b3687abc982a2c4e8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7500, "license_type": "permissive", "max_line_length": 179, "num_lines": 170, "path": "/tools/tracetodot.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n# tracetodot.py\n# Author: Yilun Song\n# This python script is part of the LLFI tracing system\n# This script generates all intermediate and final trace files needed to view trace propagation flows.\n# Requires: Please run this script in the folder that contains the llfi trace files e.g. the llfi_stat_output folder by default\n# Output: Generates trace difference report files and their .dot files to the folder trace_report_output\n\n\n\n\"\"\"\n\n%(prog)s needs to be called in the folder that contains the llfi trace files (e.g. 
/llfi_stat_output)\n\nThe trace difference report files and .dot files will be generated to the folder trace_report_output in parallel with the folder llfi_stat_output\n\nUsage: %(prog)s [OPTIONS]\n\nList of options:\n\n--help(-h): Show help information\n\n\"\"\"\n\n\nimport sys, os\nimport subprocess\nimport shlex\n\nprog = os.path.basename(sys.argv[0])\n\n\n\ndef parseArgs(args):\n    argid = 0\n    while argid < len(args):\n        arg = args[argid]\n        if arg.startswith(\"-\"):\n            if arg == \"--help\" or arg == \"-h\":\n                usage()\n            else:\n                usage(\"Invalid argument: \" + arg)\n        argid += 1\n\n\ndef usage(msg = None):\n    retval = 0\n    if msg is not None:\n        retval = 1\n        msg = \"ERROR: \" + msg\n        print(msg, file=sys.stderr)\n    print(__doc__ % globals(), file=sys.stderr)\n    sys.exit(retval)\n\n\ndef findPath():\n\tglobal currentpath, scriptdir\n\n\tcurrentpath = os.getcwd()\n\t#print (currentpath)\n\n\tscriptdir = os.path.dirname(os.path.abspath(__file__))\n\n\n\ndef makeTraceOutputFolder():\n\tglobal traceOutputFolder, goldenTraceFilePath\n\ttraceOutputFolder = os.path.abspath(os.path.join(currentpath, \"../trace_report_output\"))\n\t#print (traceOutputFolder)\n\tgoldenTraceFilePath = os.path.abspath(os.path.join(currentpath, \"../baseline/llfi.stat.trace.prof.txt\"))\n\tif not os.path.exists(traceOutputFolder):\n\t\tos.makedirs(traceOutputFolder)\n\telse:\n\t\t# Remove the contents in traceOutputFolder\n\t\tfor f in os.listdir(traceOutputFolder):\n\t\t\tfile_path = os.path.join(traceOutputFolder,f)\n\t\t\tif os.path.isfile(file_path):\n\t\t\t\tos.unlink(file_path)\n\tif not os.path.isfile(goldenTraceFilePath):\n\t\tprint (\"Cannot find golden Trace File 'llfi.stat.trace.prof.txt'\")\n\n\n\ndef executeTraceDiff():\n\ttraceFileCount = 0\n\tlog_path =os.path.abspath(os.path.join(traceOutputFolder, \"stderr_log.txt\"))\n\tlog_file =open(log_path ,'w')\n\t#Parse the goldenTraceFile path\n\ttempgoldenTraceFilePath = goldenTraceFilePath\n\twhile \"(\" in tempgoldenTraceFilePath and not \"\\(\" in tempgoldenTraceFilePath:\n\t\ttempgoldenTraceFilePath = tempgoldenTraceFilePath[:tempgoldenTraceFilePath.find(\"(\")]+'\\('+ tempgoldenTraceFilePath[tempgoldenTraceFilePath.find(\"(\")+1:]\n\twhile \")\" in tempgoldenTraceFilePath and not \"\\)\" in tempgoldenTraceFilePath:\n\t\ttempgoldenTraceFilePath = tempgoldenTraceFilePath[:tempgoldenTraceFilePath.find(\")\")]+'\\)'+ tempgoldenTraceFilePath[tempgoldenTraceFilePath.find(\")\")+1:]\n\ttempScriptdir = scriptdir\n\t#Parse the scriptdir path\n\twhile \"(\" in tempScriptdir and not \"\\(\" in tempScriptdir:\n\t\ttempScriptdir = tempScriptdir[:tempScriptdir.find(\"(\")]+'\\('+ tempScriptdir[tempScriptdir.find(\"(\")+1:]\n\twhile \")\" in tempScriptdir and not \"\\)\" in tempScriptdir:\n\t\ttempScriptdir = tempScriptdir[:tempScriptdir.find(\")\")]+'\\)'+ tempScriptdir[tempScriptdir.find(\")\")+1:]\n\ttemptraceOutputFolder = traceOutputFolder\n\t#Parse the traceOutputFolder path\n\twhile \"(\" in temptraceOutputFolder and not \"\\(\" in temptraceOutputFolder:\n\t\ttemptraceOutputFolder = temptraceOutputFolder[:temptraceOutputFolder.find(\"(\")]+'\\('+ temptraceOutputFolder[temptraceOutputFolder.find(\"(\")+1:]\n\twhile \")\" in temptraceOutputFolder and not \"\\)\" in temptraceOutputFolder:\n\t\ttemptraceOutputFolder = temptraceOutputFolder[:temptraceOutputFolder.find(\")\")]+'\\)'+ temptraceOutputFolder[temptraceOutputFolder.find(\")\")+1:]\n\tfor file in os.listdir(currentpath):\n\t\tif file.endswith(\".txt\") and file.startswith(\"llfi.stat.trace.\"):\n\t\t\tcmd = 
tempScriptdir+\"/tracediff \"+tempgoldenTraceFilePath+\" \"+file+\" > \"+temptraceOutputFolder+\"/TraceDiffReportFile\"+file[file.find(\"llfi.stat.trace\")+len(\"llfi.stat.trace\"):]\n\t\t\tp =subprocess.call(cmd,shell=True,stderr=log_file)\n\t\t\ttraceFileCount += 1\n\t#Check if trace files present, if not show error messages\n\tif not traceFileCount > 0:\n\t\tprint (\"Cannot find Trace input files.\")\n\t\tprint (\"Please make sure you are running this script in the llfi_stat_output folder\")\n\ndef generateDotFile():\n\tlog_path =os.path.abspath(os.path.join(traceOutputFolder, \"stderr_log.txt\"))\n\tlog_file =open(log_path ,'a')\n\tgoldenTraceDotFile = os.path.abspath(os.path.join(currentpath, \"../../../llfi.stat.graph.dot\"))\n\tif not os.path.isfile(goldenTraceDotFile):\n\t\tgoldenTraceDotFile = os.path.abspath(os.path.join(currentpath, \"../../llfi.stat.graph.dot\"))\n\t\tif not os.path.isfile(goldenTraceDotFile):\n\t\t\tprint (\"Cannot find golden Trace Dot File 'llfi.stat.graph.dot'\")\n\n\t#Parse the goldenTraceFile path\n\ttempgoldenTraceFilePath = goldenTraceFilePath\n\twhile \"(\" in tempgoldenTraceFilePath and not \"\\(\" in tempgoldenTraceFilePath:\n\t\ttempgoldenTraceFilePath = tempgoldenTraceFilePath[:tempgoldenTraceFilePath.find(\"(\")]+'\\('+ tempgoldenTraceFilePath[tempgoldenTraceFilePath.find(\"(\")+1:]\n\twhile \")\" in tempgoldenTraceFilePath and not \"\\)\" in tempgoldenTraceFilePath:\n\t\ttempgoldenTraceFilePath = tempgoldenTraceFilePath[:tempgoldenTraceFilePath.find(\")\")]+'\\)'+ tempgoldenTraceFilePath[tempgoldenTraceFilePath.find(\")\")+1:]\n\ttempScriptdir = scriptdir\n\t#Parse the scriptdir path\n\twhile \"(\" in tempScriptdir and not \"\\(\" in tempScriptdir:\n\t\ttempScriptdir = tempScriptdir[:tempScriptdir.find(\"(\")]+'\\('+ tempScriptdir[tempScriptdir.find(\"(\")+1:]\n\twhile \")\" in tempScriptdir and not \"\\)\" in tempScriptdir:\n\t\ttempScriptdir = tempScriptdir[:tempScriptdir.find(\")\")]+'\\)'+ tempScriptdir[tempScriptdir.find(\")\")+1:]\n\ttemptraceOutputFolder = traceOutputFolder\n\t#Parse the traceOutputFolder path\n\twhile \"(\" in temptraceOutputFolder and not \"\\(\" in temptraceOutputFolder:\n\t\ttemptraceOutputFolder = temptraceOutputFolder[:temptraceOutputFolder.find(\"(\")]+'\\('+ temptraceOutputFolder[temptraceOutputFolder.find(\"(\")+1:]\n\twhile \")\" in temptraceOutputFolder and not \"\\)\" in temptraceOutputFolder:\n\t\ttemptraceOutputFolder = temptraceOutputFolder[:temptraceOutputFolder.find(\")\")]+'\\)'+ temptraceOutputFolder[temptraceOutputFolder.find(\")\")+1:]\n\ttempgoldenTraceDotFile = goldenTraceDotFile\n\t#Parse the traceOutputFolder path\n\twhile \"(\" in tempgoldenTraceDotFile and not \"\\(\" in tempgoldenTraceDotFile:\n\t\ttempgoldenTraceDotFile = tempgoldenTraceDotFile[:tempgoldenTraceDotFile.find(\"(\")]+'\\('+ tempgoldenTraceDotFile[tempgoldenTraceDotFile.find(\"(\")+1:]\n\twhile \")\" in tempgoldenTraceDotFile and not \"\\)\" in tempgoldenTraceDotFile:\n\t\ttempgoldenTraceDotFile = tempgoldenTraceDotFile[:tempgoldenTraceDotFile.find(\")\")]+'\\)'+ tempgoldenTraceDotFile[tempgoldenTraceDotFile.find(\")\")+1:]\n\n\n\tfor file in os.listdir(traceOutputFolder):\n\t\tif file.startswith(\"TraceDiffReportFile\"):\n\t\t\t# Parse the name\n\t\t\tname = file[file.find(\"TraceDiffReportFile\")+len(\"TraceDiffReportFile\"):]\n\t\t\tname = name.replace(\"txt\", \"dot\")\n\t\t\tcmd = tempScriptdir+\"/traceontograph \"+temptraceOutputFolder+\"/\"+file+\" \"+tempgoldenTraceDotFile+\" > \"+ 
temptraceOutputFolder+\"/TraceGraph\"+name\n\t\t\tp =subprocess.call(cmd,shell=True,stderr=log_file)\n\n\ndef main(args):\n\tglobal currentpath, scriptdir, traceOutputFolder, goldenTraceFilePath\n\tparseArgs(args)\n\tfindPath()\n\tmakeTraceOutputFolder()\n\texecuteTraceDiff()\n\tgenerateDotFile()\n\nif __name__==\"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.7265193462371826, "alphanum_fraction": 0.7274401187896729, "avg_line_length": 24.255813598632812, "blob_id": "83f64ce3e8a5b7fdc05ceaad13bef00767076db6", "content_id": "f701b366a8534b343cd22dbe57064bc909230af0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1086, "license_type": "permissive", "max_line_length": 75, "num_lines": 43, "path": "/llvm_passes/core/FIRegSelector.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FI_REG_SELECTOR_H\n#define FI_REG_SELECTOR_H\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Value.h\"\n#include \"Controller.h\"\n\n#include <set>\n#include <map>\n#include <list>\n#include <string>\n\nusing namespace llvm;\nnamespace llfi {\nclass FIRegSelector {\n public:\n void getFIInstRegMap(const std::set< Instruction* > *instset, \n std::map<Instruction*, std::list< int >* > *instregmap);\n virtual std::string getRegSelectorClass(){\n \treturn std::string(\"Unknown\");\n }\n\n private:\n virtual bool isRegofInstFITarget(Value *reg, Instruction *inst) = 0;\n virtual bool isRegofInstFITarget(Value* reg, Instruction* inst, int pos);\n // determine whether LLFI is able to inject into the specified reg or not\n bool isRegofInstInjectable(Value *reg, Instruction *inst);\n};\n\nclass SoftwareFIRegSelector: public FIRegSelector {\n\tvirtual std::string getRegSelectorClass(){\n\t\treturn std::string(\"SoftwareFault\");\n\t}\n};\n\nclass HardwareFIRegSelector: public FIRegSelector {\n\tvirtual std::string getRegSelectorClass(){\n\t\treturn std::string(\"HardwareFault\");\n\t}\n};\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7015920281410217, "alphanum_fraction": 0.7175120115280151, "avg_line_length": 53.1224479675293, "blob_id": "1fe275e14632a035da143224de996263cb2e0b11", "content_id": "a05ca737f9060f1eed2ed37523690b068a687ddb", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2701, "license_type": "permissive", "max_line_length": 186, "num_lines": 49, "path": "/README.TXT", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "LLVM Fault Injector - LLFI \r\n Description: An LLVM Tool for fault injection, easily map between fault at IR and source level, configurable and extensible.\r\n\r\n======\r\nPre-requisites\r\n 1. CMake installed \r\n 2. LLVM version 3.4, built with CMake\r\n 3. Python 3\r\n 4. Python YAML library installed (PyYAML)\r\n 5. clang-3.4 ( frontend for llvm 3.4 )\r\n 6. 64 bit Machines with 64 bit Linux/OS X.\r\n\r\n======\r\nInstallation\r\n A. Install CMake, Python, PyYAML library\r\n\r\n B. Install llvm-3.4 and clang 3.4\r\n 1. Go to \"http://llvm.org/releases/download.html#3.4\" to download LLVM source code and clang source code/binaries for your system.\r\n 2. If building clang from source code, copy the source code under tools/. Access \"http://llvm.org/releases/3.4/docs/GettingStarted.html#installcf\" for instructions.\r\n 2. Build llvm-3.4 ***WITH CMAKE*** 'using flag -DLLVM_REQUIRES_RTTI=1'. 
Access \"http://llvm.org/docs/CMake.html\" for instructions.\r\n\t\r\n C. Build LLFI\r\n 1. Extract the code from LLFI archive (/LLFI)\r\n 2. Go to /LLFI directory and run './setup --help' to see how to build LLFI to a different directory\r\n\t\r\n D. Testing LLFI\r\n You can use example programs in /LLFI/test_suite/PROGRAMS/factorial to test LLFI.\r\n\r\n Example program: factorial\r\n 1. Copy test_suite/factorial/ to your project directory and change to that directory. \r\n 2. Build a single IR file with LLFI tool compiletoIR\r\n <LLFI_BUILD_ROOT>/tools/GenerateMakefile --readable -o factorial.ll --all\r\n 3. Instrument factorial with calls to LLFI libraries and create executables under llfi/\r\n <LLFI_BUILD_ROOT>/bin/instrument --readable factorial.ll\r\n 4. Run factorial executable with profiling functions instrumented\r\n <LLFI_BUILD_ROOT>/bin/profile llfi/factorial-profiling.exe 6\r\n In file llfi/baseline/golden_std_output, you should be able to see 720\r\n 5. Run factorial executable with fault injection functions instrumented\r\n <LLFI_BUILD_ROOT>/bin/injectfault llfi/factorial-faultinjection.exe 6\r\n You should be able to see result files in llfi/std_output/, fault injection stats in llfi/prog_output/, failure report (crash/hang) in llfi/error_output/\r\n\r\n For complete test of whole of LLFI, please use LLFI test suite and refer to wiki page: 'https://github.com/DependableSystemsLab/LLFI/wiki/Test-Suite-for-Regression-Test' for details.\r\n\r\n======\r\nRunning LLFI on your target applications\r\n You can follow the same flow as the Step D of Installation (Testing LLFI). For more details, you can follow the instructions on https://github.com/DependableSystemsLab/LLFI/wiki.\r\n\r\n======\t\t\r\nRead caveats.txt for caveats and known problems.\r\n" }, { "alpha_fraction": 0.6588306427001953, "alphanum_fraction": 0.6624472737312317, "avg_line_length": 28.122806549072266, "blob_id": "b397baf49bbea470d94c6ac9d0355a95890006b8", "content_id": "d5ed703eb4d0167f2d588d537f92823dca0fe16e", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1659, "license_type": "permissive", "max_line_length": 116, "num_lines": 57, "path": "/runtime_lib/InjectorScanner.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <cstdlib>\n#include <vector>\n#include <string>\n#include \"FaultInjector.h\"\n#include \"FaultInjectorManager.h\"\n\nusing namespace std;\n\nint main(int argc, char* argv[]) {\n\tstring output_file_path(\"\");\n\tofstream output_file;\n\tfor(int i = 0; i < argc; i++){\n\t\tif(string(argv[i]) == string(\"-o\")){\n\t\t\toutput_file_path = string(argv[i+1]);\n\t\t}\n\t\t// cout << \"argv[\" << i << \"] = \" << argv[i] << endl;\n\t}\n\n\tif(output_file_path.length() != 0){\n\t\toutput_file.open(output_file_path.c_str());\n\t}\n\n\tFaultInjectorManager* faultinjectormanager = FaultInjectorManager::getFaultInjectorManager();\n\tvector<string> hardwarefaultinjectornames = faultinjectormanager->getInjectorNamesForType(string(\"HardwareFault\"));\n\tif(output_file.is_open()){\n\t\toutput_file << \"HardwareFaultInjector:\" << endl;\n\t}else{\n\t\tcout << \"HardwareFaultInjector:\" << endl;\n\t}\n\tfor(int i = 0; i<hardwarefaultinjectornames.size(); i++){\n\t\tif(output_file.is_open()){\n\t\t\toutput_file << \" - \" << hardwarefaultinjectornames[i] << endl;\n\t\t}else{\n\t\t\tcout << \" - \" << 
hardwarefaultinjectornames[i] << endl;\n\t\t}\n\t}\n\n\tvector<string> softwarefaultinjectornames = faultinjectormanager->getInjectorNamesForType(string(\"SoftwareFault\"));\n\tif(output_file.is_open()){\n\t\toutput_file << \"SoftwareFaultInjector:\" << endl;\n\t}else{\n\t\tcout << \"SoftwareFaultInjector:\" << endl;\n\t}\n\tfor(int i = 0; i<softwarefaultinjectornames.size(); i++){\n\t\tif(output_file.is_open()){\n\t\t\toutput_file << \" - \" << softwarefaultinjectornames[i] << endl;\n\t\t}else{\n\t\t\tcout << \" - \" << softwarefaultinjectornames[i] << endl;\n\t\t}\n\t}\n\n\tif(output_file.is_open())\toutput_file.close();\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.6603494882583618, "alphanum_fraction": 0.666010320186615, "avg_line_length": 34.64912414550781, "blob_id": "38469d79d6d5c36d046a41032b30f829628b5b4f", "content_id": "172faac01cd80e6bfa9e7fc02fa4869293193b62", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4063, "license_type": "permissive", "max_line_length": 184, "num_lines": 114, "path": "/web-app/server/traceGraph.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var fs = require('fs');\nvar readline = require('readline');\nvar LLFI_BUILD_ROOT = require('./utils/config').LLFI_BUILD_ROOT;\nvar execPromise = require('./utils/execPromise').execPromise;\n\nexports.processTrace = function (req, res) {\n\tvar errorStatus = false;\n\n\tvar traceRunIndex = req.body.selectedRunIndex;\n\n\t// If no trace is selected, end the request\n\tif (traceRunIndex.length <= 0) {\n\t\tres.status(500);\n\t\tres.send({error: \"No trace is selected\"});\n\t\treturn;\n\t}\n\n\tvar traceFolder = \"./uploads/\" + req.ip +\"/llfi/trace_report_output/\";\n\t// Make a dir to store the files from a client\n\tif (!fs.existsSync(traceFolder)) {\n\t\tfs.mkdirSync(traceFolder);\n\t}\n\n\tvar goldenFile = \"./llfi/baseline/llfi.stat.trace.prof.txt\";\n\tvar llfi_stat_output = \"./uploads/\" + req.ip +\"/llfi/llfi_stat_output/\";\n\tvar runNumbers = [];\n\tvar selectedTraceFileNames = [];\n\tvar traceDiffFileNames = [];\n\tvar commands = [];\n\tvar consoleLog = [];\n\tvar cdDirCmd = \"cd ./uploads/\" + req.ip +\"/\";\n\t// Get the number of runs in each run option\n\tfs.readdir(llfi_stat_output, (err, files) => {\n\t\tif (err) {\n\t\t\tres.status(500);\n\t\t\tres.send(err);\n\t\t\terrorStatus = true;\n\t\t\tconsole.log(\"err in file reading, \", err);\n\t\t}\n\t\tif (errorStatus) return;\n\t\t// Get the selected Trace file names\n\t\tvar currentRunOptionNumber = 0;\n\t\tvar currentRunNumberOffset = 0;\n\t\tfor (var i = 0; i < traceRunIndex.length; i++) {\n\t\t\tvar traceFileName = \"llfi.stat.trace.\" + traceRunIndex[i] + \".txt\";\n\t\t\tif (files.indexOf(traceFileName) > -1) {\n\t\t\t\tselectedTraceFileNames.push(\"llfi.stat.trace.\" + traceRunIndex[i] + \".txt\");\n\t\t\t}\n\t\t}\n\n\t\t// TraceDiff commands\n\t\tfor (var i = 0; i < selectedTraceFileNames.length; i++) {\n\t\t\tif (files.indexOf(selectedTraceFileNames[i]) > -1) {\n\t\t\t\tvar nameParser = selectedTraceFileNames[i].split(\"llfi.stat.trace.\")[1];\n\t\t\t\tvar runOption = parseInt(nameParser.split(\"-\")[0]);\n\t\t\t\tvar runNumber = nameParser.split(\"-\")[1];\n\t\t\t\trunNumber = parseInt(runNumber.split(\".txt\")[0]);\n\t\t\t\tvar traceDiffFileName = \"TraceDiffReportFile.\" + runOption + \"-\" + runNumber + \".txt\";\n\t\t\t\ttraceDiffFileNames.push(traceDiffFileName);\n\t\t\t\tvar traceFile 
= \"./llfi/llfi_stat_output/\" + selectedTraceFileNames[i];\n\t\t\t\tvar command = LLFI_BUILD_ROOT + \"tools/tracediff \" + goldenFile + \" \" + tradeFile + \" > \" + \"./llfi/trace_report_output/\" + tradeDiffFileName;\n\n\t\t\t\tcommands.push(cdDirCmd + \" && \" + command);\n\t\t\t}\n\t\t}\n\n\t\t//Trace Union command\n\t\tvar traceUnionCmd = LLFI_BUILD_ROOT + \"tools/traceunion \";\n\t\tfor (var i = 0; i < traceDiffFileNames.length; i++) {\n\t\t\ttraceUnionCmd += \"./llfi/trace_report_output/\" + traceDiffFileNames[i] + \" \";\n\t\t}\n\t\ttraceUnionCmd += \"> ./llfi/trace_report_output/UnionedDiffReportFile.txt\";\n\t\tcommands.push(cdDirCmd + \" && \" + traceUnionCmd);\n\n\t\t// traceontograph command\n\t\tvar tracetoGraphCmd = LLFI_BUILD_ROOT + \"tools/traceontograph ./llfi/trace_report_output/UnionedDiffReportFile.txt ./llfi.stat.graph.dot > ./llfi/trace_report_output/TraceGraph.dot\";\n\t\tcommands.push(cdDirCmd + \" && \" + tracetoGraphCmd);\n\n\t\t//Covert dot file to pdf file\n\t\tvar traceCovertCmd = \"dot -Tpdf ./llfi/trace_report_output/TraceGraph.dot -o ./llfi/trace_report_output/TraceGraph.pdf\";\n\t\tcommands.push(cdDirCmd + \" && \" + traceCovertCmd);\n\n\t\t//Execute the commands\n\t\tcommands.reduce(function(p, cmd) {\n\t\t\treturn p.then(function(results) {\n\t\t\t\treturn execPromise(cmd).then(function(stdout) {\n\t\t\t\t\tresults.push(stdout);\n\t\t\t\t\tconsoleLog = results;\n\t\t\t\t\treturn results;\n\t\t\t\t}, function(err) {\n\t\t\t\t\tconsole.log(\"Trace onto graph err: \", err);\n\t\t\t\t});\n\t\t\t});\n\t\t}, Promise.resolve([])).then(function(results) {\n\t\t\tif (errorStatus) return;\n\t\t\tres.send({consoleLog: consoleLog});\n\t\t}, function(err) {\n\t\t\t// error here\n\t\t\tif (errorStatus) return;\n\t\t\tres.status(500);\n\t\t\tres.send({error: err});\n\t\t\tconsole.log(\"err in traceGraph process\", err);\n\t\t\terrorStatus = true;\n\t\t});\n\t});\n};\n\n// Parse the file data to get the value of a status\nvar getStatusValue = function (statusType, fileData) {\n\tvar keyword = statusType + \"=\";\n\tvar value = fileData.split(keyword)[1];\n\tvalue = value.split(\",\")[0];\n\treturn value;\n};" }, { "alpha_fraction": 0.689450204372406, "alphanum_fraction": 0.6926077008247375, "avg_line_length": 36.13103485107422, "blob_id": "e6a29eb219e6c250918147d6cd321a606fed0af4", "content_id": "4d5ffe589d841e670f7ff5716e4a3434784dd512", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5384, "license_type": "permissive", "max_line_length": 95, "num_lines": 145, "path": "/test_suite/SCRIPTS/test_trace_tools.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\nimport subprocess\n\ntracediff_script = \"\"\ntraceunion_script = \"\"\ntraceontograph_script = \"\"\ntracetodot_script = \"\"\n\ndef callTraceTools(work_dir, resources):\n\tglobal tracediff_script\n\tglobal traceunion_script\n\tglobal traceontograph_script\n\tglobal tracetodot_script\n\n\tgolden_trace = resources['trace_prof']\n\tgolden_trace_file = os.path.join(work_dir, golden_trace)\n\tif os.path.isfile(golden_trace_file) == False:\n\t\treturn (\"FAIL: golden_trace_file not found:\", golden_trace)\n\t## call tracediff to generate the trace report file\n\treports_list = []\n\tfor faulty_trace in resources[\"trace_inject\"]:\n\t\tfaulty_trace_file = os.path.join(work_dir, faulty_trace)\n\t\tif os.path.isfile(faulty_trace_file) == False:\n\t\t\tprint (\"WARNING: faulty_trace_file not found:\", faulty_trace, \"work_dir:\", work_dir)\n\t\t\tpass\n\t\telse:\n\t\t\treport_name = '.'.join(faulty_trace.split('.')[0:-1])+'.report.'+faulty_trace.split('.')[-1]\n\t\t\treport_file = os.path.join(work_dir, report_name)\n\t\t\tcommands = [tracediff_script, golden_trace_file, faulty_trace_file, '>', report_file]\n\t\t\tp = subprocess.Popen(' '.join(commands), shell=True)\n\t\t\tp.wait()\n\t\t\tif p.returncode != 0:\n\t\t\t\treturn (\"FAIL: \\'tracediff\\' quits abnormally!\")\n\t\t\tif os.path.isfile(report_file) == False:\n\t\t\t\treturn (\"FAIL: report_file not generated by \\'tracediff\\':\", report_name)\n\t\t\tif os.path.getsize(report_file) == 0:\n\t\t\t\treturn (\"FAIL: report_file generated by \\'tracediff\\' is empty:\", report_name)\n\t\t\treports_list.append(report_file)\n\n\t## call traceunion to generate a union of all reports\n\tunited_report_name = 'llfi.united.trace.report.txt'\n\tunited_report_file = os.path.join(work_dir, united_report_name)\n\tcommands = [traceunion_script]\n\tcommands.extend(reports_list)\n\tcommands.extend(['>', united_report_file])\n\tp = subprocess.Popen(' '.join(commands), shell=True)\n\tp.wait()\n\tif p.returncode != 0:\n\t\treturn (\"FAIL: \\'traceunion\\' quits abnormally!\")\n\tif os.path.isfile(united_report_file) == False:\n\t\treturn (\"FAIL: united report_file not generated by \\'traceunion\\':\", united_report_name)\n\tif os.path.getsize(united_report_file) == 0:\n\t\treturn (\"FAIL: united report_file generated by \\'traceunion\\' is empty:\", united_report_name)\n\n\t## call traceontograph to generate the dot file\n\tcdfg_prof_file = os.path.join(work_dir, resources['cdfg_prof'])\n\tif os.path.isfile(cdfg_prof_file) == False:\n\t\treturn (\"FAIL: cdfg_prof_file not found:\", resources['cdfg_prof'])\n\tcommands = [traceontograph_script, united_report_file, cdfg_prof_file, '>']\n\tcdfg_faulty_name = 'llfi.faulty.graph.dot'\n\tcdfg_faulty_file = os.path.join(work_dir, cdfg_faulty_name)\n\tcommands.append(cdfg_faulty_file)\n\tp = subprocess.Popen(' '.join(commands), shell=True)\n\tp.wait()\n\tif p.returncode != 0:\n\t\treturn (\"FAIL: \\'traceontograph\\' quits abnormally!\")\n\tif os.path.isfile(cdfg_faulty_file) == False:\n\t\treturn (\"FAIL: cdfg_faulty_file not generated by \\'traceontograph\\':\", cdfg_faulty_name)\n\tif os.path.getsize(cdfg_faulty_file) == 0:\n\t\treturn (\"FAIL: cdfg_faulty_file generated by \\'traceontograph\\' is empty:\", cdfg_faulty_name)\n\n\t## call tracetodot to generate all dot and report files\n\tcurrent_dir = os.path.abspath(os.path.curdir)\n\tllfi_stat_dir = os.path.join(work_dir, 'llfi', 'llfi_stat_output')\n\tos.chdir(llfi_stat_dir)\n\t#print (os.getcwd())\n\tp = 
subprocess.Popen(tracetodot_script, shell=True)\n\tp.wait()\n\tos.chdir(current_dir)\n\tif p.returncode != 0:\n\t\treturn (\"FAIL: \\'tracetodot\\' quits abnormally!\")\n\ttrace_dir = os.path.join(work_dir, 'llfi', 'trace_report_output')\n\t#print(trace_dir)\n\tif os.path.isdir(trace_dir) == False:\n\t\treturn (\"FAIL: trace_report_output/ not generated by \\'tracetodot\\'!\", work_dir)\n\tt = [f for f in os.listdir(trace_dir)]\n\tif len(t) < 2:\n\t\treturn (\"FAIL: dot/report files generated by \\'tracetodot\\' not complete!\", work_dir)\n\n\treturn \"PASS\"\n\n\n\ndef test_trace_tools(*test_list):\n\tglobal tracediff_script\n\tglobal traceunion_script\n\tglobal traceontograph_script\n\tglobal tracetodot_script\n\n\tr = 0\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\tllfi_tools_dir = os.path.join(script_dir, '../../tools')\n\ttracediff_script = os.path.join(llfi_tools_dir, \"tracediff\")\n\ttraceunion_script = os.path.join(llfi_tools_dir, \"traceunion\")\n\ttraceontograph_script = os.path.join(llfi_tools_dir, \"traceontograph\")\n\ttracetodot_script = os.path.join(llfi_tools_dir, \"tracetodot\")\n\t\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\twork_dict = {}\n\tfor test in suite[\"Traces\"]:\n\t\tif len(test_list) == 0 or test in test_list or \"all\" in test_list:\n\t\t\twork_dict[\"./Traces/\"+test] = suite[\"Traces\"][test]\n\n\tresult_list = []\n\tfor test_path in work_dict:\n\t\tprint (\"MSG: Testing on trace files of:\", test_path)\n\t\twork_dir = os.path.abspath(os.path.join(testsuite_dir, test_path))\n\t\tresult = callTraceTools(work_dir, work_dict[test_path])\n\t\tif result != 'PASS':\n\t\t\tr += 1\n\t\trecord = {\"name\": test_path, \"result\": result}\n\t\tresult_list.append(record)\n\n\treturn r, result_list\n\nif __name__ == \"__main__\":\n\tr, result_list = test_trace_tools(*sys.argv[1:])\n\tprint (\"=============== Result ===============\")\n\tfor record in result_list:\n\t\tprint(record[\"name\"], \"\\t\\t\", record[\"result\"])\n\n\tsys.exit(r)\n" }, { "alpha_fraction": 0.5550691485404968, "alphanum_fraction": 0.6239631175994873, "avg_line_length": 26.125, "blob_id": "334ef0397f39e08644c594452377a9c527af5133", "content_id": "4ffddb09b71de4108b19926d6d24eed1cbe72b27", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4340, "license_type": "permissive", "max_line_length": 136, "num_lines": 160, "path": "/test_suite/PROGRAMS/sudoku2/sudoku2.c", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/*The following program is an implementation of a Sudoku Solver in C.\nSudoku is a 9*9 grid in which each row, each column and each 3*3 grid contains all numbers from 1 to 9 only once.\nThe program uses a backtracking approach to solve the sudoku. 
There is a recursive function to solve the sudoku.\n*/\n#include<stdio.h>\nint sudoku[9][9];//The array which stores entries for the sudoku\nvoid solvesudoku(int,int);\nint checkrow(int row,int num)\n{//This function checks whether we can put the number(num) in the row(row) of the Sudoku or not\nint column;\nfor(column=0;column<9;column++)\n  if(sudoku[row][column]==num)\n    return 0 ;//If the number is found already present at a certain location we return zero\nreturn 1;//If the number is not found anywhere we return 1\n}\nint checkcolumn(int column,int num)\n{//This function checks whether we can put the number(num) in the column(column) of the Sudoku or not\nint row;\nfor(row=0;row<9;row++)\n  if(sudoku[row][column]==num)\n    return 0;//If the number is found already present at a certain location we return zero\nreturn 1;//If the number is not found anywhere we return 1\n}\nint checkgrid(int row,int column,int num)\n{//This function checks whether we can put the number(num) in the 3*3 grid or not\n//We get the starting row and column for the 3*3 grid\nrow=(row/3)*3 ;\ncolumn=(column/3)*3;\nint r,c;\nfor(r=0;r<3;r++)\n  for(c=0;c<3;c++)\n    if(sudoku[row+r][column+c]==num)\n      return 0;//If the number is found already present at a certain location we return zero\nreturn 1;//If the number is not found anywhere we return 1\n}\nvoid navigate(int row,int column)\n{//Function to move to the next cell in case we have filled one cell\nif(column<8)\n  solvesudoku(row,column+1);\n  else\n  solvesudoku(row+1,0);\n}\nvoid display()\n{//The function to display the solved Sudoku\nint row,column;\n\nfor(row=0;row<9;row++)\n{\nfor(column=0;column<9;column++)\nprintf(\"%d \",sudoku[row][column]);\n}\n}\nvoid solvesudoku(int row,int column)\n{\n  if(row>8)//If the row number is greater than 8 then we have filled all cells, hence we have solved the sudoku\n    display();\n  if(sudoku[row][column]!=0)\n    navigate(row,column);//If the value filled at a cell is not zero then it is filled with some value from 1 to 9, hence we move further\n  else\n  {\n    int ctr;//This is a counter to check numbers from 1 to 9 whether the number can be filled in the cell or not\n    for(ctr=1;ctr<=9;ctr++)\n    {//We check row, column and the grid\n      if((checkrow(row,ctr)==1)&&(checkcolumn(column,ctr)==1)&&(checkgrid(row,column,ctr)==1))\n      {\n       sudoku[row][column]=ctr;\n       navigate(row,column);\n      }\n    }\n    sudoku[row][column]=0;//No valid number was found so we clean up and return to the caller.\n  }\n  \n}\nint main()\n{\nint row,column;\n\nsudoku[0][0] = 1;\nsudoku[0][1] = 0;\nsudoku[0][2] = 3;\nsudoku[0][3] = 4;\nsudoku[0][4] = 0;\nsudoku[0][5] = 0;\nsudoku[0][6] = 7;\nsudoku[0][7] = 0;\nsudoku[0][8] = 9;\nsudoku[1][0] = 0;\nsudoku[1][1] = 5;\nsudoku[1][2] = 6;\nsudoku[1][3] = 0;\nsudoku[1][4] = 8;\nsudoku[1][5] = 9;\nsudoku[1][6] = 0;\nsudoku[1][7] = 2;\nsudoku[1][8] = 3;\nsudoku[2][0] = 0;\nsudoku[2][1] = 8;\nsudoku[2][2] = 9;\nsudoku[2][3] = 1;\nsudoku[2][4] = 0;\nsudoku[2][5] = 3;\nsudoku[2][6] = 4;\nsudoku[2][7] = 0;\nsudoku[2][8] = 6;\nsudoku[3][0] = 2;\nsudoku[3][1] = 1;\nsudoku[3][2] = 4;\nsudoku[3][3] = 0;\nsudoku[3][4] = 6;\nsudoku[3][5] = 5;\nsudoku[3][6] = 0;\nsudoku[3][7] = 9;\nsudoku[3][8] = 7;\nsudoku[4][0] = 3;\nsudoku[4][1] = 0;\nsudoku[4][2] = 0;\nsudoku[4][3] = 8;\nsudoku[4][4] = 0;\nsudoku[4][5] = 7;\nsudoku[4][6] = 0;\nsudoku[4][7] = 1;\nsudoku[4][8] = 4;\nsudoku[5][0] = 8;\nsudoku[5][1] = 0;\nsudoku[5][2] = 7;\nsudoku[5][3] = 0;\nsudoku[5][4] = 1;\nsudoku[5][5] = 4;\nsudoku[5][6] = 0;\nsudoku[5][7] = 6;\nsudoku[5][8] = 5;\nsudoku[6][0] = 
0;\nsudoku[6][1] = 3;\nsudoku[6][2] = 1;\nsudoku[6][3] = 0;\nsudoku[6][4] = 4;\nsudoku[6][5] = 0;\nsudoku[6][6] = 9;\nsudoku[6][7] = 7;\nsudoku[6][8] = 8;\nsudoku[7][0] = 6;\nsudoku[7][1] = 4;\nsudoku[7][2] = 0;\nsudoku[7][3] = 9;\nsudoku[7][4] = 7;\nsudoku[7][5] = 0;\nsudoku[7][6] = 5;\nsudoku[7][7] = 3;\nsudoku[7][8] = 1;\nsudoku[8][0] = 0;\nsudoku[8][1] = 7;\nsudoku[8][2] = 8;\nsudoku[8][3] = 0;\nsudoku[8][4] = 0;\nsudoku[8][5] = 1;\nsudoku[8][6] = 0;\nsudoku[8][7] = 4;\nsudoku[8][8] = 2;\nsolvesudoku(0,0);//We start solving the sudoku.\n}\n" }, { "alpha_fraction": 0.7007778882980347, "alphanum_fraction": 0.7007778882980347, "avg_line_length": 33.683719635009766, "blob_id": "6cf661f794fce601b933cc02128c3b01f3ddb487", "content_id": "f5efd5a8c541f8acf0d911ad302c2ccb82cdc4fd", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7456, "license_type": "permissive", "max_line_length": 157, "num_lines": 215, "path": "/web-app/views/src/js/components/mainWindow/functionTabs.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar Reflux = require(\"reflux\");\n\nvar InstrumentModal = require('./instrumentModal');\nvar RuntimeOptionModal = require('./runtimeOptionModal');\n\nvar targetFileNameStore = require(\"./../../stores/targetFileNameStore\");\nvar injectionModeStore = require(\"./../../stores/injectionModeStore\");\nvar runOptionsStore = require(\"./../../stores/runOptionsStore\");\nvar selectedTraceRunNumberStore = require(\"./../../stores/selectedTraceRunNumberStore\");\n\nvar fileUploadActions = require(\"./../../actions/fileUploadActions\");\nvar consoleLogActions = require(\"./../../actions/consoleLogActions\");\nvar errorLogActions = require(\"./../../actions/errorLogActions\");\nvar profilingStatusActions = require(\"./../../actions/profilingStatusActions\");\nvar faultInjectionStatusActions = require(\"./../../actions/faultInjectionStatusActions\");\nvar faultSummaryActions = require(\"./../../actions/faultSummaryActions\");\n\n\n\nvar FunctionTabs = React.createClass({\n\tmixins: [Reflux.connect(targetFileNameStore,\"fileName\"),\n\t\tReflux.connect(injectionModeStore,\"injectionMode\"),\n\t\tReflux.connect(runOptionsStore,\"runOptions\"),\n\t\tReflux.connect(selectedTraceRunNumberStore,\"selectedTraceRunNumber\")],\n\tgetInitialState: function() {\n\t\treturn {\n\t\t\tfileName: '',\n\t\t\tinjectionMode: {},\n\t\t\trunOptions: [],\n\t\t\tselectedTraceRunNumber: []\n\t\t};\n\t},\n\trender: function() {\n\t\treturn (\n\t\t\t<div class = \"functionTabs\">\n\t\t\t\t<div class=\"btn-toolbar\">\n\t\t\t\t\t<button id=\"compileToIRBtn\" class={\"btn \" + (this.state.fileName ? 
\"btn-primary\" : \"disabled\")} onClick={this.onCompileToIRClick}>Compile To IR</button>\n\t\t\t\t\t<button id=\"instrumentBtn\" class=\"btn disabled\" onClick={this.onInstrumentClick}>Instrument</button>\n\t\t\t\t\t<button id=\"profilingBtn\" class=\"btn disabled\" onClick={this.onProfilingClick}>Profiling</button>\n\t\t\t\t\t<button id=\"runtimeOptionBtn\" class=\"btn disabled\" onClick={this.onRuntimeOptionClick}>Runtime Options</button>\n\t\t\t\t\t<button id=\"injectFaultBtn\" class=\"btn disabled\" onClick={this.onFaultInjectionClick}>Inject Fault</button>\n\t\t\t\t\t<button id=\"traceGraphBtn\" class=\"btn disabled\" onClick={this.onGenerateTraceClick}>Trace Graph</button>\n\t\t\t\t</div>\n\t\t\t\t<InstrumentModal/>\n\t\t\t\t<RuntimeOptionModal/>\n\t\t\t</div>\n\t\t);\n\t},\n\t// CompileToIR\n\tonCompileToIRClick: function(event) {\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\t\tvar data = {};\n\t\tdata.fileName = this.state.fileName;\n\t\t$.ajax({\n\t\t\turl: '/compileToIR',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tvar consoleLog = data.consoleLog;\n\t\t\t\tvar files = data.files;\n\t\t\t\tconsoleLogActions.updateConsoleLog(consoleLog);\n\t\t\t\tfileUploadActions.addFiles(files);\n\t\t\t\twindow.alert(\"Your program is successfully compiled to the IR format\");\n\t\t\t},\n\t\t\terror: function (error) {\n\t\t\t\tif (error.responseJSON.error) {\n\t\t\t\t\terrorLogActions.updateErrorLog(error.responseJSON.error.cmd);\n\t\t\t\t}\n\t\t\t\tconsole.log(error);\n\t\t\t\twindow.alert(\"An error has occured in compileToIR, please refresh the page.\");\n\t\t\t}\n\t\t});\n\t\tthis.changeButtonStatus(event);\n\t},\n\tonInstrumentClick: function (event) {\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\t\t$(\"#InstrumentModalID\").click();\n\t\tthis.changeButtonStatus(event);\n\t},\n\tonProfilingClick: function (event) {\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\t\tvar data = {};\n\t\tdata.fileName = this.state.fileName;\n\t\tdata.injectionMode = this.state.injectionMode;\n\t\tdata.input = document.getElementById(\"profilingInput\").value;\n\t\t$.ajax({\n\t\t\turl: '/profiling',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tvar profilingStats = data.profilingStats;\n\t\t\t\tvar consoleLog = data.consoleLog;\n\t\t\t\tprofilingStatusActions.updateProfilingStatus(profilingStats);\n\t\t\t\tconsoleLogActions.updateConsoleLog(consoleLog);\n\t\t\t\tconsole.log(\"profiling success\");\n\t\t\t\twindow.alert(\"Profiling Successful\");\n\t\t\t},\n\t\t\terror: function (error) {\n\t\t\t\tif (error.responseJSON.error) {\n\t\t\t\t\terrorLogActions.updateErrorLog(error.responseJSON.error.cmd);\n\t\t\t\t}\n\t\t\t\tconsole.log(error);\n\t\t\t\twindow.alert(\"An error has occured in Profiling, please refresh the page.\");\n\t\t\t}\n\t\t});\n\t\tthis.changeButtonStatus(event);\n\t},\n\tonRuntimeOptionClick: function (event) {\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\t\t$(\"#RuntimeOptionModalID\").click();\n\t\tthis.changeButtonStatus(event);\n\t},\n\tonFaultInjectionClick: function (event) {\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\t\tvar 
data = {};\n\t\tdata.fileName = this.state.fileName;\n\t\tdata.injectionMode = this.state.injectionMode;\n\t\tdata.input = document.getElementById(\"profilingInput\").value;\n\t\tdata.runOptions = this.state.runOptions;\n\t\t$.ajax({\n\t\t\turl: '/faultInjection',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tconsole.log(data);\n\t\t\t\tvar consoleLog = data.consoleLog;\n\t\t\t\tvar faultSummary = data.faultSummary;\n\t\t\t\tvar faultInjectionStatus = data.faultInjectionStatus;\n\t\t\t\tfaultInjectionStatusActions.updateFaultInjectionStatus(faultInjectionStatus);\n\t\t\t\tfaultSummaryActions.updateFaultSummary(faultSummary);\n\t\t\t\tconsoleLogActions.updateConsoleLog(consoleLog);\n\t\t\t\tconsole.log(\"faultInjection success\");\n\t\t\t\twindow.alert(\"FaultInjection Successful\");\n\t\t\t},\n\t\t\terror: function (error) {\n\t\t\t\tif (error.responseJSON.error) {\n\t\t\t\t\terrorLogActions.updateErrorLog(error.responseJSON.error.cmd);\n\t\t\t\t}\n\t\t\t\tconsole.log(error);\n\t\t\t\twindow.alert(\"An error has occurred in FaultInjection, please refresh the page.\");\n\t\t\t}\n\t\t});\n\t\tthis.changeButtonStatus(event);\n\t},\n\tonGenerateTraceClick: function (event) {\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\t\tvar data = {};\n\t\tdata.selectedRunIndex = this.state.selectedTraceRunNumber;\n\t\t$.ajax({\n\t\t\turl: '/traceGraph',\n\t\t\ttype: 'POST',\n\t\t\tdata: JSON.stringify(data),\n\t\t\tprocessData: false,\n\t\t\tcontentType: 'application/json',\n\t\t\tsuccess: function(data){\n\t\t\t\tvar consoleLog = data.consoleLog;\n\t\t\t\tconsoleLogActions.updateConsoleLog(consoleLog);\n\t\t\t\t// Simulate a click on an <a> tag to download the pdf trace file\n\t\t\t\tvar a = document.createElement(\"a\");\n\t\t\t\ta.href = \"/tracepdf\";\n\t\t\t\ta.download = \"TraceGraph\";\n\t\t\t\tdocument.body.appendChild(a);\n\t\t\t\ta.click();\n\t\t\t\t// remove the a tag after the download\n\t\t\t\tdocument.body.removeChild(a);\n\t\t\t\tconsole.log(\"Generate Trace success\");\n\t\t\t},\n\t\t\terror: function (error) {\n\t\t\t\tif (error.responseJSON.error) {\n\t\t\t\t\terrorLogActions.updateErrorLog(error.responseJSON.error.cmd);\n\t\t\t\t}\n\t\t\t\tconsole.log(error);\n\t\t\t\twindow.alert(\"An error has occurred in TraceGraph, please refresh the page.\");\n\t\t\t}\n\t\t});\n\t\tthis.changeButtonStatus(event);\n\t},\n\tchangeButtonStatus: function(event) {\n\t\t// If the current button clicked is disabled, do nothing\n\t\tif ($(\"#\"+event.currentTarget.id).hasClass(\"disabled\")) {\n\t\t\treturn;\n\t\t}\n\n\t\t// Disable all the following buttons and enable the next button only\n\t\t$(\"#\"+event.currentTarget.id).nextAll().removeClass(\"btn-primary\");\n\t\t$(\"#\"+event.currentTarget.id).nextAll().addClass(\"disabled\");\n\t\t$(\"#\"+event.currentTarget.id).next().removeClass(\"disabled\");\n\t\t$(\"#\"+event.currentTarget.id).next().addClass(\"btn-primary\");\n\t}\n});\n\nfunction compareNumbers(a, b) {\n\treturn a - b;\n}\n\nmodule.exports = FunctionTabs;" }, { "alpha_fraction": 0.6631439924240112, "alphanum_fraction": 0.6684280037879944, "avg_line_length": 30.87368392944336, "blob_id": "a7bb93217f77e71b422bc44ef931b49499d54a09", "content_id": "e26db449ff18c3c931d618ca0b614f58b0d25a78", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", 
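The functionTabs.js component above drives the web-app endpoints (/compileToIR, /profiling, /faultInjection, /traceGraph) with JSON POSTs. As a rough, non-authoritative sketch of exercising one of those endpoints outside the browser: the base URL assumes the Port 8080 set in server.js, and the fileName/injectionMode values here are hypothetical placeholders, not part of the app.

```
# Hypothetical smoke test for the /profiling endpoint driven by functionTabs.js.
# Assumes the Express server (server.js) is running locally on port 8080.
import requests

BASE = "http://localhost:8080"

def profile(file_name, injection_mode, prog_input=""):
    # Payload fields mirror what onProfilingClick() sends.
    resp = requests.post(BASE + "/profiling",
                         json={"fileName": file_name,
                               "injectionMode": injection_mode,
                               "input": prog_input},
                         timeout=300)
    resp.raise_for_status()
    return resp.json()["consoleLog"]

if __name__ == "__main__":
    # The injectionMode structure is app-specific; an empty dict is a stand-in.
    print(profile("sqrt.ll", {}))
```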
"length_bytes": 3028, "license_type": "permissive", "max_line_length": 192, "num_lines": 95, "path": "/bin/HardwareFailureAutoScan.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n\"\"\"\n%(prog)s takes a single IR file as input and scan all instructions to find potential applicable target points for fault injection, and to create applicable failure modes list.\nNote %(prog)s won't generate input.yaml file like SoftwareFailureAuto does.\n\nUsage: %(prog)s [OPTIONS] <source IR file>\n\nList of options:\n\n-outputfilename=<filename>: set the name of the file that stores the list of applicable hardware selectors (instruction selector/reg selector) (default: llfi.applicable.hardware.selectors.txt)\nNote: If <filename> is a relative path instead of an absolute path, the base path of <filename> will be the path of the targeting IR file instead of the calling path.\n\n--help: print this message.\n\"\"\"\n\n# this script creates a text file ; Applicable-Failure-Modes.txt\n\n\nimport os\nimport subprocess\nimport sys\nfrom subprocess import call\nimport yaml\n\nscript_path = os.path.realpath(os.path.dirname(__file__))\nsys.path.append(os.path.join(script_path, '../config'))\nimport llvm_paths\n\n\noptbin = os.path.join(llvm_paths.LLVM_DST_ROOT, \"bin/opt\")\nllcbin = os.path.join(llvm_paths.LLVM_DST_ROOT, \"bin/llc\")\nllfipasses = os.path.join(script_path, \"../llvm_passes/llfi-passes.so\")\nllfilinklib = os.path.join(script_path, \"../runtime_lib\")\nprog = os.path.basename(sys.argv[0])\n# option list for AutoScan pass\noptions = []\n# output file name of AutoScan pass\nfilename = \"llfi.applicable.hardware.selectors.txt\"\n# directory of the target IR\nbasedir = \"\"\n\ndef parseArgs(args):\n global basedir\n global options\n global filename\n \n cwd = os.getcwd()\n for i, arg in enumerate(args):\n option = arg\n if os.path.isfile(arg):\n basedir = os.path.realpath(os.path.dirname(arg))\n option = os.path.basename(arg)\n options.append(option)\n elif arg.startswith('-outputfilename='):\n filename = arg.split('-outputfilename=')[-1]\n options.append('-hardwarescan_outputfilename='+filename)\n os.chdir(basedir)\n\ndef usage(msg = None):\n retval = 0\n if msg is not None:\n retval = 1\n msg = \"ERROR: \" + msg\n print(msg, file=sys.stderr)\n print(__doc__ % globals(), file=sys.stderr)\n sys.exit(retval)\n\ndef runAutoScan(args):\n global filename\n execlist = [optbin , \"-load\", llfipasses, \"-HardwareFailureAutoScanPass\", \"-analyze\"]\n execlist.extend(args)\n print(' '.join(execlist))\n p = subprocess.Popen(execlist)\n p.wait()\n if p.returncode != 0:\n print(\"ERROR: Hardware Auto scan pass return code !=0\\n\")\n exit(p.returncode)\n elif os.path.isfile(os.path.join(basedir, filename)) == False:\n print(\"ERROR: No output file found at: \"+os.path.join(basedir, filename)+\"!\\n\")\n exit(1)\n return 0\n\n\ndef main(args):\n parseArgs(args)\n r = runAutoScan(options)\n return 0\n\nif __name__ == \"__main__\":\n if len(sys.argv[1:]) < 1 or sys.argv[1] == '--help' or sys.argv[1] == '-h':\n usage()\n sys.exit(0)\n r = main(sys.argv[1:])\n sys.exit(r)\n" }, { "alpha_fraction": 0.7680000066757202, "alphanum_fraction": 0.7720000147819519, "avg_line_length": 26.77777862548828, "blob_id": "c1ad30bbd9477a52587bd932d76c5cffdbfe1f29", "content_id": "ae8f87ed8a7f91f6c41e3edb9b97c4cdcac9239a", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", 
"length_bytes": 500, "license_type": "permissive", "max_line_length": 57, "num_lines": 18, "path": "/test_suite/SCRIPTS/CMakeLists.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\n\ninclude(../../config/copy_utils.cmake)\n\nproject(test_suite-SCRIPTS)\n\ncopy(build_prog.py build_prog.py)\ncopy(check_injection.py check_injection.py)\ncopy(clean_prog.py clean_prog.py)\ncopy(clear_all.py clear_all.py)\ncopy(clear_llfi.py clear_llfi.py)\ncopy(deploy_prog.py deploy_prog.py)\ncopy(inject_prog.py inject_prog.py)\ncopy(test_trace_tools.py test_trace_tools.py)\ncopy(llfi_test.py llfi_test)\ncopy(test_generate_makefile.py test_generate_makefile.py)\n\ngenCopy()\n" }, { "alpha_fraction": 0.6411960124969482, "alphanum_fraction": 0.655315637588501, "avg_line_length": 19.939130783081055, "blob_id": "8a0dc5527dad2c47f155f24ede19395c3af90738", "content_id": "04dc01215cc1da1c3bba4879224caf8b0ad882b0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2408, "license_type": "permissive", "max_line_length": 82, "num_lines": 115, "path": "/web-app/server/server.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var express = require('express');\nvar app = express();\nvar path = require('path');\nvar formidable = require('formidable');\nvar fs = require('fs');\nvar http = require('http');\n\nvar Port = 8080;\nvar fileUpload = require('./fileUpload');\nvar compileToIR = require('./compileToIR');\nvar preInstrument = require('./preInstrument');\nvar instrument = require('./instrument');\nvar profiling = require('./profiling');\nvar runtimeOptions = require('./runtimeOptions');\nvar faultInjection = require('./faultInjection');\nvar traceGraph = require('./traceGraph');\nvar bodyParser = require('body-parser');\n\napp.use(express.static(path.join(__dirname, '../views')));\napp.use(bodyParser.json());\n\napp.get('/', function(req, res){\n\ttry {\n\t\tres.sendFile(path.join(__dirname, 'index.html'));\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/uploadFile', function(req, res){\n\ttry {\n\t\tfileUpload.processFileUpload(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/compileToIR', function(req, res){\n\ttry {\n\t\tcompileToIR.processCompileToIR(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/preInstrument', function(req, res){\n\ttry {\n\t\tpreInstrument.processPreInstrument(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/instrument', function(req, res){\n\ttry {\n\t\tinstrument.processInstrument(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/profiling', function(req, res){\n\ttry {\n\t\tprofiling.processProfiling(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/runtimeOptions', function(req, res){\n\ttry {\n\t\truntimeOptions.processRuntimeOptions(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/faultInjection', function(req, res){\n\ttry {\n\t\tfaultInjection.processFaultInjection(req,res);\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.post('/traceGraph', function(req, res){\n\ttry {\n\t\ttraceGraph.processTrace(req,res);\n\t} catch (err) 
{\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\napp.get('/tracepdf', function(req, res){\n\ttry {\n\t\tres.download(\"./uploads/\" + req.ip +\"/llfi/trace_report_output/TraceGraph.pdf\");\n\t\tconsole.log('Trace graph sent');\n\t} catch (err) {\n\t\tres.status(500);\n\t\tres.send(err);\n\t}\n});\n\nvar server = app.listen(Port, function(){\n\tconsole.log('Server listening on port ' + Port);\n});\n" }, { "alpha_fraction": 0.7193146347999573, "alphanum_fraction": 0.736760139465332, "avg_line_length": 32.079999923706055, "blob_id": "66cab585e84e16e3e6135e99adfde79ea45b9371", "content_id": "b4999d2d4d6997e7e7e07ba26eb4c36e34cb2968", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3210, "license_type": "permissive", "max_line_length": 251, "num_lines": 100, "path": "/tutorials/ISSRE19/README.md", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "# ISSRE 2019 Tutorial Activity - Instructions\n\n## Setup\nWe provide a VM image with LLFI and all dependencies pre-installed, with the tutorial files also included. To use the VM image please install [VirtualBox](https://www.virtualbox.org/wiki/Downloads).\nYou are welcome to use your own installation of LLFI, but these instructions assume you are using the VM image with specific experiment folder paths.\n\nYou can download the VirtualBox image [here](https://drive.google.com/file/d/15KK7-zy7ba-I9tkGloX_XOqipkeQtnWL/view?usp=sharing).\n\nusername: `llfi`\n\npassword: `root`\n\n## Benchmarks\nWe provide two benchmarks to run fault injections for this tutorial. They are simple deterministic programs with predefined inputs.\n\n### sqrt\nThis program implements a square root operation using Taylor series. The predefined input to the sqrt program is the floating point number `123.123`.\n\n### matmult\nThis program multiplies two 20 by 20 matrices (predefined). The program outputs all the values of the result matrix.\n\n## Part 1: Fault Injection\n1. Open a terminal and navigate to the ISSRE19 folder on the desktop: `cd ~/Desktop/ISSRE19/`\n\n2. Navigate to the first benchmark folder: `cd 1-sqrt`\n\n3. The compiled LLVM IR file (.ll) is already provided. To re-compile from C to IR use the command:\n\n`clang -S -emit-llvm sqrt.c -o sqrt.ll`\n\n4. Instrumentation phase:\n\n`$LLFI/instrument --readable sqrt.ll`\n\n5. Profiling phase:\n\n`$LLFI/profile llfi/sqrt-profiling.exe`\n\n6. Fault injection phase:\n\n`$LLFI/injectfault llfi/sqrt-faultinjection.exe`\n\n7. Execute the `measure.py` script in each benchmark folder to measure the SDC and crash rates.\n\n`python3 ./measure.py`\n\n8. (Optional) Navigate to the second benchmark folder `cd ../2-matmult` and repeat Steps 4-7 (make sure to change `sqrt` to `matmult` in the commands). Observe the differences in SDC and crash rates.\n\n\n## Part 2: Specify injection targets\n\n1. Open the input.yaml file and change the target instruction type from `all` to `add` and `sub` instructions only.\n\n```\ncompileOption:\n    instSelMethod:\n      - insttype:\n          include: \n            - add\n            - sub\n          exclude:\n            - ret\n\n    regSelMethod: regloc\n    regloc: dstreg\n\nrunOption:\n    - run:\n        numOfRuns: 2000\n        fi_type: bitflip\n        \ndefaultTimeout: 30\n\n```\n\n2. Repeat the fault injection experiments and observe how the resulting SDC and crash rates are affected. 
(Please note: to re-run FI experiments in the same folder you must delete or re-name the existing `llfi` folder, e.g., `rm -rf ./llfi/ ./llfi.*`)\n\n\n## Part 3: Trace analysis\n1. Navigate to the last benchmark folder: `cd ../3-matmult_trace`\n\n2. Instrumentation phase:\n\n`$LLFI/instrument --readable matmult.ll`\n\n3. Profiling phase:\n\n`$LLFI/profile llfi/matmult-profiling.exe`\n\n4. Fault injection phase:\n\n`$LLFI/injectfault llfi/matmult-faultinjection.exe`\n\n5. Analyze trace propagation:\n\n`$LLFItools/tracediff llfi/baseline/llfi.stat.trace.prof.txt llfi/llfi_stat_output/llfi.stat.trace.0-5.txt > diffReport.txt` (Modify the 5 to the desired trace number.)\n\n`$LLFItools/traceontograph diffReport.txt llfi.stat.graph.dot > tracedGraph.dot`\n\n`$LLFItools/zgrviewer/run.sh tracedGraph.dot`\n\n\n" }, { "alpha_fraction": 0.6650426387786865, "alphanum_fraction": 0.6650426387786865, "avg_line_length": 24.65625, "blob_id": "64bbbd2d3c9b572d4d3d8ac8a42a787526dbc2d2", "content_id": "70ba9b2ead05a7857312ea832f64c0f1bbf5c63f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 821, "license_type": "permissive", "max_line_length": 68, "num_lines": 32, "path": "/config/copy_utils.cmake", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "set(DEFAULT_TARGETS \"\")\n\nset(DEFAULT_BIN_LOC \"/usr/local/bin\")\n\nmacro(copy SRC DST)\n  set(TEMPDST \"${CMAKE_CURRENT_BINARY_DIR}/${DST}\")\n  set(TEMPSRC \"${CMAKE_CURRENT_SOURCE_DIR}/${SRC}\")\n  set(DEFAULT_TARGETS ${DEFAULT_TARGETS} ${TEMPDST})\n  add_custom_command(\n    OUTPUT ${TEMPDST}\n    COMMAND ${CMAKE_COMMAND} -E copy ${TEMPSRC} ${TEMPDST}\n    DEPENDS ${TEMPSRC}\n  )\nendmacro()\n\nmacro(copydir SRC DST)\n  set(TEMPDST \"${CMAKE_CURRENT_BINARY_DIR}/${DST}\")\n  set(TEMPSRC \"${CMAKE_CURRENT_SOURCE_DIR}/${SRC}\")\n  set(DEFAULT_TARGETS ${DEFAULT_TARGETS} ${TEMPDST})\n  add_custom_command(\n    OUTPUT ${TEMPDST}\n    COMMAND ${CMAKE_COMMAND} -E copy_directory ${TEMPSRC} ${TEMPDST}\n    DEPENDS ${TEMPSRC}\n  )\nendmacro()\n\nmacro(genCopy)\n  add_custom_target(\n    ${PROJECT_NAME} ALL\n    DEPENDS ${DEFAULT_TARGETS}\n  )\nendmacro()\n" }, { "alpha_fraction": 0.7113401889801025, "alphanum_fraction": 0.7113401889801025, "avg_line_length": 19.399999618530273, "blob_id": "3209a3cdf49c9f4bd4021e82368090079e0afe34", "content_id": "e15bc42253d3a3ce1f46dc06e375843e70c82985", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 291, "license_type": "permissive", "max_line_length": 64, "num_lines": 15, "path": "/llvm_passes/hardware_failures/InstTypeFIInstSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"llvm/IR/Instructions.h\"\n\n#include \"InstTypeFIInstSelector.h\"\n\n\nnamespace llfi {\nbool InstTypeFIInstSelector::isInstFITarget(Instruction *inst) {\n  unsigned opcode = inst->getOpcode();\n  if (opcodelist->find(opcode) != opcodelist->end()) {\n    return true;\n  }\n  return false;\n}\n\n}\n" }, { "alpha_fraction": 0.5278654098510742, "alphanum_fraction": 0.5962145328521729, "avg_line_length": 24.026315689086914, "blob_id": "dd3b5e29a­b8128a2146d364ff2d682b2ab1cb49c", "content_id": "01e8841bcc45534954a90afa0500f58327c04fd7", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 951, 
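Part 3 of the README runs tracediff and traceontograph by hand. As an illustration only, here is a Python sketch that wraps those two commands; it assumes an LLFItools environment variable pointing at the tools directory, as in the tutorial, and that profiling and fault injection have already produced the trace files.

```
# Sketch of scripting the Part 3 trace-analysis commands from the README above.
import os, subprocess

TOOLS = os.environ["LLFItools"]  # assumed to be set as in the tutorial VM

def diff_and_graph(run_id="0-5"):
    faulty = "llfi/llfi_stat_output/llfi.stat.trace.%s.txt" % run_id
    # tracediff: compare the faulty trace against the golden (profiling) trace
    with open("diffReport.txt", "w") as out:
        subprocess.run([os.path.join(TOOLS, "tracediff"),
                        "llfi/baseline/llfi.stat.trace.prof.txt", faulty],
                       stdout=out, check=True)
    # traceontograph: overlay the diff report on the program's dot graph
    with open("tracedGraph.dot", "w") as out:
        subprocess.run([os.path.join(TOOLS, "traceontograph"),
                        "diffReport.txt", "llfi.stat.graph.dot"],
                       stdout=out, check=True)

if __name__ == "__main__":
    diff_and_graph()
```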
"license_type": "permissive", "max_line_length": 75, "num_lines": 38, "path": "/test_suite/MakefileGeneration/readable_IR/mcf.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/**************************************************************************\nMCF.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. \nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:21:29 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: mcf.h,v 1.9 2005/02/17 19:42:21 bzfloebe Exp $ */\n\n\n\n#ifndef _MCF_H\n#define _MCF_H\n\n\n#include \"defines.h\"\n#include \"mcfutil.h\"\n#include \"readmin.h\"\n#include \"output.h\" \n#include \"pstart.h\"\n#include \"psimplex.h\"\n#include \"pbeampp.h\"\n#include \"implicit.h\"\n#include \"limits.h\"\n\n\n#endif\n" }, { "alpha_fraction": 0.581654965877533, "alphanum_fraction": 0.5818976163864136, "avg_line_length": 43.80434799194336, "blob_id": "9b11cf3569d1e6d7b798367e093e6550284bccba", "content_id": "f389fc199c56870b8179962aee7bfa4d2f3adef4", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4121, "license_type": "permissive", "max_line_length": 106, "num_lines": 92, "path": "/llvm_passes/SoftwareFailureAutoScanPass.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#define DEBUG_TYPE \"SoftwareFailureAutoScanPass\"\n\n#include \"FICustomSelectorManager.h\"\n#include \"Utils.h\"\n#include \"FIInstSelectorManager.h\"\n#include \"FIInstSelector.h\"\n#include \"InstTypeFIInstSelector.h\"\n#include \"FuncNameFIInstSelector.h\"\n#include \"FIRegSelector.h\"\n#include \"RegLocBasedFIRegSelector.h\"\n\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/CommandLine.h\"\n\n#include <fstream>\n#include <iostream>\n\nusing namespace llvm;\nnamespace llfi{\n static cl::opt< std::string > outputpath(\"softwarescan_outputfilename\",\n cl::desc(\"The path to store a list of applicable software failures\"),\n cl::init(\"llfi.applicable.software.failures.txt\"));\n\n class SoftwareFailureAutoScanPass: public ModulePass{\n private:\n std::ofstream selector_record_file;\n public:\n static char ID;\n SoftwareFailureAutoScanPass():ModulePass(ID){}\n virtual bool runOnModule(Module &M){\n selector_record_file.open(std::string(outputpath).c_str(), std::ofstream::out);\n selector_record_file<<\"instSelMethod:\"<<\"\\n\";\n\n FICustomInstSelectorManager *im = FICustomInstSelectorManager::getCustomInstSelectorManager();\n FICustomRegSelectorManager *rm = FICustomRegSelectorManager::getCustomRegSelectorManager();\n std::set<std::string> all_software_failure_names;\n im->getAllSoftwareSelectors(all_software_failure_names);\n // errs()<<\"get all soft failures\\n\";\n for(std::set<std::string>::iterator name = all_software_failure_names.begin();\n name != all_software_failure_names.end(); name++){\n // errs()<<\"# start on: \"<<*name<<\"\\n\";\n FIInstSelectorManager 
*fiinstselector = new FIInstSelectorManager;\n          fiinstselector->addSelector(im->getCustomInstSelector(*name));\n          // errs()<<\"# inst selector done on: \"<<*name<<\"\\n\";\n          FIRegSelector* firegselector = rm->getCustomRegSelector(*name);\n          // errs()<<\"# reg selector done on: \"<<*name<<\"\\n\";\n          // select fault injection instructions\n          std::set<Instruction*> fiinstset;\n          fiinstselector->getFIInsts(M, &fiinstset);\n          // errs()<<\"# size of inst set: \"<<fiinstset.size()<<\"\\n\";\n          std::map<Instruction*, std::list< int >* > fi_inst_regs_map;\n          // select fault injection registers\n          firegselector->getFIInstRegMap(&fiinstset, &fi_inst_regs_map);\n          delete fiinstselector;\n          // errs()<<\"# collection done on: \"<<*name<<\"\\n\";\n          bool not_empty = false;\n          for(std::map<Instruction*, std::list<int>* >::iterator MI = fi_inst_regs_map.begin();\n            MI != fi_inst_regs_map.end(); MI++){\n            if(MI->second->empty())  continue;\n            else not_empty = true;\n          }\n          if(not_empty == true){\n            recordInstSelector(*name);\n          }\n          // errs()<<\"# check done on: \"<<*name<<\"\\n\";\n          for(std::map<Instruction*, std::list<int>* >::iterator MI = fi_inst_regs_map.begin();\n            MI != fi_inst_regs_map.end(); MI++){\n            delete MI->second;\n          }\n        }\n        selector_record_file.close();\n        return false;\n      }\n\n      void recordInstSelector(std::string selector_name){\n        if(selector_record_file.is_open() == false){\n          std::cerr<<\"ERROR: cannot open file to record applicable selectors: \";\n          std::cerr<<outputpath<<\"\\n\";\n          selector_record_file.close();\n          return;\n        }\n        selector_record_file<<\"  - \"<<selector_name<<\"\\n\";\n        return;\n      }\n  };\n  char SoftwareFailureAutoScanPass::ID = 0;\n  static RegisterPass<SoftwareFailureAutoScanPass> \n    X(\"SoftwareFailureAutoScanPass\", \"Automatic scanner of software failure modes\", \n      false, false);\n}" }, { "alpha_fraction": 0.6798747777938843, "alphanum_fraction": 0.6854891180992126, "avg_line_length": 33.30370330810547, "blob_id": "b9555815066a9047252c96fdc6a8448b003f4477", "content_id": "259492b2c2bdaba3e8c1e50e3144dcaa5440769b", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9262, "license_type": "permissive", "max_line_length": 112, "num_lines": 270, "path": "/test_suite/SCRIPTS/inject_prog.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\nimport subprocess\nimport time\nfrom threading import Thread\n\ntry:\n\tfrom Queue import Queue, Empty\nexcept ImportError:\n\tfrom queue import Queue, Empty  # python 3.x\nON_POSIX = 'posix' in sys.builtin_module_names\n\ninstrument_script = \"\"\nprofile_script = \"\"\ninjectfault_script = \"\"\nbatchinstrument_script = \"\"\nbatchprofile_script = \"\"\nbatchinjectfault_script = \"\"\nautoscan_script = \"\"\n\ndef enqueue_output(out, queue):\n\tfor line in iter(out.readline, b''):\n\t\tqueue.put(line)\n\tout.close()\n\ndef startEchoServer(work_dir):\n\tprint(\"using startEchoServer\")\n\texeclist = [\"stdbuf\", '-i0', '-o0', '-e0']\n\texeclist.extend([os.path.join(work_dir, \"echoServer.exe\")])\n\tserver = subprocess.Popen(execlist, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\tq = Queue()\n\tt = Thread(target=enqueue_output, args=(server.stdout, q))\n\tt.daemon = True # thread dies with the program\n\tt.start()\n\tcount = 0\n\twhile server.poll() == None:\n\t\tif count > 50:\n\t\t\tserver.terminate()\n\t\t\treturn startEchoServer(work_dir)\n\t\ttry: line = q.get_nowait() # or q.get(timeout=.1)\n\t\texcept Empty:\n\t\t\tprint('no output yet')\n\t\t\tcount += 1\n\t\t\ttime.sleep(1)\n\t\telse: \n\t\t\tprint (line)\n\t\t\tline = str(line)\n\t\t\tif \"Server running...waiting for connections.\" in line:\n\t\t\t\treturn server\n\t\t\telse:\n\t\t\t\tcount += 1\n\t\t\t\ttime.sleep(1)\n\ndef callLLFI(work_dir, target_IR, prog_input):\n\tglobal instrument_script\n\tglobal profile_script\n\tglobal injectfault_script\n\n\ttry:\n\t\tos.chdir(work_dir)\n\texcept:\n\t\tprint (\"ERROR: Unable to change directory to:\", work_dir)\n\t\treturn -1, None\n\twith open(\"llfi.test.log.instrument.txt\", 'w', buffering=1) as log:\n\t\tp = subprocess.Popen([instrument_script, \"--readable\", \"-lpthread\", target_IR], stdout=log, stderr=log)\n\t\tp.wait()\n\t\tif p.returncode != 0:\n\t\t\tprint (\"ERROR: instrument failed for:\", work_dir, target_IR)\n\t\t\treturn -1, None\n\t\telse:\n\t\t\tprint (\"MSG: instrument succeeded for:\", work_dir, target_IR)\n\n\twith open(\"llfi.test.log.profile.txt\", 'w', buffering=1) as log:\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\tserver = startEchoServer(work_dir)\n\t\t\tprint (\"MSG: echoServer.ll started for profile, please make sure there is only one echoServer running\\n\")\n\t\t\ttime.sleep(2)\n\t\tprofile_exe = target_IR.split(\".ll\")[0]+\"-profiling.exe\"\n\t\texeclist = [profile_script, \"./llfi/\"+profile_exe]\n\t\texeclist.extend(prog_input.split(' '))\n\t\tp = subprocess.Popen(execlist, stdout=log, stderr=log)\n\t\tp.wait()\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\ttry:\n\t\t\t\tserver.terminate()\n\t\t\t\tprint (\"MSG: echoServer.exe terminated for profile.\\n\")\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Unable to terminate echoServer.exe in profile for:\", work_dir)\n\t\tif p.returncode != 0:\n\t\t\tprint (\"ERROR: profile failed for:\", work_dir, target_IR)\n\t\t\treturn -1, None\n\t\telse:\n\t\t\tprint (\"MSG: profile succeeded for:\", work_dir, target_IR, prog_input)\n\n\twith open(\"llfi.test.log.injectFault.txt\", 'w', buffering=1) as log:\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\tserver = startEchoServer(work_dir)\n\t\t\tprint (\"MSG: echoServer.ll started for injectfault, please make sure there is only one echoServer running\\n\")\n\t\t\ttime.sleep(2)\n\t\tfaultinjection_exe = target_IR.split(\".ll\")[0]+\"-faultinjection.exe\"\n\t\texeclist = [injectfault_script, 
\"./llfi/\"+faultinjection_exe]\n\t\texeclist.extend(prog_input.split(' '))\n\t\tp = subprocess.Popen(execlist, stdout=log, stderr=log)\n\t\tt = {\"name\":' '.join(work_dir.split('/')[-3:])+\"/\"+target_IR,\n\t\t\t\"process\":p}\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\tp.wait()\n\t\t\ttry:\n\t\t\t\tserver.terminate()\n\t\t\t\tprint (\"MSG: echoServer.exe terminated for profile.\\n\")\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Unable to terminate echoServer.exe in injectfault for\", work_dir)\n\t\t\t\n\treturn 0, t\n\ndef callBatchLLFI(work_dir, target_IR, prog_input):\n\tglobal batchinstrument_script\n\tglobal batchprofile_script\n\tglobal batchinjectfault_script\n\tglobal autoscan_script\n\n\ttry:\n\t\tos.chdir(work_dir)\n\texcept:\n\t\tprint (\"ERROR: Unable to change directory to:\", work_dir)\n\t\treturn -1, None\n\n\tif 'SoftwareFailureAutoScan' in os.path.basename(work_dir):\n\t\twith open(\"llfi.test.log.SoftwareFailureAutoScan.txt\", 'w', buffering=1) as log:\n\t\t\tp = subprocess.Popen([autoscan_script, target_IR], stdout=log, stderr=log)\n\t\t\tp.wait()\n\t\t\tif p.returncode != 0:\n\t\t\t\tprint (\"ERROR: SoftwareFailureAutoScan failed for:\", work_dir, target_IR)\n\t\t\t\treturn -1, None\n\t\t\telse:\n\t\t\t\tprint (\"MSG: SoftwareFailureAutoScan succeed for:\", work_dir, target_IR)\n\n\twith open(\"llfi.test.log.instrument.txt\", 'w', buffering=1) as log:\n\t\tp = subprocess.Popen([batchinstrument_script, \"--readable\", \"-lpthread\", target_IR], stdout=log, stderr=log)\n\t\tp.wait()\n\t\tif p.returncode != 0:\n\t\t\tprint (\"ERROR: batchInstrument failed for:\", work_dir, target_IR)\n\t\t\treturn -1, None\n\t\telse:\n\t\t\tprint (\"MSG: batchInstrument succeed for:\", work_dir, target_IR)\n\n\twith open(\"llfi.test.log.profile.txt\", 'w', buffering=1) as log:\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\tserver = startEchoServer(work_dir)\n\t\t\tprint (\"MSG: echoServer.ll started for profile, please make sure there is only one echoServer running\\n\")\n\t\t\ttime.sleep(2)\n\t\texeclist = [batchprofile_script, target_IR]\n\t\texeclist.extend(prog_input.split(' '))\n\t\tp = subprocess.Popen(execlist, stdout=log, stderr=log)\n\t\tp.wait()\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\ttry:\n\t\t\t\tserver.terminate()\n\t\t\t\tprint (\"MSG: echoServer.exe terminated for profile.\\n\")\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Unable to terminate echoServer.exe in profile for:\", work_dir)\n\t\tif p.returncode != 0:\n\t\t\tprint (\"ERROR: profile failed for:\", work_dir, target_IR)\n\t\t\treturn -1, None\n\t\telse:\n\t\t\tprint (\"MSG: profile succeed for:\", work_dir, target_IR, prog_input)\n\n\twith open(\"llfi.test.log.injectFault.txt\", 'w', buffering=1) as log:\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\tserver = startEchoServer(work_dir)\n\t\t\tprint (\"MSG: echoServer.ll started for injectfault, please make sure there is only one echoServer running\\n\")\n\t\t\ttime.sleep(2)\n\t\texeclist = [batchinjectfault_script, target_IR]\n\t\texeclist.extend(prog_input.split(' '))\n\t\tp = subprocess.Popen(execlist, stdout=log, stderr=log)\n\t\tt = {\"name\":' '.join(work_dir.split('/')[-3:])+\"/\"+target_IR,\n\t\t\t\"process\":p}\n\t\tif target_IR == \"echoClient.ll\":\n\t\t\tp.wait()\n\t\t\ttry:\n\t\t\t\tserver.terminate()\n\t\t\t\tprint (\"MSG: echoServer.exe terminated for profile.\\n\")\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Unable to terminate echoServer.exe in injectfault for\", work_dir)\n\t\t\t\n\treturn 0, t\n\ndef inject_prog(num_threads, *prog_list):\n\tglobal 
instrument_script\n\tglobal profile_script\n\tglobal injectfault_script\n\tglobal batchinstrument_script\n\tglobal batchprofile_script\n\tglobal batchinjectfault_script\n\tglobal autoscan_script\n\n\tr = 0\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\tllfi_bin_dir = os.path.join(script_dir, '../../bin')\n\tinstrument_script = os.path.join(llfi_bin_dir, \"instrument\")\n\tprofile_script = os.path.join(llfi_bin_dir, \"profile\")\n\tinjectfault_script = os.path.join(llfi_bin_dir, \"injectfault\")\n\tbatchinstrument_script = os.path.join(llfi_bin_dir, \"batchInstrument\")\n\tbatchprofile_script = os.path.join(llfi_bin_dir, \"batchProfile\")\n\tbatchinjectfault_script = os.path.join(llfi_bin_dir, \"batchInjectfault\")\n\tautoscan_script = os.path.join(llfi_bin_dir, \"SoftwareFailureAutoScan\")\n\t\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\twork_dict = {}\n\tfor test in suite[\"SoftwareFaults\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"SoftwareFaults\" in prog_list:\n\t\t\twork_dict[\"./SoftwareFaults/\"+test] = suite[\"SoftwareFaults\"][test]\n\tfor test in suite[\"HardwareFaults\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"HardwareFaults\" in prog_list:\n\t\t\twork_dict[\"./HardwareFaults/\"+test] = suite[\"HardwareFaults\"][test]\n\tfor test in suite[\"BatchMode\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"BatchMode\" in prog_list:\n\t\t\twork_dict[\"./BatchMode/\"+test] = suite[\"BatchMode\"][test]\n\t\n\trunning_list = []\n\texitcode_list = []\n\tfor test_path in work_dict:\n\t\twhile(len(running_list) >= num_threads):\n\t\t\tfor t in running_list:\n\t\t\t\tif t[\"process\"].poll() is None:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint (\"MSG: Injection for:\", t[\"name\"], \"finished!\\n\")\n\t\t\t\t\trunning_list.remove(t)\n\t\t\t\t\trecord={\"name\":t[\"name\"], \"exitcode\":t[\"process\"].returncode}\n\t\t\t\t\texitcode_list.append(record)\n\n\t\tinject_dir = os.path.abspath(os.path.join(testsuite_dir, test_path))\n\t\tinject_prog = suite[\"PROGRAMS\"][work_dict[test_path]][0]\n\t\tinject_input = str(suite[\"INPUTS\"][work_dict[test_path]])\n\t\tif test_path.startswith('./BatchMode'):\n\t\t\tcode, t = callBatchLLFI(inject_dir, inject_prog, inject_input)\n\t\telse:\n\t\t\tcode, t = callLLFI(inject_dir, inject_prog, inject_input)\n\t\tif code != 0:\n\t\t\tprint (\"ERROR: Skip:\", test_path)\n\t\t\tcontinue\n\t\trunning_list.append(t)\n\n\twhile(len(running_list) > 0):\n\t\tfor t in running_list:\n\t\t\tif t[\"process\"].poll() is None:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint (\"MSG: Injection for:\", t[\"name\"], \"finished!\\n\")\n\t\t\t\trunning_list.remove(t)\n\t\t\t\trecord={\"name\":t[\"name\"], \"exitcode\":t[\"process\"].returncode}\n\t\t\t\texitcode_list.append(record)\n\treturn r\n\nif __name__ == \"__main__\":\n\tr = inject_prog(int(sys.argv[1]), *sys.argv[2:])\n\tsys.exit(r)\n" }, { "alpha_fraction": 0.7152974605560303, "alphanum_fraction": 0.7181302905082703, "avg_line_length": 23.34482765197754, "blob_id": "2aec4c8fa2802a102e8ff310ebe1e46e40908e3b", "content_id": "a395fb8e61ce4f0e2b6e1a233a103951e88eadf0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", 
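inject_prog.py above throttles concurrent injection runs by polling Popen.poll() in a busy loop over a running list. A sketch of the same bounded-parallelism idea using concurrent.futures follows; the command list and names here are illustrative, not part of the test suite.

```
# Sketch of the throttling pattern inject_prog.py implements by hand:
# run at most num_threads injection commands at once.
import subprocess
from concurrent.futures import ThreadPoolExecutor, as_completed

def run_one(cmd):
    # Each worker blocks on one subprocess, so the pool size caps parallelism.
    return cmd, subprocess.call(cmd, shell=True)

def run_all(commands, num_threads=4):
    results = []
    with ThreadPoolExecutor(max_workers=num_threads) as pool:
        futures = [pool.submit(run_one, c) for c in commands]
        for fut in as_completed(futures):
            name, code = fut.result()
            print("MSG: Injection for:", name, "finished!")
            results.append({"name": name, "exitcode": code})
    return results
```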
"length_bytes": 706, "license_type": "permissive", "max_line_length": 75, "num_lines": 29, "path": "/runtime_lib/FaultInjector.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FAULT_INJECTOR_H\n#define FAULT_INJECTOR_H\n\n#include <string>\n\nclass FaultInjector {\n // TODO: need to change the interface when we inject multiple bits faults\n public:\n virtual void injectFault(long llfi_index, unsigned size, unsigned fi_bit,\n char *buf) = 0;\n //virtual std::string getFaultInjectorType() = 0;\n virtual std::string getFaultInjectorType(){\n\t\treturn std::string(\"Unknown\");\n\t}\n};\n\nclass HardwareFaultInjector: public FaultInjector {\n\tstd::string getFaultInjectorType(){\n\t\treturn std::string(\"HardwareFault\");\n\t}\n};\n\nclass SoftwareFaultInjector: public FaultInjector {\n\tstd::string getFaultInjectorType(){\n\t\treturn std::string(\"SoftwareFault\");\n\t}\n};\n\n#endif\n" }, { "alpha_fraction": 0.6957672238349915, "alphanum_fraction": 0.6957672238349915, "avg_line_length": 20.05555534362793, "blob_id": "8fcb5dd5f43542cba5196ba69165a031e7e7ba11", "content_id": "11e049b017d06cee6d24c391fbed64283e1a7aac", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 378, "license_type": "permissive", "max_line_length": 60, "num_lines": 18, "path": "/web-app/views/src/js/components/mainWindow/bottomPannel.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar InputWindow = require(\"./bottomPannel/inputWindow\");\nvar OutputSummary = require(\"./bottomPannel/outputSummary\");\n\n\n\nvar BottomPannel = React.createClass({\n\trender: function() {\n\t\treturn (\n\t\t\t<div className=\"bottomPannel\">\n\t\t\t\t<InputWindow></InputWindow>\n\t\t\t\t<OutputSummary></OutputSummary>\n\t\t\t</div>\n\t\t);\n\t}\n});\n\nmodule.exports = BottomPannel;" }, { "alpha_fraction": 0.6950549483299255, "alphanum_fraction": 0.6950549483299255, "avg_line_length": 19.16666603088379, "blob_id": "37ec5bf0cc8bd0ee997c51f23a66ab913c53ecb8", "content_id": "34d34a97ee5465e405e40d7a71e658c16e7d1714", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 364, "license_type": "permissive", "max_line_length": 64, "num_lines": 18, "path": "/llvm_passes/hardware_failures/FuncNameFIInstSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "\n#include \"llvm/IR/Instructions.h\"\n\n#include \"FuncNameFIInstSelector.h\"\n#include \"Utils.h\"\n\nnamespace llfi {\n\nbool FuncNameFIInstSelector::isInstFITarget(Instruction *inst) {\n std::string func = inst->getParent()->getParent()->getName();\n func = demangleFuncName(func);\n\n if (funclist->find(func) != funclist->end()) {\n return true;\n }\n return false;\n}\n\n}\n" }, { "alpha_fraction": 0.7530864477157593, "alphanum_fraction": 0.7530864477157593, "avg_line_length": 22.285715103149414, "blob_id": "4dd1ab98ec34e0c9f033ff5fa60909508ca216a5", "content_id": "b46d5cf5ba8b4dcfb9ae4f321bc671d33860d429", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 162, "license_type": "permissive", "max_line_length": 51, "num_lines": 7, "path": "/web-app/views/src/js/actions/profilingStatusActions.js", "repo_name": 
"DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar profilingStatusActions = Reflux.createActions([\n 'updateProfilingStatus'\n ]);\n\nmodule.exports = profilingStatusActions;" }, { "alpha_fraction": 0.6957015991210938, "alphanum_fraction": 0.6996780633926392, "avg_line_length": 31.604938507080078, "blob_id": "19f0ec9941dfd1a50cfa00de97d2d551edc26989", "content_id": "96e4a0c13b93451b539ba529e993e8bb7054b4dd", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5281, "license_type": "permissive", "max_line_length": 388, "num_lines": 162, "path": "/bin/batchInstrument.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\n\"\"\"\n\n%(prog)s is a wrapper for LLFI instrument command to generate multiple work directories for multiple software failure modes (instruction selector) specified in single input.yaml file. The single input.yaml with multiple software failure modes will be splited into multiple input.yaml files, each with only one failure mode enabled and placed in a specific subdirectory under current path.\n\nUsage: %(prog)s [OPTIONS] <source IR file>\n\nList of options:\n\n-L <library directory>: Add <library directory> to the search directories for -l\n-l<library>: link <library>\n--readable: Generate human-readable IR files\n--IRonly: Only generate the instrumented IR files, and you will do the linking and create the executables manually\n--verbose: Show verbose information\n--help(-h): Show help information\n\nPrerequisite:\nYou need to have 'input.yaml' under the same directory as <source IR file>, which contains appropriate options for LLFI. Usually, this command is only applicable for input.yaml file with a list of software failure modes included, i.e. using customInstSelector in instSelMethod and including software fault instruction selector (e.g. BufferOverflow(API)).\n\"\"\"\n\nimport sys, os, shutil\nimport yaml\nimport subprocess\n\nprog = os.path.basename(sys.argv[0])\nscript_path = os.path.realpath(os.path.dirname(__file__))\nsys.path.append(os.path.join(script_path, '../config'))\nimport llvm_paths\n\ninstrument_script = os.path.join(script_path, 'instrument')\n# basedir and options are assigned in parseArgs(args)\nbasedir = \"\"\noptions = []\n\ndef parseArgs(args):\n\tglobal basedir\n\tglobal options\n\tcwd = os.getcwd()\n\tfor arg in args:\n\t\toption = arg\n\t\tif os.path.isfile(arg):\n\t\t\tbasedir = os.path.realpath(os.path.dirname(arg))\n\t\t\toption = os.path.basename(arg)\n\t\toptions.append(option)\n\tos.chdir(basedir)\n\ndef usage(msg = None):\n retval = 0\n if msg is not None:\n retval = -1\n msg = \"ERROR: \" + msg\n print(msg, file=sys.stderr)\n print(__doc__ % globals(), file=sys.stderr)\n sys.exit(retval)\n\ndef parseMasterYaml():\n\tglobal basedir\n\tmaster_yaml_dict = {}\n\tmodel_list = []\n\ttry:\n\t\twith open('input.yaml', 'r') as master_yaml_file:\n\t\t\tmaster_yaml_dict = yaml.load(master_yaml_file)\n\texcept:\n\t\tprint (\"ERROR: Unable to find input.yaml or load the input.yaml under basedir directory\")\n\t\tprint (basedir)\n\t\tsys.exit(-1)\n\ttry:\n\t\tmodel_list = list(master_yaml_dict['compileOption']['instSelMethod'][0]['customInstselector']['include'])\n\texcept:\n\t\tprint (\"ERROR: this wrapper script is not applicable on the input.yaml under current directory. 
Please note this script is only applicable to input.yaml files with multiple software failure models defined.\")\n\t\tprint (basedir)\n\t\tsys.exit(-1)\n\treturn master_yaml_dict, model_list\n\ndef splitMasterYaml(master_yaml_dict, model_list):\n\tglobal basedir\n\tfor model in model_list:\n\t\tinclude_list = [model]\n\t\tslave_yaml_dict = dict(master_yaml_dict)\n\t\tslave_yaml_dict['compileOption']['instSelMethod'][0]['customInstselector']['include'] = include_list\n\t\tslave_yaml_text = yaml.dump(slave_yaml_dict, default_flow_style=False)\n\t\tworkdir = os.path.join(basedir, \"llfi-\"+model)\n\t\ttry:\n\t\t\twith open(os.path.join(workdir, 'input.yaml'), 'w') as f:\n\t\t\t\tf.write(slave_yaml_text)\n\t\texcept:\n\t\t\tprint (\"ERROR: Unable to write slave input.yaml file for model: \", model)\n\t\t\tprint (\"workdir: \", workdir)\n\t\t\tsys.exit(-1)\n\treturn 0\t\n\ndef maybeRequired(abs_path):\n\tbasename = os.path.basename(abs_path)\n\tif basename.startswith('llfi'):\n\t\treturn False\n\telif basename == 'input.yaml':\n\t\treturn False\n\treturn True\n\ndef prepareDirs(model_list):\n\tglobal basedir\n\tstuffs_under_basedir = [f for f in os.listdir(basedir) if maybeRequired(os.path.join(basedir, f))]\n\tfor model in model_list:\n\t\tworkdir = os.path.join(basedir, \"llfi-\"+model)\n\t\tif os.path.exists(workdir):\n\t\t\ttry:\n\t\t\t\tif os.path.isdir(workdir):\n\t\t\t\t\tshutil.rmtree(workdir)\n\t\t\t\telse:\n\t\t\t\t\tos.remove(workdir)\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Unable to remove:\", workdir, \"for model:\", model)\n\t\t\t\tsys.exit(-1)\n\t\tos.makedirs(workdir)\n\t\tfor s in stuffs_under_basedir:\n\t\t\ts_path = os.path.join(basedir, s)\n\t\t\ttry:\n\t\t\t\tif os.path.isfile(s_path):\n\t\t\t\t\tshutil.copy(s_path, workdir)\n\t\t\t\telse:\n\t\t\t\t\t# copytree needs a non-existing destination, so copy into a subdirectory of workdir\n\t\t\t\t\tshutil.copytree(s_path, os.path.join(workdir, s))\n\t\t\texcept:\n\t\t\t\tprint (\"ERROR: Unable to copy:\", s_path, \"\\nto:\", workdir)\n\t\t\t\tsys.exit(-1)\n\treturn 0\n\n\ndef callInstrument(model_list):\n\tglobal basedir\n\tglobal options\n\tnum_failed = 0\n\tfor model in model_list:\n\t\tworkdir = os.path.join(basedir, \"llfi-\"+model)\n\t\tos.chdir(workdir)\n\t\tcommand = [instrument_script]\n\t\tcommand.extend(options)\n\t\ttry:\n\t\t\to = subprocess.check_output(command, stderr=sys.stderr)\n\t\texcept subprocess.CalledProcessError:\n\t\t\tprint (\"instrumenting:\", model, \" failed!\")\n\t\t\tnum_failed += 1\n\t\telse:\n\t\t\tprint (o.decode())\n\t\t\tprint (\"instrumenting:\", model, \" succeeded!\")\n\t\tos.chdir(basedir)\n\treturn num_failed\n\ndef main():\n\tparseArgs(sys.argv[1:])\n\tmaster_yaml_dict, model_list = parseMasterYaml()\n\tprepareDirs(model_list)\n\tsplitMasterYaml(master_yaml_dict, model_list)\n\tr = callInstrument(model_list)\n\treturn r\n\nif __name__ == \"__main__\":\n\tif len(sys.argv[1:]) < 1 or sys.argv[1] == '--help' or sys.argv[1] == '-h':\n\t\tusage()\n\t\tsys.exit(0)\n\tr = main()\n\tsys.exit(r)" }, { "alpha_fraction": 0.5740740895271301, "alphanum_fraction": 0.6466049551963806, "avg_line_length": 27.173913955688477, "blob_id": "c06ae054738498a3d4b01db2f2aa8764605df64a", "content_id": "9a831c5527b9a62124c76fc277846eb372adab8f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1296, "license_type": "permissive", "max_line_length": 76, "num_lines": 46, "path": "/test_suite/MakefileGeneration/readable_IR/mcflimit.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": 
"/**************************************************************************\nMCFLIMIT.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. \nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Thu Feb 17 22:24:36 2005 by Andreas Loebel (boss.local.de) */\n/* $Id: mcflimit.h,v 1.12 2005/02/17 21:43:12 bzfloebe Exp $ */\n\n\n#ifndef _MCF_LIMITS_H\n#define _MCF_LIMITS_H\n\n\n#define BIGM 1.0e7\n#define STRECHT(x) ((long)(1.25 * (double)(x)))\n\n#define MAX_NB_TRIPS_FOR_SMALL_NET 15000\n\n#define MAX_NEW_ARCS_SMALL_NET 3000000\n#define MAX_NEW_ARCS_LARGE_NET 28900000\n\n#define MAX_NB_ITERATIONS_SMALL_NET 5\n#define MAX_NB_ITERATIONS_LARGE_NET 5\n\n\n/*\n// Some operating systems and compiler, respectively, do not handle reallocs\n// properly. Thus, this program requires a somehow static memory handling\n// without reallocation of the main (and quite huge) arc array.\n*/\n#define SPEC_STATIC\n\n\n#endif\n" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 22.700000762939453, "blob_id": "e9ea8defe28f41a0d8c598ec37023071c24a6878", "content_id": "11dc803079e676a1daca851edbd2687235f0dfde", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 236, "license_type": "permissive", "max_line_length": 47, "num_lines": 10, "path": "/web-app/server/utils/execPromise.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var exec = require('child_process').exec;\n\nexports.execPromise = function(cmd) {\n\treturn new Promise(function(resolve, reject) {\n\t\texec(cmd, function(err, stdout) {\n\t\t\tif (err) return reject(err);\n\t\t\tresolve(cmd + stdout);\n\t\t});\n\t});\n};" }, { "alpha_fraction": 0.7278911471366882, "alphanum_fraction": 0.7278911471366882, "avg_line_length": 20.14285659790039, "blob_id": "57d7fa8129621f0eaf519977d05e27da9a770f6f", "content_id": "a8638519a63f298da2a97e46ec586d8efd919ab2", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 147, "license_type": "permissive", "max_line_length": 46, "num_lines": 7, "path": "/web-app/views/src/js/actions/consoleLogActions.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var Reflux = require(\"reflux\");\n\nvar consoleLogActions = Reflux.createActions([\n 'updateConsoleLog'\n ]);\n\nmodule.exports = consoleLogActions;" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 23.35714340209961, "blob_id": "6ef43b8506c1221d08a371dd9d975963ee472520", "content_id": "e09a73d701f24849e5c4ac83f4b34d555e801056", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1023, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/runtime_lib/FaultInjectorManager.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": 
"UTF-8", "text": "#ifndef FAULT_INJECTOR_MANAGER_H\n#define FAULT_INJECTOR_MANAGER_H\n\n#include <iostream>\n#include <map>\n#include <string>\n#include <vector>\n\n#include <stdio.h>\n#include <cstdlib>\n\n#include \"Utils.h\"\n\nclass FaultInjector;\nclass FaultInjectorManager {\n public:\n FaultInjectorManager() {}\n\n public:\n static FaultInjectorManager *getFaultInjectorManager();\n void addFaultInjector(const std::string &name,\n FaultInjector *fi);\n FaultInjector *getFaultInjector(const std::string &name);\n\n std::vector<std::string> getAllInjectorNames();\n std::vector<std::string> getInjectorNamesForType(std::string type_str);\n\n private:\n std::map<const std::string, FaultInjector* > type_injector;\n};\n\nstruct RegisterFaultInjector {\n RegisterFaultInjector(const std::string &name, FaultInjector *fi) {\n //debug(( \"init\\n\"));\n FaultInjectorManager *m = FaultInjectorManager::getFaultInjectorManager();\n //debug((\"get manager\\n\"));\n m->addFaultInjector(name, fi);\n //debug((\"finish\\n\"));\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.5264830589294434, "alphanum_fraction": 0.5963982939720154, "avg_line_length": 26.764705657958984, "blob_id": "ef2673cb2486b161c682a34c65728f0a5b68083c", "content_id": "a6b35b496a8c3043f637f648c0ee72c95568d01f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 944, "license_type": "permissive", "max_line_length": 75, "num_lines": 34, "path": "/test_suite/PROGRAMS/mcf/implicit.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "/**************************************************************************\nIMPLICIT.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. \nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:21:18 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: implicit.h,v 1.11 2005/02/17 19:42:21 bzfloebe Exp $ */\n\n\n#ifndef _IMPLICIT_H\n#define _IMPLICIT_H\n\n\n#include \"mcfutil.h\"\n#include \"mcflimit.h\"\n\n\nextern long price_out_impl _PROTO_(( network_t * ));\nextern long suspend_impl _PROTO_(( network_t *, cost_t, long ));\n\n\n#endif\n" }, { "alpha_fraction": 0.6690382361412048, "alphanum_fraction": 0.6719367504119873, "avg_line_length": 32.29824447631836, "blob_id": "04b582ec69c4831c8ac4b63d7cb74a6e7fe2f63b", "content_id": "899fc2787af01d2cab583d938ac8b704784f1a16", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3795, "license_type": "permissive", "max_line_length": 93, "num_lines": 114, "path": "/test_suite/SCRIPTS/check_injection.py", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport yaml\nimport subprocess\n\ndef examineTraceFile(work_dir):\n\ttry:\n\t\tinputyaml = open(os.path.join(work_dir, 'input.yaml'), 'r')\n\texcept:\n\t\tprint (\"FAIL: (ERROR) input.yaml not found! 
work_dir:\", work_dir)\n\t\treturn False\n\n\tconfig_dict = yaml.load(inputyaml)\n\ttry:\n\t\tif config_dict['compileOption']['tracingPropagation'] == True:\n\t\t\t## we should have trace file\n\t\t\ttracefile = os.path.join(work_dir, 'llfi', 'baseline', 'llfi.stat.trace.prof.txt')\n\t\t\tif os.path.isfile(tracefile) and os.path.getsize(tracefile):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\t## Tracing option disabled, pass\n\t\t\treturn True\n\texcept:\n\t\t## Tracing option disabled, pass\n\t\treturn True\n\n\ndef checkLLFIDir(work_dir, target_IR, prog_input):\n\tllfi_dir = os.path.join(work_dir, \"llfi\")\n\tif os.path.isdir(llfi_dir) == False:\n\t\treturn \"FAIL: No ./llfi folder found!\"\n\tstats_dir = os.path.join(llfi_dir, \"llfi_stat_output\")\n\tif os.path.isdir(stats_dir) == False:\n\t\treturn \"FAIL: No ./llfi/llfi_stat_output folder found!\"\n\tbaseline_dir = os.path.join(llfi_dir, \"baseline\")\n\tif os.path.isdir(baseline_dir) == False:\n\t\treturn \"FAIL: No ./llfi/baseline folder found!\"\n\tprog_output_dir = os.path.join(llfi_dir, \"prog_output\")\n\tif os.path.isdir(prog_output_dir) == False:\n\t\treturn \"FAIL: No ./llfi/prog_output folder found!\"\n\tstd_output_dir = os.path.join(llfi_dir, \"std_output\")\n\tif os.path.isdir(std_output_dir) == False:\n\t\treturn \"FAIL: No ./llfi/std_output folder found!\"\n\n\tstats = [f for f in os.listdir(stats_dir)]\n\tif len(stats) == 0:\n\t\treturn \"FAIL: No stats file found!\"\n\n\tif examineTraceFile(work_dir) == False:\n\t\treturn \"FAIL: Tracing was enabled byt trace file not generated!\"\n\n\treturn \"PASS\"\n\n\ndef check_injection(*prog_list):\n\tr = 0\n\tsuite = {}\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\ttestsuite_dir = os.path.join(script_dir, os.pardir)\n\twith open(os.path.join(testsuite_dir, \"test_suite.yaml\")) as f:\n\t\ttry:\n\t\t\tsuite = yaml.load(f)\n\t\texcept:\n\t\t\tprint(\"ERROR: Unable to load yaml file: test_suite.yaml\", file=sys.stderr)\n\t\t\treturn -1\n\n\twork_dict = {}\n\tfor test in suite[\"SoftwareFaults\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"SoftwareFaults\" in prog_list:\n\t\t\twork_dict[\"./SoftwareFaults/\"+test] = suite[\"SoftwareFaults\"][test]\n\tfor test in suite[\"HardwareFaults\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"HardwareFaults\" in prog_list:\n\t\t\twork_dict[\"./HardwareFaults/\"+test] = suite[\"HardwareFaults\"][test]\n\tfor test in suite[\"BatchMode\"]:\n\t\tif len(prog_list) == 0 or test in prog_list or \"BatchMode\" in prog_list:\n\t\t\twork_dict[\"./BatchMode/\"+test] = suite[\"BatchMode\"][test]\n\t\n\t\n\tresult_list = []\n\tfor test_path in work_dict:\n\t\tinject_dir = os.path.abspath(os.path.join(testsuite_dir, test_path))\n\t\tinject_prog = suite[\"PROGRAMS\"][work_dict[test_path]][0]\n\t\tinject_input = str(suite[\"INPUTS\"][work_dict[test_path]])\n\t\tif test_path.startswith('./BatchMode'):\n\t\t\t# print(\"\\tChecking on BatchMode:\", test_path)\n\t\t\tmodels = [m for m in os.listdir(inject_dir) if os.path.isdir(os.path.join(inject_dir, m))]\n\t\t\tfor m in models:\n\t\t\t\tsubdir = os.path.join(inject_dir, m)\n\t\t\t\t# print(\"\\t\\tChecking on model:\", m)\n\t\t\t\tresult = checkLLFIDir(subdir, inject_prog, inject_input)\n\t\t\t\tif result != \"PASS\":\n\t\t\t\t\tbreak\n\t\t\tif len(models) == 0:\n\t\t\t\tresult = \"Subdirectories for failure modes not found!\"\n\t\telse:\n\t\t\tresult = checkLLFIDir(inject_dir, inject_prog, inject_input)\n\t\tif result != \"PASS\":\n\t\t\tr += 
1\n\t\trecord = {\"name\":test_path, \"result\":result}\n\t\tresult_list.append(record)\n\n\treturn r, result_list\n\nif __name__ == \"__main__\":\n\tr, result_list = check_injection(*sys.argv[1:])\n\tprint (\"=============== Result ===============\")\n\tfor record in result_list:\n\t\tprint(record[\"name\"], \"\\t\\t\", record[\"result\"])\n\tsys.exit(r)" }, { "alpha_fraction": 0.751518189907074, "alphanum_fraction": 0.751518189907074, "avg_line_length": 29.399999618530273, "blob_id": "627d0e770908a764c8f6ae93ca73b567a5b154ac", "content_id": "6a95c669c47ae87d79a2c28bf5d4343586d1a78a", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1976, "license_type": "permissive", "max_line_length": 82, "num_lines": 65, "path": "/llvm_passes/core/FICustomSelectorManager.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FI_CUSTOM_INST_SELECTOR_MANAGER_H\n#define FI_CUSTOM_INST_SELECTOR_MANAGER_H\n\n#include <iostream>\n#include <map>\n#include <string>\n#include <set>\n\nnamespace llfi {\nclass FIInstSelector;\nclass FIRegSelector;\n\nclass FICustomInstSelectorManager {\n public:\n FICustomInstSelectorManager() {}\n\n public:\n static FICustomInstSelectorManager *getCustomInstSelectorManager();\n void addCustomInstSelector(const std::string &name, \n FIInstSelector *instselector);\n FIInstSelector *getCustomInstSelector(const std::string &name);\n void getAllSoftwareSelectors(std::set<std::string>& all_software_failure_names);\n void getAllHardwareSelectors(std::set<std::string>& all_hardware_failure_names);\n\n private:\n std::map<const std::string, FIInstSelector* > optionname_instselector;\n};\n\n\nclass FICustomRegSelectorManager {\n public:\n FICustomRegSelectorManager() {}\n\n public:\n static FICustomRegSelectorManager *getCustomRegSelectorManager();\n void addCustomRegSelector(const std::string &name, \n FIRegSelector *regselector);\n FIRegSelector *getCustomRegSelector(const std::string &name);\n void getAllSoftwareSelectors(std::set<std::string>& all_software_failure_names);\n void getAllHardwareSelectors(std::set<std::string>& all_hardware_failure_names);\n \n private:\n std::map<const std::string, FIRegSelector* > optionname_regselector;\n};\n\n\n// helper class to register custom inst or reg selector\nstruct RegisterFIInstSelector {\n RegisterFIInstSelector(const std::string &name, FIInstSelector *sel) {\n FICustomInstSelectorManager *m = \n FICustomInstSelectorManager::getCustomInstSelectorManager();\n m->addCustomInstSelector(name, sel);\n }\n};\n\nstruct RegisterFIRegSelector {\n RegisterFIRegSelector(const std::string &name, FIRegSelector *sel) {\n FICustomRegSelectorManager *m =\n FICustomRegSelectorManager::getCustomRegSelectorManager();\n m->addCustomRegSelector(name, sel);\n }\n};\n\n}\n#endif\n" }, { "alpha_fraction": 0.5092807412147522, "alphanum_fraction": 0.5858468413352966, "avg_line_length": 25.121212005615234, "blob_id": "0331e6b39c468272e15aaf877b142ac8f8a915bb", "content_id": "323ea485fd8f08732a2b87d4900aaae513d54232", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 862, "license_type": "permissive", "max_line_length": 75, "num_lines": 33, "path": "/test_suite/PROGRAMS/mcf/output.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": 
"/**************************************************************************\nOUTPUT.H of ZIB optimizer MCF, SPEC version\n\nThis software was developed at ZIB Berlin. Maintenance and revisions \nsolely on responsibility of Andreas Loebel\n\nDr. Andreas Loebel\nOrtlerweg 29b, 12207 Berlin\n\nKonrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\nScientific Computing - Optimization\nTakustr. 7, 14195 Berlin-Dahlem\n\nCopyright (c) 1998-2000 ZIB. \nCopyright (c) 2000-2002 ZIB & Loebel. \nCopyright (c) 2003-2005 Andreas Loebel.\n**************************************************************************/\n/* LAST EDIT: Sun Nov 21 16:21:59 2004 by Andreas Loebel (boss.local.de) */\n/* $Id: output.h,v 1.10 2005/02/17 19:42:21 bzfloebe Exp $ */\n\n\n\n#ifndef _OUTPUT_H\n#define _OUTPUT_H\n\n\n#include \"mcfutil.h\"\n\n\nextern long write_circulations _PROTO_(( char *, network_t * ));\n\n\n#endif\n" }, { "alpha_fraction": 0.7492566704750061, "alphanum_fraction": 0.7502477765083313, "avg_line_length": 31.03174591064453, "blob_id": "20ed5b3ea182deef01a38330fa826cede8f74915", "content_id": "931769535da14e5e01df5ffddd4e0d2bf638db98", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2018, "license_type": "permissive", "max_line_length": 81, "num_lines": 63, "path": "/llvm_passes/core/Utils.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef LLFI_UTILS_H\n#define LLFI_UTILS_H\n\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Value.h\"\n#include \"llvm/IR/BasicBlock.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/Metadata.h\"\n#include \"llvm/IR/Constants.h\"\n\n#include \"llvm/Support/Debug.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Support/InstIterator.h\"\n\n// For name demangling\n#include <cxxabi.h>\n\n#include <map>\n#include <set>\n#include <string>\n#include <sstream>\n\nusing namespace llvm;\nnamespace llfi {\nstd::string intToString(int i);\nstd::string longToString(long i);\n\n// Return a demangled version of a C++ function name. Removes type info from\n// templated functions\nstd::string demangleFuncName(std::string func);\n\n// return the terminate instruction of the function\nInstruction *getTermInstofFunction(Function *func);\n// return instumentation code insertion point for fi in reg of inst\nInstruction *getInsertPtrforRegsofInst(Value *reg, Instruction *inst);\n\nvoid getProgramExitInsts(Module &M, std::set<Instruction*> &exitinsts);\n\n// get or set the LLFI index of the specified instruction. 
use metadata\nlong getLLFIIndexofInst(Instruction *inst);\nvoid setLLFIIndexofInst(Instruction *inst);\n\n// get the map of opcode name and their opcode\nvoid genFullNameOpcodeMap(std::map<std::string, unsigned> &opcodenamemap);\n\n//Check metadata to see if instruction was generated/inserted by LLFI\nbool isLLFIIndexedInst(Instruction *inst);\n\n// sets the metadata on the injectFault call\nvoid setInjectFaultInst(Value *reg, Instruction *inst, Instruction *ficall);\n\n// checks if the instruction is a call to llfi's 'injectFault*', if it is, return\n// the next instruction iff injectFault occurs AFTER the targeted instruction\nInstruction* changeInsertPtrIfInjectFaultInst(Instruction *inst);\n\n//======== Add opcode_str QINING @SEP 13th========\nGlobalVariable* findOrCreateGlobalNameString(Module &M, std::string name);\n//================================================\n}\n\n#endif\n" }, { "alpha_fraction": 0.8353658318519592, "alphanum_fraction": 0.8394308686256409, "avg_line_length": 26.27777862548828, "blob_id": "589ecf5e60c6d23ce2c314479340d2544714dc0b", "content_id": "bb758f6cd70e8842dee419ff0291c24388aa8464", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 492, "license_type": "permissive", "max_line_length": 56, "num_lines": 18, "path": "/bin/CMakeLists.txt", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8)\n\ninclude(../config/copy_utils.cmake)\n\nproject(bin)\n\ncopy(instrument.py instrument)\ncopy(injectfault.py injectfault)\ncopy(profile.py profile)\ncopy(SoftwareFailureAutoScan.py SoftwareFailureAutoScan)\ncopy(batchInstrument.py batchInstrument)\ncopy(batchProfile.py batchProfile)\ncopy(batchInjectfault.py batchInjectfault)\ncopy(llfi-gui.py llfi-gui)\ncopy(HardwareFailureAutoScan.py HardwareFailureAutoScan)\ncopy(InjectorAutoScan.py InjectorAutoScan)\n\ngenCopy()\n\n" }, { "alpha_fraction": 0.699489414691925, "alphanum_fraction": 0.7002187967300415, "avg_line_length": 25.882352828979492, "blob_id": "55ff0c357e854b569262891e8881fde558e550d1", "content_id": "7a5251250440d81ff3ec05ab2c2d7964b45ae639", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1371, "license_type": "permissive", "max_line_length": 76, "num_lines": 51, "path": "/llvm_passes/core/FaultInjectionPass.h", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#ifndef FAULTINJECTION_PASS_H\n#define FAULTINJECTION_PASS_H\n\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/Pass.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Instructions.h\"\n\n#include <iostream>\n#include <list>\n#include <map>\n#include <string>\n\nusing namespace llvm;\n\nnamespace llfi {\nclass FaultInjectionPass: public ModulePass {\n public:\n FaultInjectionPass() : ModulePass(ID) { }\n virtual bool runOnModule(Module &M);\t\n static char ID;\n virtual void getAnalysisUsage(AnalysisUsage &AU) const {\n AU.addRequired<DataLayout>();\n }\n\n private: \n void checkforMainFunc(Module &M);\n void finalize(Module& M);\n\n void insertInjectionFuncCall(\n std::map<Instruction*, std::list< int >* > *inst_regs_map, Module &M);\n void createInjectionFuncforType(Module &M, Type *functype, \n std::string &funcname, Constant *fi_func, \n Constant *pre_func);\n\tvoid createInjectionFunctions(Module 
&M);\n\n private:\n  std::string getFIFuncNameforType(const Type* type);\n \n  Constant *getLLFILibPreFIFunc(Module &M);\n  Constant *getLLFILibFIFunc(Module &M);\n  Constant *getLLFILibInitInjectionFunc(Module &M);\n  Constant *getLLFILibPostInjectionFunc(Module &M);\n private:\n  std::map<const Type*, std::string> fi_rettype_funcname_map;\n};\n\nchar FaultInjectionPass::ID=0;\n}\n#endif\n" }, { "alpha_fraction": 0.6776315569877625, "alphanum_fraction": 0.6820175647735596, "avg_line_length": 42.4523811340332, "blob_id": "2d146c98147819d146652202a3278ff977fe31d3", "content_id": "bc8bb464420976c46709f17f84f1a9c277b9cd94", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1824, "license_type": "permissive", "max_line_length": 198, "num_lines": 42, "path": "/web-app/views/src/js/components/mainWindow/mainPannel/tutorial.js", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "var React = require(\"react\");\nvar Tutorial = React.createClass({\n\tgetInitialState: function() {\n\t\treturn {\n\t\t\ttoggle: true\n\t\t};\n\t},\n\trender: function() {\n\t\tvar caretDirectionClassName = this.state.toggle ? \"caret-directional down\" : \"caret-directional right\";\n\t\tvar tutorialContainerClassName = this.state.toggle ? \"llfiTutorial-container\" : \"llfiTutorial-container hide\";\n\t\treturn (\n\t\t\t<div className = \"llfiTutorial\">\n\t\t\t\t<div className=\"btn dropdown-toggle btn-block llfiTutorial-toggle\" onClick={this.onClickToggle}>\n\t\t\t\t\t<span className={caretDirectionClassName}></span>\n\t\t\t\t\t<span className=\"llfiTutorial-label\">LLFI Tutorial</span>\n\t\t\t\t</div>\n\t\t\t\t<div className={tutorialContainerClassName}>\n\t\t\t\t\t<div className=\"llfiTutorial-context\">\n\t\t\t\t\t\t<p>This is a short tutorial for using the GUI to run LLFI. Full instructions are available on the project's wiki page.</p>\n\t\t\t\t\t\t<p>1. Click File->'Open File' to import any standalone C, C++ or .ll file. If you want to import a C or C++ project instead, use File->'Import Project' and navigate to your project folder.</p>\n\t\t\t\t\t\t<p>Note: You cannot edit your program here.</p>\n\t\t\t\t\t\t<p>2. Click 'Compile To IR' to compile your program to IR form.</p>\n\t\t\t\t\t\t<p>3. Click 'Instrument' to configure compile options.</p>\n\t\t\t\t\t\t<p>4. Enter any command line inputs (if any) and then click 'Profiling'.</p>\n\t\t\t\t\t\t<p>5. Click 'Runtime Options' and configure.</p>\n\t\t\t\t\t\t<p>6. Click 'Inject Fault'.</p>\n\t\t\t\t\t\t<p>7. View the fault injection results at the bottom pane.</p>\n\t\t\t\t\t\t<p>8. 
Navigate to 'Fault Injection Status', select one or more traces, and click 'Trace Graph' to view the fault propagation graphically.</p>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t);\n\t},\n\n\tonClickToggle: function() {\n\t\tthis.setState({\n\t\t\ttoggle: !this.state.toggle\n\t\t});\n\t},\n});\n\nmodule.exports = Tutorial;" }, { "alpha_fraction": 0.5730705857276917, "alphanum_fraction": 0.5763546824455261, "avg_line_length": 34.82352828979492, "blob_id": "1607c852abd87f7671fa2909112ba30931391e04", "content_id": "0baf8967be702165bbd23637eea1bb6479f02424", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1218, "license_type": "permissive", "max_line_length": 95, "num_lines": 34, "path": "/llvm_passes/core/RegLocBasedFIRegSelector.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "#include \"RegLocBasedFIRegSelector.h\"\n\nnamespace llfi {\n\nbool RegLocBasedFIRegSelector::isRegofInstFITarget(Value *reg, \n Instruction *inst) {\n if (firegloc == dstreg) {\n return reg == inst;\n } else if (firegloc == allsrcreg) {\n if(isa<GetElementPtrInst>(inst)){\n if(inst->getOperand(inst->getNumOperands()-1) == reg && isa<Constant>(reg)) return false;\n }\n return reg != inst;\n } else {\n unsigned srcindex = (unsigned) (firegloc - srcreg1);\n unsigned totalsrcregnum = inst->getNumOperands();\n if (srcindex < totalsrcregnum) {\n if(isa<GetElementPtrInst>(inst)){\n if(inst->getOperand(totalsrcregnum-1) == reg && isa<Constant>(reg)) return false;\n }\n return inst->getOperand(srcindex) == reg;\n } else\n return false;\n }\n}\n\nbool RegLocBasedFIRegSelector::isRegofInstFITarget(Value *reg, \n Instruction *inst,\n int pos) {\n if(firegloc == allsrcreg || firegloc == dstreg) return isRegofInstFITarget(reg, inst);\n else return isRegofInstFITarget(reg, inst) && (firegloc - srcreg1) == pos;\n}\n\n}\n" }, { "alpha_fraction": 0.37755101919174194, "alphanum_fraction": 0.37755101919174194, "avg_line_length": 18.5, "blob_id": "4c7ce47439040401dc7405fa65084d69cf672646", "content_id": "259ddb0e6473c5f41922ed2604b9c098d2eabf34", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "NCSA" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 196, "license_type": "permissive", "max_line_length": 38, "num_lines": 10, "path": "/runtime_lib/_FIDLSoftwareFaultInjectors.cpp", "repo_name": "DependableSystemsLab/LLFI", "src_encoding": "UTF-8", "text": "// DO NOT MODIFY\n#include \"_SoftwareFaultInjectors.cpp\"\n\n/*********************\n * DEFAULT INJECTORS *\n *********************/\n\n/********************\n * CUSTOM INJECTORS *\n ********************/\n\n" } ]
135
alex8866/deploy
https://github.com/alex8866/deploy
b9568543371b4751d3c4e1b904c933991fed214f
93d7d5be1c8b9b949203aa0cba0d341ac95e957a
b7308cd0389c8b9c86ded29f7ed83aa5c37a7e6c
refs/heads/master
2016-08-05T09:42:04.060387
2014-12-15T05:04:49
2014-12-15T05:04:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6643495559692383, "alphanum_fraction": 0.6683217287063599, "avg_line_length": 29.484848022460938, "blob_id": "a1cf0bcf87e49644808abc8bcc7027da160842ec", "content_id": "295bae16dca56388cf08bbe5ef648c923e76519a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1007, "license_type": "no_license", "max_line_length": 91, "num_lines": 33, "path": "/bin/cpuinfo", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho -n \"CPU model name:\"\ncat /proc/cpuinfo |grep \"model name\" |sort|uniq | awk -F: '{print $2}'\n\necho -n \"physical CPU number in total: \"\ncat /proc/cpuinfo | grep \"physical id\"| sort | uniq | wc -l\n\necho -n \"logical CPU number in total: \"\ncat /proc/cpuinfo | grep processor | wc -l\n\ncat /proc/cpuinfo | grep -qi \"core id\"\nif [ $? -ne 0 ];then\n echo \"Warning. No multi-core or hyper-threading is enabled.\"\n exit 0\nfi\n\n\necho -n \"core number in a physical CPU: \"\ncore_per_phy_cpu=$(cat /proc/cpuinfo | grep \"core id\" | sort | uniq | wc -l)\necho $core_per_phy_cpu\n\necho -n \"logical CPU number in a physical CPU: \"\nlogical_cpu_per_phy_cpu=$(cat /proc/cpuinfo|grep siblings | sort|uniq|awk -F: '{print $2}')\necho $logical_cpu_per_phy_cpu\n\nif [ $logical_cpu_per_phy_cpu -gt $core_per_phy_cpu ];then\n echo \"Hyper threading is enabled.\"\nelif [ $logical_cpu_per_phy_cpu -eq $core_per_phy_cpu ];then\n echo \"Hyper threading is NOT enabled.\"\nelse\n echo \"Error, There's something wrong.\"\nfi\n\n" }, { "alpha_fraction": 0.5412826538085938, "alphanum_fraction": 0.5679723620414734, "avg_line_length": 39.372093200683594, "blob_id": "3bab736197a2561b98f20966088479a96d988bc5", "content_id": "0d64f64f6677fe281b2499e81f23390effcec96b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5422, "license_type": "no_license", "max_line_length": 605, "num_lines": 129, "path": "/conf/.bashrc", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "# .bashrc\n\n# Source global definitions\nif [ -f /etc/bashrc ]; then\n\t. 
/etc/bashrc\nfi\n\nalias ls=\"ls --color=yes\"\nalias ll=\"ls -l\"\n: << EOF\n\n\\$ prompt: root shows #, normal users show $\nColor escape sequence format: \\[\\e[F;Bm\\]\nF is the foreground color (codes 30-37), B is the background color (codes 40-47)\n\\e[0m turns color output off; when B=1 the text is shown bright and bold\n\nColor table (note: these are not the combined results):\nForeground Background Color\n30 40 black\n31 41 red\n32 42 green\n33 43 yellow\n34 44 blue\n35 45 magenta\n36 46 cyan\n37 47 white\n\nCode Meaning\n0 OFF\n1 highlight\n4 underline\n5 blink\n7 reverse video\n8 invisible\nEOF\n\n#xm automatic complete\n#complete -W \"console create destroy domid domname dump-core list mem-max mem-set migrate pause reboot rename restore save shutdown sysrq trigger top unpause uptime vcpu-list vcpu-pin vcpu-set the debug-keys dmesg info log serve sched-credit sched-sedf block-attach block-detach block-list block-configure network-attach network-detach network-list vtpm-list pci-attach pci-detach pci-list pci-list-assignable-devices vnet-list vnet-create vnet-delete labels addlabel rmlabel getlabel dry-run resources makepolicy loadpolicy cfgbootpolicy dumppolicy\" xm\nEDITOR=vim\n\n#Xen xm command completion function\nfunction _xm()\n{\n    XENXM=${XENXM:=\"on\"}\n    [ \"$XENXM\" == \"off\" ] && return 0\n\n    local word=${COMP_WORDS[COMP_CWORD]}\n    local line=${COMP_LINE}\n    local comstr=\"console\\ncreate\\ndestroy\\ndomid\\ndomname\\ndump-core\\nlist\\nmem-max\\nmem-set\\nmigrate\\npause\\nreboot\\nrename\\nrestore\\nsave\\nshutdown\\nsysrq\\ntrigger\\ntop\\nunpause\\nuptime\\nvcpu-list\\nvcpu-pin\\nvcpu-set\\ndebug-keys\\ndmesg\\ninfo\\nlog\\nserve\\nsched-credit\\nsched-sedf\\nblock-attach\\nblock-detach\\nblock-list\\nblock-configure\\nnetwork-attach\\nnetwork-detach\\nnetwork-list\\nvtpm-list\\npci-attach\\npci-detach\\npci-list\\npci-list-assignable-devices\\nvnet-list\\nvnet-create\\nvnet-delete\\nlabels\\naddlabel\\nrmlabel\\ngetlabel\\ndry-run\\nresources\\nmakepolicy\\nloadpolicy\\ncfgbootpolicy\\ndumppolicy\"\n    local Wordlist\n    local Wordlist1\n    local Wordlist2\n\n    case \"$line\" in\n        *create*)\n        COMPREPLY=($(compgen -f -X \"!*.cfg\" -- \"${word}\"))\n        ;;\n        *console*|*destroy*|*reboot*|*pause*|*unpause*|*shutdown*|*mem-max*|*mem-set*|*uptime*|*vcpu-list*|*vcpu-set*|*block-attach*|*block-list*|*network-attach*|*network-list*|*pci-attach*|*vcpu-pin*|*rename*|*domid*|*migrate*|*save*|*dump-core*)\n        Wordlist2=$(xm list|sed '1,2d'|awk '{print $1}')\n        COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n        ;;\n        *pci-list-assignable-devices*)\n        Wordlist1=$(xm pci-list-assignable-devices|grep -iv error)\n        COMPREPLY=($(compgen -W \"$Wordlist1\" -- \"${word}\"))\n        ;;\n        *pci-list*)\n        case \"${line/*pci-list/}\" in\n        *\" \"*)\n            Wordlist2=$(xm list|sed '1,2d'|awk '{print $1}')\n            COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n            ;;\n        \"\")\n            Wordlist2=$(xm list|sed '1,2d'|awk '{print $1}')\n            COMPREPLY=($(compgen -W \"$Wordlist2 pci-list-assignable-devices\" -- \"${word}\"))\n            ;;\n        esac\n        ;;\n        *list*)\n        Wordlist2=$(xm list|sed '1,2d'|awk '{print $1}')\n        COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n        ;;\n        *block-detach*)\n        [ $(echo \"$line\" | awk '{print NF}') -ge 3 ] &&\n        Wordlist2=$(eval xm block-list $(echo \"$line\" |awk '{print $3}') |\n        awk '$1!~/Vdev/{print $1}') ||\n        Wordlist2=$(xm list | sed '1,2d' | awk '{print $1}')\n        COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n        ;;\n        *network-detach*)\n        [ $(echo \"$line\" | awk '{print NF}') -ge 3 ] &&\n        Wordlist2=$(eval xm network-list $(echo \"$line\" |awk '{print $3}') |\n        awk '$1!~/Idx/{print $1}') ||\n        Wordlist2=$(xm list | sed '1,2d' | awk '{print $1}')\n        COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n        ;;\n        *pci-detach*)\n        [ $(echo \"$line\" | 
awk '{print NF}') -ge 3 ] &&\n Wordlist2=$(eval xm pci-list $(echo \"$line\" |awk '{print $3}') |\n awk '$1!~/VSlt/{print \"0000:\"$3\":\"$4\".\"$5}') ||\n Wordlist2=$(xm list | sed '1,2d' | awk '{print $1}')\n COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n\n ;;\n *domname*)\n Wordlist2=$(xm list | sed '1,2d' | awk '{print $2}')\n COMPREPLY=($(compgen -W \"$Wordlist2\" -- \"${word}\"))\n ;;\n *dry-run*)\n COMPREPLY=($(compgen -f -X \"!*.cfg\" -- \"${word}\"))\n ;;\n *restore*)\n COMPREPLY=($(compgen -f -- \"${word}\"))\n ;;\n *)\n Wordlist=$(echo -e \"$comstr\")\n COMPREPLY=($(compgen -W \"$Wordlist\" -- \"${word}\"))\n ;;\n esac\n}\n\n#install profile have include this\ncomplete -o default -F _xm xm\nPS1='\\[\\e[34;35m\\]\\[\\e[0m\\][host\\[\\e[36;99m\\]-a\\[\\e[0m\\]\\[\\e[34;99m\\]\\[\\e[0m\\]]\\[\\e[31;99m\\]\\$\\[\\e[0m\\]'\nexport TESTER=INSTESTER\nBASE_NAME=\"$(basename $(ls -1d /var/lib/deploy*))\"\nalias cg=\"source /var/lib/$BASE_NAME/$TESTER/bin/cg\"\nexport COWPATH=/var/lib/$BASE_NAME/$TESTER/bin/\nexport PATH=$PATH:/var/lib/$BASE_NAME/$TESTER/bin:/usr/sbin # Add RVM to PATH for scripting\nexport GUESTFISH_OUTPUT='\\e[0;33m'\n" }, { "alpha_fraction": 0.7309486865997314, "alphanum_fraction": 0.7589424848556519, "avg_line_length": 18.484848022460938, "blob_id": "2807657ea5192d5de2704aaa833b33d0e02b417e", "content_id": "0ab748754c4b1b7cff52f0455050a22e90941780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 643, "license_type": "no_license", "max_line_length": 91, "num_lines": 33, "path": "/testenv.ini", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#The name of the tester, name should not include special character,\ntester=lkong\n\n#yes/no, yes as default\ncowsay=yes\n\n#The target host's ip address, note: the ip address should be separated by blank space.\n#In fact, this file will be source in script file: deploy.\nstaticip=()\nip=()\n\n#the target host's password\npassword=\n\n#Specify the host environment, these variable name should be lower case letters, and should\n#be separated by '_'\nhost_kernel_version=\nhost_release=5.11\nhost_arch=x86_64\ncpu_type=intel\n\n\n#Guest environment\nguest_kernel=test\nguest_version=7.0\nguest_bit=64\nguest_type=hvm\n\n#[other]\n\n\n#Xen version\nxen_version=3.0.3-144.el5\n" }, { "alpha_fraction": 0.5319905281066895, "alphanum_fraction": 0.5545023679733276, "avg_line_length": 30.259260177612305, "blob_id": "d8434e998dedabf00a9a6f40fa956dc9e3561b80", "content_id": "bac5d885d2dfe394ecb7eaf3162c7446abe81051", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 844, "license_type": "no_license", "max_line_length": 112, "num_lines": 27, "path": "/bin/clone", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#clone hvm-7.0-64-1.cfg\n\nfunction create_image_file()\n{\n test_case=\"$(echo $1 | sed 's/.cfg//')\"\n base_name=$(echo $test_case|cut -d'-' -f1)-$(echo $test_case|cut -d'-' -f2)-$(echo $test_case|cut -d'-' -f3)\n\n old_version=$(echo \"$test_case\"|cut -d'-' -f4)\n new_version=$((old_version + 1))\n new_case=\"$base_name\"-\"$new_version\"\n\n eval sed \"s/$test_case/$new_case/g\" \"$test_case\".cfg > \"$new_case\".cfg\n eval sed -i '/vif/s/mac=..:..:..:..:..:../mac=$(macgen)/' \"$new_case\".cfg\n #cp /var/lib/xen/images/\"$test_case\".img /var/lib/xen/images/\"$new_case\".img\n}\n\nif [ \"$#\" -eq 2 -a -f \"$2\" ];then\n create_image_file \"$1\"\n cp -v \"$2\" 
/var/lib/xen/images/\"$new_case\".img\nelif [ \"$#\" -eq 2 -a ! -f \"$2\" ];then\n echo \"ERROR: file $2 does not exist, please check.\"\n exit\nelse\n create_image_file \"$1\"\nfi\n" }, { "alpha_fraction": 0.4953271150588989, "alphanum_fraction": 0.6168224215507507, "avg_line_length": 34.66666793823242, "blob_id": "9446ac7747df24ac9e8d8149f3f5fa0c71552c23", "content_id": "763c822468472f229102e8dd2a275bf3c6411d46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 107, "license_type": "no_license", "max_line_length": 76, "num_lines": 3, "path": "/bin/IP", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\nifconfig|grep inet|grep -Ev \"inet6|127.0.0.1|192.168\"|sed 's/inet addr://' |\ncowsay -n -e \"..\"\n" }, { "alpha_fraction": 0.5740585923194885, "alphanum_fraction": 0.5974895358085632, "avg_line_length": 22.431371688842773, "blob_id": "e5d521de3f751e4d17af8f094b4bc869f2aa6e3d", "content_id": "25388aef5541e8f1346ac94569968a507c3064d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1195, "license_type": "no_license", "max_line_length": 75, "num_lines": 51, "path": "/future/jssh.py", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#-*- coding:utf-8 -*-\n#author [email protected]\n#date\t2013-10-10\n\nimport sys, time, os\n\ntry:\n import pexpect\nexcept ImportError:\n print \"\"\"\n You must install pexpect module\n \"\"\"\n sys.exit(1)\n\naddr_map = {\n\t'server_1' : ('[email protected]', 'sunbx'),\n}\n\ncmd_map = {\n 'huitui':\"/home/sunbx/path/server/workspace/project/.script/huitui.sh\",\n 'fabu' : \"/home/sunbx/path/server/workspace/project/.script/fabu.sh\",\n} \n\nif len(sys.argv) != 2:\n sys.stderr.write(\"Usage: python %s cmd \\n\" % sys.argv[0])\n raise SystemExit(1)\n\ncmd_output_map = {}\n\nfor key in addr_map:\n try:\n cmd_key = sys.argv[1]\n\tcmd = cmd_map[cmd_key]\n except:\n sys.stderr.write(\"Usage: python %s cmd \\n\" % sys.argv[0])\n print \"\"\" \n \t\tcmd: huitui or fabu \n\t\"\"\"\n raise SystemExit(1)\n server = pexpect.spawn('ssh %s' % addr_map[key][0])\n # server.expect('.*yes/no.*')\n # server.sendline(\"yes\")\n server.expect('.*ssword:')\n server.sendline(addr_map[key][1])\n command_output = pexpect.run ('bash %s' % cmd)\n server.sendline(\"quit\")\n cmd_output_map[key] = command_output\n\nfor key in cmd_output_map:\n print cmd_output_map[key] + key\n" }, { "alpha_fraction": 0.5650969743728638, "alphanum_fraction": 0.5724838376045227, "avg_line_length": 37.60714340209961, "blob_id": "011a8be0c7c7cc87cdcb1492e9660f84ad0dc478", "content_id": "d259bb263117fe14e671fd5ce0ccc65097664411", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1083, "license_type": "no_license", "max_line_length": 140, "num_lines": 28, "path": "/CheckKey.sh", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n> /tmp/CheckKey.log\n\n#Check Host\n[ \"$(uname -r)\" != \"$host_kernel\" -a \"$host_kernel\" != \"\" ] && echo \"ERROR: host_kernel -> $(uname -r) != $host_kernel\" >> /tmp/CheckKey.log\n\n[ \"$(eval cat /etc/redhat-release|awk \"/$host_release/{print \"1\"}\")\" != \"1\" -a \"$host_release\" != \"\" ] && \necho \"ERROR: host_release -> $(cat /etc/redhat-release) != $host_release\" >> /tmp/CheckKey.log\n\n[ \"$(eval cat /etc/redhat-release|awk \"/$host_release/{print \"1\"}\")\" != \"1\" -a \"$host_release\" != \"\" ] && 
\necho \"ERROR: host_release -> $(cat /etc/redhat-release) != $host_release\" >> /tmp/CheckKey.log\n\n[ \"$(uname -m)\" != \"$host_arch\" -a \"$host_arch\" != \"\" ] && echo \"ERROR: host_arch -> $(uname -m) != $host_arch\" >> /tmp/CheckKey.log\n\n#Check Guest (use libguestfs)\n\n#Check Xen\n[ \"$(rpm -qa|egrep ^xen-[0-9]|sed 's/xen-//g')\" != \"$xen_version\" -a \"$xen_version\" != \"\" ] && \necho \"ERROR: xen_version -> $(rpm -qa|egrep ^xen-[0-9]|sed 's/xen-//g') != $xen_version\" >> /tmp/CheckKey.log\n\n#Check Other\n\nif [ -s \"/tmp/CheckKey.log\" ];then\n cat /tmp/CheckKey.log\nelse\n echo \"No error!\"\nfi\n\n\n" }, { "alpha_fraction": 0.5343945622444153, "alphanum_fraction": 0.557244598865509, "avg_line_length": 19.981945037841797, "blob_id": "0398ce9ba43c98d83793695632a5f5d18431a0e0", "content_id": "e7b9000d2931c12f53c7ed5d43965ef1eb81b595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 20919, "license_type": "no_license", "max_line_length": 114, "num_lines": 997, "path": "/bin/check", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash - \n\n########################################################################\n# Copyright (c) Alex, 2013; All Rights Reserved #\n# #\n# LICENSE #\n# GNU GPL, see the license file for details #\n# # \n# CONTACT #\n# [email protected] #\n# #\n# For more information, please read README & man logview & logview -h #\n######################################################################## \n\n#set -o nounset # Treat unset variables as an error\n\n#future function\nswatch()\n{\n :\n}\n\n#trap signal 1 2 3 15\n#trap 'kill -9 $BG_PID;echo;exit 2' 1 2 3 15\nusage()\n{\n cat << EOF\nUsage $0 OPTION\nCheck system information\n\nOPTIONS:\n -h Print this help message\n system, Check information related to system\n xen, Check infomation related to xen\n libguestfs, Check information related to libguestfs\n log, Check system log file failed/error\n\n\nReprot bugs to <[email protected]>.\nEOF\n}\n\nfunction trap_exit\n{\n\ttput cnorm\n\tclear\n\tkill -9 $BG_PID\n}\ntrap 'trap_exit;exit 2' 1 2 3 15 #catch singal 1 2 3 15\n\n#define global variables\nglobal_variables()\n{\n DISK_SPEED_INIT=150\n TITLE=\"False\"\n\n #system date and time\n DATE=$(date +%F)\n TIME=$(date +%T)\n TIME_ZONE=$(date -R | awk '{print $NF}')\n\n [ -f \"$HOME/.testenv.ini\" ] && source $HOME/.testenv.ini\n\n tput init \t\t\t\t\t\t\t#open tput\n BOLD=`tput bold` \t\t\t\t\t#bold \n B_LINK=`tput blink` \t\t\t\t#blink\n REV=`tput rev` \t\t\t\t\t\n NORMAL=`tput sgr0` \t\t\t\t\t#normal cursor\n CURSOR_OFF=\"tput civis\" \t\t\t#hide cursor\n CURSOR_ON=\"tput cnorm\" \t\t\t#cursor visible\n TPUT_RED=`echo -e \"\\e[40;31m\"` \t\t#red\n P_COL=`tput cols` \t\t\t\t #get screen columns\n P_ROW=`tput lines` \t\t\t\t #get screen rows\n HOLD_C=`expr $P_COL - 1`\n HOLD_R=${P_ROW} \n STRLEN=$(echo \"$HOLD_C - 26\" | bc)\n\n #format the error string\n OUT_PUT_ERROR=\"${BOLD}${TPUT_RED}${B_LINK}* ${NORMAL}${BOLD}${TPUT_RED}\"\n\n #the following three variable is used to print the progress bar\n TPUTSTART=1\n TPUTTOTAL=1\n TPUTEVEL=1\n CLEAR=\"False\"\n}\n\n#define global variables\nglobal_variables\n\n#print the progress bar, future function\nbars ()\n{\n stty -echo >/dev/null 2>&1\n i=0\n b=''\n while true\n do\n ((i+=2))\n echo -ne \"\\e[${HOLD_R};30HChecking---\"\n #\tif [ $i -gt 100 ];then\n #\t\tcontinue\n #\telse\n printf \"Progress:[%-50s]%d%%\\r\" $b $i\n #fi\n sleep 1.1\n b=#$b\n done\n stty echo\n echo\n}\t# 
---------- end of function BARS ----------\n\n#highlight function, future function\ncolour ()\n{\n #background_front\n case $1 in\n black_red)\n echo -e \"\\e[40;31m\"\n ;;\n\n black_green)\n echo -e \"\\e[40;32m\"\n ;;\n\n black_yellow)\n echo -e \"\\e[40;33m\"\n ;;\n\n esac # --- end of case ---\n\n}\t# ---------- end of function colour ----------\n\n\n#deal the error string\ndeal()\n{\n# [ $TPUTSTART -eq 1 ] && {\n# tput cup $((P_ROW - 3)) $TPUTSTART\n# printf \"Checking \"\n# TPUTSTART=$((TPUTSTART + 10))\n#}\n\n#tput cup $((P_ROW - 3)) $TPUTSTART\n#for ((i=1; i <= TPUTEVEL; i++))\n#do\n# printf \"#\"\n#done\n\n#TPUTSTART=$((TPUTSTART + TPUTEVEL))\n[ \"$2\" == \"False\" ] && {\nOUTPUT_NAME=\"$OUTPUT_NAME\n$1\"\nreturn 0\n}\n\n[ \"$2\" == \"\" ] && {\nOUTPUT_NAME=\"$OUTPUT_NAME\n$1\"\n\n#eval \"$3\"=\"${BOLD}${TPUT_RED}${B_LINK}1123132 ${NORMAL}${BOLD}${TPUT_RED}N/A\"\neval \"$3\"='\"\\033[1m\\033[31m\\033[5m* \\033[0m\\033[1m\\033[31mN/A\\033[0m\"'\nreturn 0\n}\n\n TMP_VAL=\"$2\"\n TMP_NAME=\"$1\"\n\n strlen=${#2}\n val_jug=$(echo \"$strlen / $STRLEN\" | bc)\n name_jug=$(($(echo \"$2\" | wc -l) + val_jug -1))\n\n [ $val_jug -gt 0 ] && {\n TMP_VAL=\"$(\n echo \"$TMP_VAL\"| awk -v val_jug=$val_jug -v len=$STRLEN '{\n start = 1;\n for (i=0; i<=val_jug; i++)\n { \n print substr($0, start, len);\n start = start + len\n } \n }')\"\n }\n\n [ $name_jug -gt 0 ] && TMP_NAME=\"$TMP_NAME$(\n echo | awk -v name_jug=$name_jug '{\n for (i=0; i<name_jug;i++)\n { \n print \"\\n \";\n } \n }' \n )\"\n [ \"${TMP_VAL}\" == \"\" ] && TMP_VAL=\" \"\n eval \"$3\"='$TMP_VAL'\n\n OUTPUT_NAME=\"$OUTPUT_NAME\n$TMP_NAME\"\n}\n\ncolorerror()\n{\n if [ \"$(echo \"$1\" | awk '$0~/N\\/A/{print 0}')\" == \"0\" ];then\n return\n fi\n eval \"$2\"='\"\\033[1m\\033[31m\\033[5m$1\\033[0m\"'\n}\n\n#--- print the menu ---\n#print strings at tput cup x y\nxy()\n{\n\t_R=$1 #row\n\t_C=$2 #column\n\t_TEXT=\"$3\"\n\ttput cup $_R $_C\n\techo -en \"$_TEXT\"\n} \n\ncenter() \n{ \n\t_STR=$2 #string\n\t_ROW=$3\n keylen=$4\n\tLEN=`echo $_STR | wc -c`\n\tCOLS=`tput cols` #column number\n\tHOLD_COL=`expr $COLS - $LEN`\n\tcase $1 in\n\t\tall)\n\t\t#NEW_COL=`expr $HOLD_COL / 2`\n NEW_COL=$HOLD_C\n\t\t;;\n\n\t\tsystem)\n NEW_COL=$(echo \"$P_COL / 2 - 11\" | bc)\n\t\t;;\n\n\t\tvalue)\n NEW_COL=$(echo \"($HOLD_C - $keylen - 4)/2 + $keylen + 4 - 2\" | bc)\n\t\t;;\n\n\t\tmiddle)\n NEW_COL=$((keylen + 3))\n\t\t;;\n\n\tesac \n\ttput cup $_ROW $NEW_COL\n\techo -n $_STR\n}\n\n#print \"<<system information>>\"\nprint_title()\n{\n colour black_green\n center \"system\" \"${BOLD}<<System Information>>${NORMAL}\" 0\n xy 1 1 \"Date:$DATE\"\n xy 1 20 \"Time:${TIME}\"\n\n}\n\ngrepstr()\n{\n grepstr=\"$(\n cat \"$1\" 2>/dev/null | awk '\n BEGIN {\n failed=0\n error=0\n warning=0\n }\n\n {\n lower=tolower($0);\n if (match(lower, /failed/))\n {\n failed += 1;\n }\n \n if (match(lower, /error/))\n {\n error += 1;\n }\n\n if (match(lower, /warning/))\n {\n warning += 1;\n }\n }\n\n END {\n print failed\":\"error\":\"warning;\n }\n '\n )\"\n\n failed=$(echo \"$grepstr\" | cut -d':' -f1)\n error=$(echo \"$grepstr\" | cut -d':' -f2)\n warning=$(echo \"$grepstr\" | cut -d':' -f3)\n \n [ ${failed} -ne 0 ] && failed=\"${OUT_PUT_ERROR}${failed} failed${NORMAL}\" ||\n failed=\"${failed} failed\"\n [ ${error} -ne 0 ] && error=\"${OUT_PUT_ERROR}${error} error${NORMAL}\" ||\n error=\"${error} error\"\n [ ${warning} -ne 0 ] && warning=\"${OUT_PUT_ERROR}${warning} warning${NORMAL}\" ||\n warning=\"${warning} warning\"\n\n echo \"$failed, $error, $warning\"\n}\n\n#check the 
log files\nlog()\n{\n#Program start\nOUTPUT_NAME=\"\"\nOUTPUT_VAL=\"\"\n[ \"$TITLE\" == \"True\" ] && start_line=0 || start_line=2\n\neval ${CURSOR_OFF}\n[ \"$CLEAR\" == \"False\" ] && {\n clear\n CLEAR=\"True\"\n}\n\n#start function bars\n#bars & \n#BG_PID=$!\nTPUTTOTAL=8\nTPUTEVEL=$(echo \"($P_COL - 20)/ $TPUTTOTAL\" | bc)\n\n#get system information, notice, only on parameter\ndmesg=$(grepstr /var/log/dmesg)\ndeal \"dmesg\" \"False\"\n\nmessages=$(grepstr /var/log/messages)\ndeal \"messages\" \"False\"\n\nanaconda=$(grepstr /var/log/anaconda.log)\ndeal \"anaconda\" \"False\"\n\nboot=$(grepstr /var/log/boot.log)\ndeal \"boot.log\" \"False\"\n\nxend_debug=\"$(grepstr /var/log/xen/xend-debug.log)\"\ndeal \"xen/xend-debug.log\" \"False\"\n\nxen_hotplug=$(grepstr /var/log/xen/xen-hotplug.log)\ndeal \"xen/xen-hotplug.log\" \"False\"\n\nxend=$(grepstr /var/log/xen/xend.log)\ndeal \"xen/xend.log\" \"False\"\n\nxm dmesg > /tmp/xm_dmesg.log\nxm_dmesg=$(grepstr /tmp/xm_dmesg.log)\ndeal \"xm dmesg\" \"False\"\n\nqemu_dm_log=\"$(ls -1t /var/log/xen/qemu-dm.*.log 2>/dev/null|head -1)\"\nqemu_dm_log=${qemu_dm_log:=\"qemu-dm.log\"}\nqemu_dm=$(grepstr \"$qemu_dm_log\")\ndeal \"xen/$(basename \"$qemu_dm_log\")\" \"False\"\n#echo -n \" [100%]\"\n\n#kill function bars\n#kill -9 $BG_PID >/dev/null\neval ${CURSOR_ON}\n\n#create table\ntput clear\n\n[ \"$TITLE\" == \"False\" ] && print_title && TITLE=\"True\"\n\nKEYLEN=$(\necho \"$OUTPUT_NAME\" | awk '\nBEGIN {\n maxlen=0;\n}\n\n{\n len=length($0);\n if (len >= maxlen)\n {\n maxlen = len;\n }\n}\n\nEND {\n print maxlen;\n}\n'\n)\n\n#print the first ---\nxy $start_line 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n#print the first |\nfor ((i=start_line+1;i<=${HOLD_R} - 2;i++));do\n\tprintf \"|\"\n\tprintf \"\\n\"\ndone\n\n#print the middle |\nfor ((i=start_line+1;i<${HOLD_R} - 2;i++));do\n\tcenter \"all\" \"|\" $i\n\tcenter \"middle\" \"|\" \"$i\" \"$KEYLEN\"\ndone\n\n#print Key and Value\ncolour black_yellow\nxy $((start_line+1)) $(echo \"($KEYLEN + 4) / 2 - 6\" | bc) \"${BOLD}Key(/var/log/)\"\ncenter \"value\" \"${BOLD}Value${NORMAL}\" $((start_line+1)) \"$KEYLEN\"\n\n#print the second ---\nxy $((start_line+2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n#Value\nOUTPUT_VAR=\"${dmesg}\n${messages}\n${anaconda}\n${boot}\n${xend_debug}\n${xen_hotplug}\n${xend}\n${xm_dmesg}\n${qemu_dm}\"\n\n#output the Key\n_x=$((start_line+3))\nOIFS=\"$IFS\"\nIFS=$'\\n'\nfor S_NAME in ${OUTPUT_NAME};do\n\t_y=2\n\txy $_x $_y \"${S_NAME}\"\n\t((_x++))\ndone\n\nOLD_IFS=\"$OIFS\"\nIFS=$'\\x0A'\n\n#output the Value\n_xx=$((start_line+3))\nfor S_VAR in ${OUTPUT_VAR};do\n if echo \"$S_VAR\" |egrep ^[0-9] &>/dev/null;then\n _yy=$((KEYLEN + 4 + 2))\n else\n\t _yy=$((KEYLEN + 4))\n fi\n\txy $_xx $_yy \"${S_VAR}\"\n\t((_xx++))\ndone\nIFS=$OLD_IFS\n\n#print the last ---\nxy $((HOLD_R-2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C} + 2;i++));do\n\tprintf \"%s\" -\ndone\nprintf \"\\n\"\n}\n\n\nsystem()\n{\n\n#Program start\nOUTPUT_NAME=\"\"\nOUTPUT_VAL=\"\"\n[ \"$TITLE\" == \"True\" ] && start_line=0 || start_line=2\n\neval ${CURSOR_OFF}\n[ \"$CLEAR\" == \"False\" ] && {\n clear\n CLEAR=\"True\"\n}\n#start function bars\n#bars & \n#BG_PID=$!\nTPUTTOTAL=11\nTPUTEVEL=$(echo \"($P_COL - 20)/ $TPUTTOTAL\" | bc)\n\n#get system information\nProduct_Name=\"$(dmidecode | grep \"Product Name\"|awk -F':' 'NR==1{gsub(/^[[:blank:]]*/, \"\", $NF);print $NF }')\"\ndeal \"Product Name\" \"$Product_Name\" \"Product_Name\"\n\nHost_Name=$(uname -n)\ndeal \"Host Name\" 
\"$Host_Name\" \"Host_Name\"\n\nHost_Release=$(cat /etc/issue|head -1)\ndeal \"Host Release\" \"$Host_Release\" \"Host_Release\"\necho \"$Host_Release\" | grep \"$host_release\" &>/dev/null || Host_Release=\"${OUT_PUT_ERROR}${Host_Release}${NORMAL}\"\n\nHost_Arch=$(uname -i)\ndeal \"Host_Arch\" \"$Host_Release\" \"Host_Release\"\nif [[ \"${Host_Arch}\" != \"${host_arch}\" ]];then\n Host_Arch=\"${OUT_PUT_ERROR}${Host_Arch}${NORMAL}\"\nfi\n\nHost_Kernel_Version=$(uname -r)\ndeal \"Host Kernel Version\" \"$Host_Kernel_Version\" \"Host_Kernel_Version\"\nif [[ \"${Host_Kernel_Version}\" != \"${host_kernel_version}\" ]];then\n Host_Kernel_Version=\"${OUT_PUT_ERROR}${Host_Kernel_Version}${NORMAL}\"\nfi\n\nCPU_Type=$(cat /proc/cpuinfo|grep \"name\" | uniq -c|cut -d':' -f2|awk '{print $1}' | column -t)\ndeal \"CPU Type\" \"$CPU_Type\" \"CPU_Type\"\necho \"$CPU_Type\"|grep -i \"$cpu_type\" &>/dev/null|| CPU_Type=\"${OUT_PUT_ERROR}${CPU_Type}${NORMAL}\"\n\nCPU_Core_Number=$(cat /proc/cpuinfo|grep \"processor\" | wc -l)\ndeal \"CPU Core Number\" \"$CPU_Core_Number\" \"CPU_Core_Number\"\n\nCPU_Physical_Number=$(cat /proc/cpuinfo|grep \"physical id\"|sort|uniq|wc -l)\ndeal \"CPU Physical Number\" \"$CPU_Physical_Number\" \"CPU_Physical_Number\"\n\nCPU_Clock_Speed=$(cat /proc/cpuinfo|grep \"MHz\"|uniq|awk -F: '{print $2\"MHZ\"}'|column -t)\ndeal \"CPU Clock Speed\" \"$CPU_Clock_Speed\" \"CPU_Clock_Speed\"\n\nMemory_Size=$(free -m|grep -i \"mem\"|awk '{print $2\"MB\"}')\ndeal \"Memory Size\" \"$Memory_Size\" \"Memory_Size\"\n\nDisk_Size=\"$(fdisk -l |grep :|awk '/\\/dev\\/sd[a-z]/{print $2,$3\"G\"}')\"\ndeal \"Disk Size\" \"$Disk_Size\" \"Disk_Size\"\n\nSwap_Size=$(free -m | grep -i \"swap\" | awk '{print $2\"MB\"}')\ndeal \"Swap Size\" \"$Swap_Size\" \"Swap_Size\"\n\nNIC_Number=$(ifconfig -a|egrep ^eth|wc -l)\ndeal \"NIC Number\" \"$NIC_Number\" \"NIC_Number\"\n\nIP_Address=\"\"\nping -c 1 8.8.8.8 &>/dev/null || IP_Address=\"${OUT_PUT_ERROR}Network is unavailable${NORMAL}\"\n\n[ \"$IP_Address\" == \"\" ] && {\n for eth in $(ifconfig -a|egrep ^eth|awk '{print $1}')\n do\n IP=\"$(ifconfig \"$eth\"|grep \"inet \" |awk -F'[ :]+' '{print $4}')\"\n [ \"$IP\" != \"\" ] && {\n IP_Address=\"$IP\"\n break\n }\n done\n}\ndeal \"IP Address\" \"$IP_Address\" \"IP_Address\"\n\nif (service iptables status |grep INPUT 2>&1 1>/dev/null) ; then\n Firewall_Status=\"iptables on\"\nelse\n Firewall_Status=\"iptables off\"\nfi\ndeal \"Firewall Status\" \"$Firewall_Status\" \"Firewall_Status\"\n\nSelinux_Status=$(cat /etc/selinux/config | grep \"SELINUX=\" | grep -v \"^#\" | cut -d= -f2)\ndeal \"Selinux Status\" \"$Selinux_Status\" \"Selinux_Status\"\n\n#echo -n \" [100%]\"\n\n#kill function bars\n#kill -9 $BG_PID >/dev/null\neval ${CURSOR_ON}\n\n#create table\ntput clear\n\n[ \"$TITLE\" == \"False\" ] && print_title && TITLE=\"True\"\n\nKEYLEN=$(\necho \"$OUTPUT_NAME\" | awk '\nBEGIN {\n maxlen=0;\n}\n\n{\n len=length($0);\n if (len >= maxlen)\n {\n maxlen = len;\n }\n}\n\nEND {\n print maxlen;\n}\n'\n)\n\n#print the first ---\nxy $start_line 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n#print the first |\nfor ((i=start_line+1;i<=${HOLD_R} - 2;i++));do\n\tprintf \"|\"\n\tprintf \"\\n\"\ndone\n\n#print the middle |\nfor ((i=start_line+1;i<${HOLD_R} - 2;i++));do\n\tcenter \"all\" \"|\" $i\n\tcenter \"middle\" \"|\" \"$i\" \"$KEYLEN\"\ndone\n\n#print Key and Value\ncolour black_yellow\nxy $((start_line+1)) $(echo \"($KEYLEN + 4) / 2 - 1\" | bc) \"${BOLD}Key\"\ncenter \"value\" \"${BOLD}Value${NORMAL}\" 
$((start_line+1)) \"$KEYLEN\"\n\n#print the second ---\nxy $((start_line+2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n#Value\nOUTPUT_VAR=\"${Product_Name}\n${Host_Name}\n${Host_Release}\n${Host_Kernel_Version}\n${Host_Arch}\n${CPU_Type}\n${CPU_Physical_Number}\n${CPU_Core_Number}\n${CPU_Clock_Speed}\n${Memory_Size}\n${Disk_Size}\n${Swap_Size}\n${NIC_Number}\n${IP_Address}\n${Firewall_Status}\n${Selinux_Status}\"\n\n#${Disk_Status}\n\n#output the Key\n_x=$((start_line+3))\nOIFS=\"$IFS\"\nIFS=$'\\n'\nfor S_NAME in ${OUTPUT_NAME};do\n\t_y=2\n\txy $_x $_y \"${S_NAME}\"\n\t((_x++))\ndone\n\nOLD_IFS=\"$OIFS\"\nIFS=$'\\x0A'\n\n#output the Value\n_xx=$((start_line+3))\nfor S_VAR in ${OUTPUT_VAR};do\n echo \"$S_VAR\" >> bcde\n if echo \"$S_VAR\" |grep \"* \" &>/dev/null;then\n _yy=$((KEYLEN + 4))\n else\n\t _yy=$((KEYLEN + 4 + 2))\n fi\n\txy $_xx $_yy \"${S_VAR}\"\n\t((_xx++))\ndone\nIFS=$OLD_IFS\n\n#print the last ---\nxy $((HOLD_R-2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C} + 2;i++));do\n\tprintf \"%s\" -\ndone\nprintf \"\\n\"\n}\n\nxen()\n{\n#Program start\nOUTPUT_NAME=\"\"\nOUTPUT_VAL=\"\"\n[ \"$TITLE\" == \"True\" ] && start_line=0 || start_line=2\n\neval ${CURSOR_OFF}\n[ \"$CLEAR\" == \"False\" ] && {\n clear\n CLEAR=\"True\"\n}\n#start function bars\n#bars & \n#BG_PID=$!\nTPUTTOTAL=1\nTPUTEVEL=$(echo \"($P_COL - 20)/ $TPUTTOTAL\" | bc)\n\n#get system information\nXen_Version=$(rpm -qa|grep ^xen-[0-9])\ndeal \"Xen Version\" \"$Xen_Version\" \"Xen_Version\"\nif [[ \"${Xen_Version}\" != \"${Xen_version}\" ]];then\n colorerror \"$Xen_Version\" \"Xen_Version\"\nfi\n\nXend_Status=\"$(service xend status 2>&1)\"\ndeal \"Xend Status\" \"$Xend_Status\" \"Xend_Status\"\n[ \"$Xend_Status\" != \"xend is running\" ] && \ncolorerror \"$Xend_Status\" \"Xend_Status\"\n\nif cat /proc/cpuinfo|egrep 'vmx|svm' &>/dev/null;then\n Full_Virtualization=\"Support\"\n deal \"Full Virtualization\" \"$Full_Virtualization\" \"Full_Virtualization\"\nelse\n Full_Virtualization=\"Not support\"\n deal \"Full Virtualization\" \"$Full_Virtualization\" \"Full_Virtualization\"\n colorerror \"$Full_Virtualization\" \"Full_Virtualization\"\nfi\n\n#Check If mount xenimage and iso.\nif mount |grep \"10.66.90.121:/vol/s1xenimage\" &>/dev/null;then\n S1_Xen_Image=\"10.66.90.121:/vol/s1xenimage has mounted\"\n deal \"S1 Xen Image\" \"$S1_Xen_Image\" \"S1_Xen_Image\"\nelse\n S1_Xen_Image=\"10.66.90.121:/vol/s1xenimage not mounted\"\n deal \"S1 Xen Image\" \"$S1_Xen_Image\" \"S1_Xen_Image\"\n colorerror \"$S1_Xen_Image\" \"S1_Xen_Image\"\nfi\n\nif mount | grep \"10.66.90.128:/vol/S1/iso\" &>/dev/null;then\n S1_Iso=\"10.66.90.128:/vol/S1/iso/ has mounted\"\n deal \"S1 Iso\" \"$S1_Iso\" \"S1_Iso\"\nelse\n S1_Iso=\"10.66.90.128:/vol/S1/iso not mounted\"\n deal \"S1 Iso\" \"$S1_Iso\" \"S1_Iso\"\n colorerror \"$S1_Iso\" \"S1_Iso\"\nfi\n\n\n#echo -n \" [100%]\"\n\n#kill function bars\n#kill -9 $BG_PID >/dev/null\neval ${CURSOR_ON}\n\n#Value\nOUTPUT_VAR=\"${Xen_Version}\n${Xend_Status}\n${Full_Virtualization}\n${S1_Xen_Image}\n${S1_Iso}\"\n\n#create table\ntput clear\n\n[ \"$TITLE\" == \"False\" ] && print_title && TITLE=\"True\"\n\nKEYLEN=$(\necho \"$OUTPUT_NAME\" | awk '\nBEGIN {\n maxlen=0;\n}\n\n{\n len=length($0);\n if (len >= maxlen)\n {\n maxlen = len;\n }\n}\n\nEND {\n print maxlen;\n}\n'\n)\n\n#print the first ---\nxy $start_line 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n#print the first |\nfor ((i=start_line+1;i<=${HOLD_R} - 2;i++));do\n\tprintf \"|\"\n\tprintf \"\\n\"\ndone\n\n#print 
the middle |\nfor ((i=start_line+1;i<${HOLD_R} - 2;i++));do\n\tcenter \"all\" \"|\" $i\n\tcenter \"middle\" \"|\" \"$i\" \"$KEYLEN\"\ndone\n\n#print Key and Value\ncolour black_yellow\nxy $((start_line+1)) $(echo \"($KEYLEN + 4) / 2 - 1\" | bc) \"${BOLD}Key\"\ncenter \"value\" \"${BOLD}Value${NORMAL}\" $((start_line+1)) \"$KEYLEN\"\n\n#print the second ---\nxy $((start_line+2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n\n#output the Key\n_x=$((start_line+3))\nOIFS=\"$IFS\"\nIFS=$'\\n'\nfor S_NAME in ${OUTPUT_NAME};do\n\t_y=2\n\txy $_x $_y \"${S_NAME}\"\n\t((_x++))\ndone\n\nOLD_IFS=\"$OIFS\"\nIFS=$'\\x0A'\n\n#output the Value\n_xx=$((start_line+3))\nfor S_VAR in ${OUTPUT_VAR};do\n if echo \"$S_VAR\" |grep \"* \" &>/dev/null;then\n _yy=$((KEYLEN + 4))\n else\n\t _yy=$((KEYLEN + 4 + 2))\n fi\n\txy $_xx $_yy \"${S_VAR}\"\n\t((_xx++))\ndone\nIFS=$OLD_IFS\n\n#print the last ---\nxy $((HOLD_R-2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C} + 2;i++));do\n\tprintf \"%s\" -\ndone\nprintf \"\\n\"\n}\n\n\nguestfs()\n{\n#Program start\nOUTPUT_NAME=\"\"\nOUTPUT_VAL=\"\"\n[ \"$TITLE\" == \"True\" ] && start_line=0 || start_line=2\n\neval ${CURSOR_OFF}\nclear\n#start function bars\n#bars & \n#BG_PID=$!\nTPUTTOTAL=1\nTPUTEVEL=$(echo \"($P_COL - 20)/ $TPUTTOTAL\" | bc)\n\n#get system information\nLibguestfs_Version=$(rpm -qa|grep libguestfs-[0-9])\ndeal \"Libguestfs Version\" \"$Libguestfs_Version\" \"Libguestfs_Version\"\n\nLibguestfs_Tools_Version=$(rpm -qa|grep libguestfs-tools-[0-9])\ndeal \"Libguestfs Tools Version\" \"$Libguestfs_Tools_Version\" \"Libguestfs_Tools_Version\"\n\nLibguestfs_Tools_C_Version=$(rpm -qa|grep libguestfs-tools-c-[0-9])\ndeal \"Libguestfs Tools C Version\" \"$Libguestfs_Tools_C_Version\" \"Libguestfs_Tools_C_Version\"\n\nPerl_Sys_Guestfs_Version=$(rpm -qa|grep perl-Sys-Guestfs-[0-9])\ndeal \"Perl Sys Guestfs Version\" \"$Perl_Sys_Guestfs_Version\" \"Perl_Sys_Guestfs_Version\"\n\nSupermin_Version=$(rpm -qa|grep supermin-[0-9])\ndeal \"Supermin Version\" \"$Supermin_Version\" \"Supermin_Version\"\n\nAugeas_Version=$(rpm -qa|grep augeas-[0-9])\ndeal \"Augeas Version\" \"$Augeas_Version\" \"Augeas_Version\"\n\nHivex_Version=$(rpm -qa|grep ^hivex-[0-9])\ndeal \"Hivex Version\" \"$Hivex_Version\" \"Hivex_Version\"\n\nLibguestfs_Winsupport_Version=$(rpm -qa|grep libguestfs-winsupport-[0-9])\ndeal \"Libguestfs Winsupport Version\" \"$Libguestfs_Winsupport_Version\" \"Libguestfs_Winsupport_Version\"\n\nqemu_kvm_Version=$(rpm -qa|grep qemu-kvm-[0-9])\ndeal \"qemu-kvm Version\" \"$qemu_kvm_Version\" \"qemu_kvm_Version\"\n\nlibvirt_Version=$(rpm -qa|grep libvirt-[0-9])\ndeal \"libvirt Version\" \"$libvirt_Version\" \"libvirt_Version\"\n\n#echo -n \" [100%]\"\n\n#kill function bars\n#kill -9 $BG_PID >/dev/null\neval ${CURSOR_ON}\n\n#Value\nOUTPUT_VAR=\"${Libguestfs_Version}\n${Libguestfs_Tools_Version}\n${Libguestfs_Tools_C_Version}\n${Perl_Sys_Guestfs_Version}\n${Supermin_Version}\n${Augeas_Version}\n${Hivex_Version}\n${Libguestfs_Winsupport_Version}\n${qemu_kvm_Version}\n${libvirt_Version}\"\n\n#create table\ntput clear\n\n[ \"$TITLE\" == \"False\" ] && print_title && TITLE=\"True\"\n\nKEYLEN=$(\necho \"$OUTPUT_NAME\" | awk '\nBEGIN {\n maxlen=0;\n}\n\n{\n len=length($0);\n if (len >= maxlen)\n {\n maxlen = len;\n }\n}\n\nEND {\n print maxlen;\n}\n'\n)\n\n#print the first ---\nxy $start_line 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n#print the first |\nfor ((i=start_line+1;i<=${HOLD_R} - 2;i++));do\n\tprintf \"|\"\n\tprintf 
\"\\n\"\ndone\n\n#print the middle |\nfor ((i=start_line+1;i<${HOLD_R} - 2;i++));do\n\tcenter \"all\" \"|\" $i\n\tcenter \"middle\" \"|\" \"$i\" \"$KEYLEN\"\ndone\n\n#print Key and Value\ncolour black_yellow\nxy $((start_line+1)) $(echo \"($KEYLEN + 4) / 2 - 1\" | bc) \"${BOLD}Key\"\ncenter \"value\" \"${BOLD}Value${NORMAL}\" $((start_line+1)) \"$KEYLEN\"\n\n#print the second ---\nxy $((start_line+2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C}+2;i++));do\n\tprintf \"%s\" -\ndone\n\n\n#output the Key\n_x=$((start_line+3))\nOIFS=\"$IFS\"\nIFS=$'\\n'\nfor S_NAME in ${OUTPUT_NAME};do\n\t_y=2\n\txy $_x $_y \"${S_NAME}\"\n\t((_x++))\ndone\n\nOLD_IFS=\"$OIFS\"\nIFS=$'\\x0A'\n\n#output the Value\n_xx=$((start_line+3))\nfor S_VAR in ${OUTPUT_VAR};do\n if echo \"$S_VAR\" |grep \"* \" &>/dev/null;then\n _yy=$((KEYLEN + 4))\n else\n\t _yy=$((KEYLEN + 4 + 2))\n fi\n\txy $_xx $_yy \"${S_VAR}\"\n\t((_xx++))\ndone\nIFS=$OLD_IFS\n\n#print the last ---\nxy $((HOLD_R-2)) 0 \"-\"\nfor ((i=2;i<${HOLD_C} + 2;i++));do\n\tprintf \"%s\" -\ndone\nprintf \"\\n\"\n}\n\n\n#script start\n[ \"$1\" == \"-h\" ] && {\nusage\nexit 0\n}\n\ncom=(\"$*\")\nfor command in ${com[*]}\ndo\n eval \"$command\"\ndone\n" }, { "alpha_fraction": 0.5379008650779724, "alphanum_fraction": 0.6209912300109863, "avg_line_length": 31.66666603088379, "blob_id": "ca9ad7f5ccafb736bf3cdef125a65ef8032597ea", "content_id": "6e90c6f57dcfd6eae27e0f7ebfc80cce64f69e1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 686, "license_type": "no_license", "max_line_length": 66, "num_lines": 21, "path": "/bin/mt", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n[ ! -d /vol/s1xenimage ] && mkdir -p /vol/s1xenimage\nmount |grep \"/vol/s1xenimage\" &>/dev/null;\nif [ \"$?\" -ne 0 ];then\n mount 10.66.90.121:/vol/s1xenimage /vol/s1xenimage &>/dev/null\n [ \"$?\" -eq 0 ] && echo mount /vol/s1xenimage successfully || \n echo mount /vol/s1xenimage failed\nelse\n echo 10.66.90.121:/vol/s1xenimage already mounted\nfi\n\n[ ! 
-d /vol/S1/iso ] && mkdir -p /vol/S1/iso\nmount |grep \"/vol/S1/iso\" &>/dev/null;\nif [ \"$?\" -ne 0 ];then\n mount 10.66.90.128:/vol/S1/iso /vol/S1/iso\n [ \"$?\" -eq 0 ] && echo mount /vol/S1/iso successfully || \n echo mount /vol/S1/iso failed\nelse\n echo 10.66.90.128:/vol/S1/iso /vol/S1/iso already mounted\nfi\n" }, { "alpha_fraction": 0.5125858187675476, "alphanum_fraction": 0.5377574563026428, "avg_line_length": 26.3125, "blob_id": "578aecb45876ef65da1c7b663f469a1c54a2c415", "content_id": "36f11c9cbed667141a2c0437dec31beb8292a0d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 437, "license_type": "no_license", "max_line_length": 122, "num_lines": 16, "path": "/bin/getcfg", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#This script is used to get the xm's configure file from autotest DEBUG log.\ncut -d'|' -f3 \"$1\"|awk '{gsub(/^[[:blank:]]*/,\"\",$0);print $0}' |\nawk '{gsub(/[[:blank:]]*/,\"\",$0);print $0}' |\nawk -F'=' -v OFS=\"=\" '{\nif (match($1, /kernel|vnclisten|serial|uuid|on_reboot|on_poweroff|on_crash|soundhw|name|device_model|builder|bootloader/))\n {\n $2=\"\\\"\"$2\"\\\"\"\n print $0\n }\nelse\n {\n print $0\n }\n}\n'\n" }, { "alpha_fraction": 0.519911527633667, "alphanum_fraction": 0.5663716793060303, "avg_line_length": 25.52941131591797, "blob_id": "87d1ac0843ca0e0ed7843132859dedee9c6c9082", "content_id": "e7d8ba682d8b97ed75c178fa33d37ed47174be59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 452, "license_type": "no_license", "max_line_length": 68, "num_lines": 17, "path": "/bin/cl", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nyellow='\\033[33m';normal='\\033[0m';cyan='\\033[36m';red='\\033[31m'\n\nif [ \"$1\" == \"error\" ];then\n echo -e ${red}\"ERROR/FAILED messages in /var/log/xen/*\"${normal}\n\n if [ \"$(ls /var/log/xen/* 2>/dev/null)\" != \"\" ];then\n cat /var/log/xen/* |egrep -i 'error|failed'\n fi\n\n echo -e ${red}\"ERROR/FAILED messages from 'xm dmesg'\"${normal}\n xm dmesg | egrep -i 'error|failed'\nelse\n rm -rf /var/log/xen/*\n xm dmesg -c\nfi\n\n" }, { "alpha_fraction": 0.6581196784973145, "alphanum_fraction": 0.6581196784973145, "avg_line_length": 22.399999618530273, "blob_id": "3c2083c0a5b2b3c967b9d00ee1ffb398d064eb57", "content_id": "08e48f6e1c5d61f62ac6bcb6f0e895620e2dd7bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 117, "license_type": "no_license", "max_line_length": 76, "num_lines": 5, "path": "/README.md", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "deploy\n\nWHAT IS DEPLOY?\n===============\ndeploy is a small tool used to deploy own linux environment. 
Developing ....\n" }, { "alpha_fraction": 0.5054066777229309, "alphanum_fraction": 0.5265632271766663, "avg_line_length": 32.234375, "blob_id": "021a016dd2c8cc431299667a098d82037938bede", "content_id": "3a86ae7806d54ef52e28a9d292bb594bf0cfca02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2127, "license_type": "no_license", "max_line_length": 166, "num_lines": 64, "path": "/bin/cg", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#chrhel 6.5\nBASE_DIR=$(basename $(ls -1d /var/lib/deploy*))\nDATA_DIR=\"/var/lib/$BASE_DIR\"/$TESTER\nyellow='\\033[33m';normal='\\033[0m';cyan='\\033[36m';red='\\033[31m'\n\nfunction Usage()\n{\n    cat << EOF\n    cg [chad] branch\n    -h: print this help message\n    branch: which branch you want to switch to\n    -c: show current branch\n    -a: show all branches\n    -d: delete a branch\nEOF\n}\ncurrent_branch=$(ls -1ld $HOME/$TESTER |awk -F'/' '{print $NF}')\nif [ \"$#\" -eq 0 ];then\n    Usage\nelif [ \"$1\" == \"-h\" ];then\n    Usage\nelif [ \"$1\" == \"-d\" -a \"$2\" != \"\" ];then\n    until [ -z \"$2\" ]\n    do\n        [ \"$current_branch\" == \"$2\" ] && {\n            echo -e \"${red}Cannot delete branch ${normal}${cyan}${current_branch}${normal}${red}, you are now on branch${normal} ${cyan}${current_branch}${normal}\" |\n            cowsay -n -d\n            shift\n            continue\n        }\n        rm -rf \"$DATA_DIR/$2\"\n        shift\n    done\nelif [ \"$1\" == \"-d\" -a \"$2\" == \"\" ];then\n    Usage\nelif [ \"$1\" == \"-c\" ];then\n    echo $current_branch #| cowsay -n -e \"..\"\nelif [ \"$1\" == \"-a\" ];then\n    ls -1 $DATA_DIR |egrep -v 'bin|testenv.ini' #| cowsay -n -e \"..\"\nelse\n    branch=\"$1\"\n    [ ! -d $DATA_DIR/$branch ] && {\n        mkdir -p $DATA_DIR/$branch\n\n        for var in $(ls -1 $DATA_DIR/$current_branch/{pv,hvm}-$current_branch-*-1.cfg)\n        do\n            BASE_NAME=$(basename $(echo \"$var\"))\n\n            TYPE=$(echo $BASE_NAME|cut -d'-' -f1)\n            ARCH=$(echo $BASE_NAME|cut -d'-' -f3)\n            cp -f $var $DATA_DIR/$branch/$TYPE-$branch-$ARCH-1.cfg\n            eval sed -i 's/$TYPE-$current_branch-$ARCH-1/$TYPE-$branch-$ARCH-1/g' $DATA_DIR/$branch/$TYPE-$branch-$ARCH-1.cfg\n\n            #Change mac address\n            eval sed -i '/vif/s/mac=..:..:..:..:..:../mac=$(macgen)/' $DATA_DIR/$branch/$TYPE-$branch-$ARCH-1.cfg\n        done\n        cp $DATA_DIR/$current_branch/virsh-kvm.xml $DATA_DIR/$branch/\n    }\n    unlink $HOME/$TESTER &>/dev/null\n    ln -s $DATA_DIR/$branch $HOME/$TESTER\n    cd $HOME/$TESTER\n    echo -e ${yellow}\"You are now on branch${normal} ${cyan}$branch\"${normal} | cowsay -n -e \"..\"\nfi\n" }, { "alpha_fraction": 0.5217266082763672, "alphanum_fraction": 0.535251796245575, "avg_line_length": 30.0961971282959, "blob_id": "a334552a2b465ea7e875fd665abaaadec7dccd04", "content_id": "cc91acb7d16ddf7db726c13cc700cd31fe164991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 13900, "license_type": "no_license", "max_line_length": 553, "num_lines": 447, "path": "/deploy", "repo_name": "alex8866/deploy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n: << EOF\nDescription: This shell script is used to deploy your Linux work environment.\nAuthor: Alex\nEmail: [email protected]\nDate: 2013-08-21\n\nHow to use this script:\n1. [remote machine]#ssh-keygen\n2. [local machine]#ssh-keygen do this step if you have not generated a .ssh directory in your home dir.\n3. [local machine]#ssh-copy-id -i ~/.ssh/id_rsa.pub username@ipaddress\n4. cd ConfigOwnEnv\n5. ./ConfigOwnEnv.sh username@ipaddress\n6. 
Warning: You can only deploy for the root user on the target host currently\nEOF\n\n###You only need to configure DeDir & HostCom ###\n#Define the root directory of the De tool.\nINSTALL_DIR=\"/usr/local/src\"\nBIN_DIR=\"$HOME/bin\"\n[ ! -d $BIN_DIR ] && mkdir -p $BIN_DIR\n\nBASE_NAME=$(basename $(ls -1d $INSTALL_DIR/deploy*))\n\nDeDir=\"$INSTALL_DIR/$BASE_NAME\"\nexport COWPATH=$DeDir/bin\nHostCom=$BIN_DIR/tsh\n#define colors\ncyan='\\033[36m';normal='\\033[0m';red='\\033[31m';magenta='\\033[35m';yellow='\\033[33m';white='\\033[37m';green='\\033[32m'\n##################################################\necho \"$*\" | grep -w \"\\-v\" &>/dev/null && DEBUG=\"yes\" || DEBUG=\"\"\n\nEDITOR=${EDITOR:=\"vim\"}\nJUG=0\n\n#Get variables from testenv.ini\n[ ! -f $HOME/.testenv.ini ] && cp $DeDir/testenv.ini $HOME/.testenv.ini\nsource $HOME/.testenv.ini\nDATA_DIR=\"/var/lib/$BASE_NAME/$tester\"\n\n#define test case\ntest_case=\"$guest_type\"-\"$guest_version\"-\"$guest_bit\"-1\nhvm_cfg=hvm-\"$guest_version\"-\"$guest_bit\"-1\npv_cfg=pv-\"$guest_version\"-\"$guest_bit\"-1\n\nHostIP1=\"$1\"\nHostIP2=\"$1\"\n\nopts=\"h\"\ndeclare dict=(a b c d e f g h i j)\ndeclare dict2=(1 2 3 4 5 6 7 8 9)\n\nfunction Usage()\n{\n    cat << EOF\n    deploy [-edcouh]\n    -e(edit): edit testenv.ini, testenv.ini is a configuration file used to configure your environment.\n    -d(deploy): deploy your env on the host, hosts are specified in testenv.ini.\n    -c(check): check key-value, key-value are specified in testenv.ini\n    -u(undeploy): undeploy your env on the host, this will restore the env on the host.(Use -U for no static ip)\n    -U: Only undeploy the specified host\n    -h(help): print this help message\n    -o \"host ip\": only deploy the host you specify, others will not change.(no static ip)\n    -O: temporarily configure a machine\n    -v: Enter verbose mode\n    -s(set): yes/no, enable/disable cowsay, needs root permission, system config, just for fun\n    install: install deploy tool\n\nEOF\n}\n\n\n#Check for target host\nfunction CheckSSH()\n{\n    echo -ne ${yellow}\"Checking ssh key for host: $1\" ......${normal}\n    UI=root@\"$1\"\n    /usr/bin/expect << EOF > /dev/null\n    set timeout 30\n    spawn ssh $UI\n\n    expect {\n    \"*yes*\"\n    {\n        exit 1\n    }\n\n    \"*assword*\"\n    {\n        exit 1\n    }\n\n    \"*#*\" {exit 0}\n    eof {exit 2}\n}\nEOF\n    JUG=\"$?\"\n    [ $JUG -eq 2 ] && eval sed -i '/$1/d' $HOME/.ssh/known_hosts &> /dev/null\n    echo -e ${cyan} done${normal}\n    return \"$JUG\"\n}\n\n#make sure you have configured ssh key and generated login command before you deploy your env.\nfunction CheckBefore()\n{\n    [ ! -f \"$2\" ] && echo -e \"${red}Pls edit testenv.ini and deploy your env first(login command not found)$normal\" && return 1\n\n    CheckSSH $1\n    if [ \"$?\" -ne 0 ];then\n        echo -e \"${red}Pls configure your ssh key for $1 first$normal\"\n        return 1\n    else\n        return 0\n    fi\n}\n\nfunction configssh()\n{\n    UI=root@\"$1\"\n\n    CheckSSH $1\n    [ \"$JUG\" != \"0\" ] && {\n    eval sed -i '/$1/d' $HOME/.ssh/known_hosts &> /dev/null\n    echo -ne ${white}\"Generate ssh key for host: $1\" ......${normal}\n    /usr/bin/expect << EOF > /dev/null\n#    cat << EOF > /tmp/aa.sh\nset timeout 10\nset password $2\n\nspawn ssh-copy-id $UI\nexpect {\n    \"*yes*\"\n    {\n        send \"yes\\r\"\n        expect eof {exit}\n        expect \"*assword*\"\n        send \"\\$password\\r\"\n        exp_continue\n    }\n\n    \"*assword*\"\n    {\n        send \"\\$password\\r\"\n        exp_continue\n    }\n}\n\nEOF\n    echo -e ${cyan} done${normal}\n}\n}\n\n#deploy the environment per host\nfunction Nodedeploy()\n{\n    #printf \"%s %s\\n\" $$ \"deploy Node $1 ......
\"\n #username@ipaddress\n UI=\"root@$1\"\n Host=$1\n\n #If the target machine already have deployed, then, return;\n [ \"$(ssh $UI \"[ -f ~/.deploy.lock.$tester ] && echo 1\")\" == \"1\" ] && {\n echo -e $red\"$UI have already deployed, ignore.\"$normal\n\n #But still chang PS1 for that machine\n ssh $UI \"sed -i \"/PS1/s/-[0-9a-z]/$2/\" ~/.bashrc\"\n ssh $UI \"sed -i \"/PS1/s/-[0-9a-z]/$2/\" ~/.profile\"\n return 0\n }\n echo -e ${cyan}Begin to deploy host: $Host ......${normal}\n\n #Prepare environment\n [ \"$DEBUG\" ] && printf \"Prepare environment on host: %s ......\" $Host\n ssh \"$UI\" \"[ ! -d $DATA_DIR/bin ] && mkdir -p $DATA_DIR/bin;[ ! -d $DATA_DIR/$guest_version ] && mkdir -p $DATA_DIR/$guest_version;ln -s $DATA_DIR/$guest_version ~/$tester;[ ! -d /vol/s1xenimage ] && mkdir -p /vol/s1xenimage;mount 10.66.90.121:/vol/s1xenimage /vol/s1xenimage/ &>/dev/null;[ ! -d /vol/S1/iso ] && mkdir -p /vol/S1/iso;mount 10.66.90.128:/vol/S1/iso /vol/S1/iso/; touch ~/.deploy.lock.$tester\"\n [ \"$DEBUG\" ] && printf \" done\\n\"\n\n #upload customized file\n #rsync --suffix=.$tester -qbae ssh $DeDir/bashrc \"$UI\":~/.bashrc\n #rsync --suffix=.$tester -qbae ssh $DeDir/emacs \"$UI\":~/.emacs\n #rsync --suffix=.$tester -qbae ssh $DeDir/vimrc \"$UI\":~/.vimrc\n #cp -f $DeDir/conf/.bashrc $DeDir/conf/.profile\n for F in $(ls -A $DeDir/conf/)\n do\n [ \"$DEBUG\" ] && printf \"Upload conf/%s to %s:~/......\" $F $Host\n rsync --suffix=.$tester -qLbae ssh $DeDir/conf/$F \"$UI\":~/ > /dev/null\n [ \"$DEBUG\" ] && printf \" done\\n\"\n done\n\n #Concel this feature: deploy your environment for guest\n #printf \"Upload deploy tool to %s:~/$tester/ ......\" $Host\n #rsync -qae ssh $DeDir \"$UI\":~/$tester/\n #printf \" done\\n\"\n\n #printf \"Upload testenv.ini to %s:%s/testenv.ini ......\" $Host $DATA_DIR\n #rsync -qae ssh $DeDir/testenv.ini \"$UI\":$DATA_DIR/testenv.ini\n #printf \" done\\n\"\n\n\n [ \"$DEBUG\" ] && printf \"Upload bin/* to %s:%s/bin/ ......\" $Host $DATA_DIR\n rsync -qae ssh $DeDir/bin/* \"$UI\":$DATA_DIR/bin/\n [ \"$DEBUG\" ] && printf \" done\\n\"\n\n [ \"$DEBUG\" ] && printf \"Upload hvm-example.cfg to %s:%s/%s.cfg ......\" $Host $DATA_DIR/$guest_version ${hvm_cfg}\n rsync -qae ssh $DeDir/testfile/hvm-example.cfg \"$UI\":$DATA_DIR/$guest_version/${hvm_cfg}.cfg\n [ \"$DEBUG\" ] && printf \" done\\n\"\n\n [ \"$DEBUG\" ] && printf \"Upload pv-example.cfg to %s:%s/%s.cfg ......\" $Host $DATA_DIR/$guest_version ${pv_cfg}\n rsync -qae ssh $DeDir/testfile/pv-example.cfg \"$UI\":$DATA_DIR/$guest_version/${pv_cfg}.cfg\n [ \"$DEBUG\" ] && printf \" done\\n\"\n\n [ \"$DEBUG\" ] && printf \"Upload virsh-kvm.xml to %s:%s/ ......\" $Host $DATA_DIR/$guest_version\n rsync -qae ssh $DeDir/testfile/virsh-kvm.xml \"$UI\":$DATA_DIR/$guest_version/\n [ \"$DEBUG\" ] && printf \" done\\n\"\n\n [ \"$cowsay\" == \"no\" ] && ssh -n \"$UI\" \"mv -f $DATA_DIR/bin/cowsay2 $DATA_DIR/bin/cowsay\"\n\n #deal the file\n #sed -i 's/INSTEAD/$BASE_NAME\\/$tester/g' ~/.profile;sed -i 's/INSTEAD/$BASE_NAME\\/$tester/g' ~/.bashrc\n [ \"$DEBUG\" ] && printf \"Deal with file %s & %s & ~/.bashrc & ~/.profile ......\" $hvm_cfg.cfg $pv_cfg.cfg\n ssh -n \"$UI\" \"sed -i 's/INSTESTER/$tester/g' ~/.profile;sed -i 's/INSTESTER/$tester/g' ~/.bashrc;sed -i 's/hvm-example/$hvm_cfg/g' $DATA_DIR/$guest_version/${hvm_cfg}.cfg; sed -i 's/pv-example/$pv_cfg/g' $DATA_DIR/$guest_version/${pv_cfg}.cfg;sed -i '/PS1/s/-[0-9a-z]/$2/' ~/.bashrc;sed -i '/PS1/s/-[0-9a-z]/$2/' ~/.profile;cd $DATA_DIR/bin/;$DATA_DIR/bin/logview 
-a;$DATA_DIR/bin/logview $DATA_DIR/bin/awk.example -f /var/log/xen/xend-debug.log -f /var/log/xen/xen-hotplug.log -f /var/log/xen/xend.log -s 4s errorlog.log --notice=all &>/dev/null &\"\n [ \"$DEBUG\" ] && printf \" done\\n\"\n\n echo -e ${cyan}End to deploy host $Host${normal}\n}\n\nfunction NodeUndeploy()\n{\n #username@ipaddress\n UI=\"root@$1\"\n\n echo -e ${cyan}Begin to undeploy host $1 ......${normal}\n\n\n rmstr=\"\"\n for F in $(ls -A $DeDir/conf/)\n do\n str=\"${str}\"\"rm -f ~/$F;\"\n done\n\n #printf \"rm -rf ~/$tester & ${str} & umount /vol/s1xenimage/ & umount /vol/S1/iso/ & revert %s:~/*.$tester ......\\n\" $1\n [ \"$DEBUG\" ] && echo -n \"unlink ~/$tester & umount /vol/s1xenimage/ & umount /vol/S1/iso/ & revert $1:~/*.$tester & rm -rf $DATA_DIR ......\"\n #ssh \"$UI\" \"${str}rename .$tester '' .*.$tester &>/dev/null; rename .$tester '' ~/.ssh/*.$tester; umount /vol/s1xenimage/ &>/dev/null; umount /vol/S1/iso/ &>/dev/null;rm -rf ~/.deploy.lock;killall logview &>/dev/null;rm -rf $DATA_DIR;unlink ~/$tester\"\n ssh \"$UI\" \"rename .$tester '' .*.$tester &>/dev/null;umount /vol/s1xenimage/ &>/dev/null; umount /vol/S1/iso/ &>/dev/null;rm -rf ~/.deploy.lock;killall logview &>/dev/null;rm -rf $DATA_DIR;unlink ~/$tester &>/dev/null\"\n\n [ \"$DEBUG\" ] && echo \" done\"\n #printf \"rm -f %s ......\" $HostCom\n #rm -f $HostCom\n #printf \"done\\n\"\n\n echo -e ${cyan}End to undeploy host $1${normal}\n}\n\n#Check the version of the host & guest & xen,key=value are defined in the $DeDir/testenv.ini if the key have no value, then it will not be checked.\nfunction CheckKey()\n{\n UI=root@\"$1\"\n echo -e ${cyan}Begin to check host $1 ......${normal}\n\n echo -e \"#!/bin/bash\\n\\n\" > /tmp/CheckKey.tmp\n cat $DeDir/testenv.ini >> /tmp/CheckKey.tmp\n cat $DeDir/CheckKey.sh >> /tmp/CheckKey.tmp\n echo >> /tmp/CheckKey.tmp\n chmod 755 /tmp/CheckKey.tmp\n\n printf \"Upload CheckKey.sh to %s:/tmp/ ......\" $Host\n rsync -qae ssh /tmp/CheckKey.tmp \"$UI\":/tmp/CheckKey.sh\n printf \" done\\n\"\n\n printf \"execute /tmp/CheckKey.sh & show check result:\\n\"\n echo -e $red\n ssh \"$UI\" \"/tmp/CheckKey.sh\"\n echo -e $normal\n #printf \"done\\n\"\n\n echo -e ${cyan}End to check host $1${normal}\n}\n\n#generate host command\nfunction GenHostCom()\n{\n\n echo -ne Generate $HostCom ......\n\n cat /dev/null > $HostCom\n cat << EOF >> $HostCom\n#!/bin/bash\n\nwhile getopts OPTS opt\ndo\n case \\$opt in\nEOF\n\n i=0\n HelpStr=\"\"\n for host in \"${staticip[@]}\"\n do\n opt=${dict2[i]}\n opts=$opts$opt\n cat << EOF >> $HostCom\n $opt) ssh -Y root@$host ;;\nEOF\n HelpStr=\"$HelpStr\"\" echo $opt: root@$host \\\"->host-$opt\\\"\\n\"\n ((i++))\n done\n\n i=0\n for host in \"${ip[@]}\"\n do\n opt=${dict[i]}\n opts=$opts$opt\n\n cat << EOF >> $HostCom\n $opt) ssh -Y root@$host ;;\nEOF\n HelpStr=\"$HelpStr\"\" echo $opt: root@$host \\\"->host-$opt\\\"\\n\"\n ((i++))\n done\n cat << EOF >> $HostCom\n h)\n $(echo -e \" $HelpStr\")\n ;;\n *) echo \"command error, pls check the parameter\"\n exit 1\n ;;\n esac\ndone\nEOF\n\n eval sed -i 's/OPTS/$opts/g' $HostCom\n\n chmod 755 $HostCom\n echo -e \" done\"\n}\n\n\n[ $# -eq 0 ] && Usage\n\n#Check host environment\n[ ! 
-f $HOME/.ssh/id_rsa.pub ] && {\n echo -e $red\"Please generate your public key first, use ssh-keygen to generate.\"$normal\n exit\n}\n\n#Start\nwhile getopts edvcuUhso:O: opt\ndo\n case $opt in\n e) $EDITOR $HOME/.testenv.ini\n ;;\n d)\n GenHostCom\n i=0;fip=\"\"\n for host in \"${staticip[@]}\"\n do\n [ \"$fip\" == \"\" ] && fip=$host\n opt=${dict2[i]}\n configssh $host $password\n Nodedeploy \"$host\" \"-$opt\"\n ((i++))\n done\n\n i=0;fip=\"\"\n for host in \"${ip[@]}\"\n do\n [ \"$fip\" == \"\" ] && fip=$host\n opt=${dict[i]}\n configssh $host $password\n Nodedeploy \"$host\" \"-$opt\"\n ((i++))\n done\n echo\n echo -e ${magenta}You can use \\\"tsh\\\" \"command\" to login the host. \"\\n\"eg: tsh -a to login $fip${normal} | cowsay -n -e \"..\"\n echo\n tsh -h\n ;;\n c)\n for host in \"${ip[@]}\"\n do\n CheckBefore $host $HostCom\n [ \"$?\" -ne 0 ] && continue\n CheckKey \"$host\"\n done\n ;;\n u)\n for host in \"${ip[@]}\"\n do\n CheckBefore $host $HostCom\n [ \"$?\" -ne 0 ] && continue\n NodeUndeploy \"$host\"\n done\n ;;\n o)\n len=${#ip[*]}\n ((len++))\n ip[len]=\"$2\"\n opt=${dict[len]}\n GenHostCom\n configssh $host $password\n Nodedeploy \"$2\" \"-$opt\"\n echo\n echo -e ${magenta}You can use \\\"tsh\\\" \"command\" to login the host. \"\\n\"eg: tsh -a to login $fip${normal} | cowsay -n -e \"..\"\n echo\n tsh -h\n ;;\n O)\n cat << EOF > $BIN_DIR/ts\n #!/bin/bash\n ssh -XY root@$2\nEOF\n chmod +x $BIN_DIR/ts\n host=\"$2\"\n configssh $host $password\n Nodedeploy \"$2\" \"\"\n echo\n echo -e ${magenta}You can use \\\"ts\\\" \"command\" to login $2${normal} | cowsay -n -e \"..\"\n ;;\n U)\n CheckBefore \"$2\" $HostCom\n [ \"$?\" -ne 0 ] && exit\n NodeUndeploy \"$2\"\n eval sed -i '/$2/d' $HostCom\n ;;\n s)\n [ \"$(whoami)\" != \"root\" ] && {\n echo \"You must have root permission to config cowsay!\" |\n cowsay -n -e \"OO\"\n exit\n }\n\n if [ \"$2\" == \"yes\" ];then\n cp -f $DeDir/bin/cowsay /usr/local/bin/cowsay\n elif [ \"$2\" == \"no\" ];then\n cp -f $DeDir/bin/cowsay2 /usr/local/bin/cowsay\n else\n echo \"ERROR: no such value - only yes/no\" | cowsay -n -d\n fi\n ;;\n h) Usage\n ;;\n v) :\n ;;\n *)\n Usage\n ;;\n esac\ndone\n" } ]
14
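The deploy record above gates every action on key-based SSH access, probing each host with an expect-driven CheckSSH before copying files. For readers who want the same probe without expect, here is a minimal Python sketch (the host value is hypothetical; BatchMode makes ssh fail instead of prompting, which is the same signal CheckSSH extracts):

import subprocess

def ssh_key_auth_ok(host, user="root", timeout=5):
    # Returns True only when key auth succeeds without any prompt.
    proc = subprocess.run(
        ["ssh", "-o", "BatchMode=yes", "-o", f"ConnectTimeout={timeout}",
         f"{user}@{host}", "true"],
        capture_output=True,
    )
    return proc.returncode == 0

# example (hypothetical address): ssh_key_auth_ok("192.168.1.10")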
rwohleb/tecancavro
https://github.com/rwohleb/tecancavro
d69ffe8fc07391a526dbbabc50e1eead7cc3abdb
38343df02165a1db1499103dd1f5432ae112a6bf
a2bbc40e8efe16d805934e270094f863429b3562
refs/heads/master
2021-01-20T23:16:43.037417
2013-07-26T14:05:34
2013-07-26T14:05:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7230392098426819, "alphanum_fraction": 0.781862735748291, "avg_line_length": 30.461538314819336, "blob_id": "6c37b60aafc772416b545f20498d50a6b5c91ae5", "content_id": "e4764d29543fd39e3339aea91f066950816d70c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "permissive", "max_line_length": 78, "num_lines": 13, "path": "/test.py", "repo_name": "rwohleb/tecancavro", "src_encoding": "UTF-8", "text": "from tecancavro.models import XCaliburD\n\nfrom tecancavro.transport import TecanAPISerial, TecanAPINode\n\n# Functions to return instantiated XCaliburD objects for testing\n\ndef returnSerialXCaliburD():\n test0 = XCaliburD(com_link=TecanAPISerial(0, '/dev/ttyUSB0', 9600))\n return test0\n\ndef returnNodeXCaliburD():\n\ttest0 = XCaliburD(com_link=TecanAPINode(0, '192.168.1.140:80'), waste_port=6)\n\treturn test0" }, { "alpha_fraction": 0.8702702522277832, "alphanum_fraction": 0.8702702522277832, "avg_line_length": 45.25, "blob_id": "b1225841d7b21b9a7dd19c7f4985abedfca69a35", "content_id": "5d99f0c0f09d40aa6607570304e8f47d4c010d66", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "permissive", "max_line_length": 67, "num_lines": 4, "path": "/tecancavro/__init__.py", "repo_name": "rwohleb/tecancavro", "src_encoding": "UTF-8", "text": "from tecanapi import TecanAPI\nfrom transport import TecanAPISerial, TecanAPINode, TecanAPITimeout\nfrom syringe import Syringe, SyringeError, SyringeTimeout\nfrom models import XCaliburD\n" } ]
2
fakecoinbase/giacomocaironislashtoykoin
https://github.com/fakecoinbase/giacomocaironislashtoykoin
22f5e7ab2282768c884df7880accea9ad664a061
cd5c891819338479eab50bc83bf7cf867394ed5a
adb504d14aa109c9212e7d481ded93e5402a42ce
refs/heads/master
2022-11-26T02:37:59.616647
2020-07-21T09:50:50
2020-07-21T09:50:50
282,570,861
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5027382373809814, "alphanum_fraction": 0.5744797587394714, "avg_line_length": 34.11538314819336, "blob_id": "9690b3966510beff11cd1be6aaf1178242383c71", "content_id": "73224b2c4d91fa099f52d0403091acc1ee15f553", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1826, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/toykoin/tests/test_script.py", "repo_name": "fakecoinbase/giacomocaironislashtoykoin", "src_encoding": "UTF-8", "text": "from toykoin.core.script import Script\nfrom toykoin.core.utils import hash256\n\nfrom btclib import ssa\nfrom btclib.curvemult import mult\nfrom btclib.secpoint import bytes_from_point\n\n\ndef test_serialization():\n assert Script() == Script.from_hex(\"\")\n assert Script.from_hex(Script().hex) == Script()\n\n\ndef test_valid_schnorr():\n sighash = bytes.fromhex(\"00\" * 32)\n false_sighash = bytes.fromhex(\"aa\" * 32)\n sig = ssa.serialize(*ssa._sign(sighash, 1))\n pubkey = bytes_from_point(mult(1))\n pubkey_hash = hash256(pubkey)\n script = Script(\n [\n [0x00, 0x00, pubkey],\n [0x01, 0x00, sig],\n [0x02, 0x00, pubkey_hash], # push pubkey_hash\n [0x03, 0x02, b\"\\x00\"], # hash of pub key from unlocking script\n [0xFF, 0x01, b\"\\x03\\x02\"], # check equality\n [0xFF, 0x04, b\"\\xff\"], # exit if not equal\n [0xFF, 0x03, b\"\\x00\\x01\"], # schnorr verify\n [0xFF, 0x04, b\"\\xff\"],\n ] # exit if not equal]) # push signature\n )\n assert not script.execute(memory={0x100: false_sighash})\n\n\ndef test_invalid_schnorr():\n sighash = bytes.fromhex(\"00\" * 32)\n sig = ssa.serialize(*ssa._sign(sighash, 1))\n pubkey = bytes_from_point(mult(1))\n pubkey_hash = hash256(pubkey)\n script = Script(\n [\n [0x00, 0x00, pubkey],\n [0x01, 0x00, sig],\n [0x02, 0x00, pubkey_hash], # push pubkey_hash\n [0x03, 0x02, b\"\\x00\"], # hash of pub key from unlocking script\n [0xFF, 0x01, b\"\\x03\\x02\"], # check equality\n [0xFF, 0x04, b\"\\xff\"], # exit if not equal\n [0xFF, 0x03, b\"\\x00\\x01\"], # schnorr verify\n [0xFF, 0x04, b\"\\xff\"],\n ] # exit if not equal]) # push signature\n )\n assert script.execute(memory={0x100: sighash})\n" }, { "alpha_fraction": 0.6355932354927063, "alphanum_fraction": 0.6581920981407166, "avg_line_length": 17.63157844543457, "blob_id": "2b1d33538cf3415b40b82e0b278e87843035a909", "content_id": "175e8b59eee767eca3bf377e3d49c4c303157473", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 354, "license_type": "permissive", "max_line_length": 36, "num_lines": 19, "path": "/pyproject.toml", "repo_name": "fakecoinbase/giacomocaironislashtoykoin", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"Toykoin\"\nversion = \"0.0.0\"\ndescription = \"A toy cryptocurrency\"\nauthors = [\"Giacomo Caironi <[email protected]>\"]\nlicense = \"MIT\"\nreadme = 'README.md'\n\n[tool.poetry.dependencies]\npython = \"^3.8\"\nbtclib = \"*\"\npytest = \"*\"\nsqlalchemy = \"*\"\n\n[tool.poetry.dev-dependencies]\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n" } ]
2
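The toykoin tests above drive a small stack-machine Script: opcodes 0x00-0x03 push or transform values, 0xFF 0x01 compares two entries, 0xFF 0x03 performs Schnorr verification, 0xFF 0x04 aborts on mismatch, and memory address 0x100 carries the sighash. A condensed usage sketch assembled only from calls visible in test_script.py (key 1 is the same toy private key the tests use):

from btclib import ssa
from btclib.curvemult import mult
from btclib.secpoint import bytes_from_point
from toykoin.core.script import Script
from toykoin.core.utils import hash256

sighash = bytes(32)                          # all-zero message, as in the tests
sig = ssa.serialize(*ssa._sign(sighash, 1))  # sign with toy key 1
pubkey = bytes_from_point(mult(1))

script = Script([
    [0x00, 0x00, pubkey],           # push public key
    [0x01, 0x00, sig],              # push signature
    [0x02, 0x00, hash256(pubkey)],  # push expected pubkey hash
    [0x03, 0x02, b"\x00"],          # hash the pubkey pushed at slot 0
    [0xFF, 0x01, b"\x03\x02"],      # compare computed and expected hashes
    [0xFF, 0x04, b"\xff"],          # exit if not equal
    [0xFF, 0x03, b"\x00\x01"],      # Schnorr-verify (pubkey, signature)
    [0xFF, 0x04, b"\xff"],          # exit if verification failed
])
assert script.execute(memory={0x100: sighash})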
Mi7ai/Django
https://github.com/Mi7ai/Django
49696a52b5e8bfa39b51a7c954f32ecc9edfcaa1
76feac7dea2f41a4c125ff635ce5a971f30c5158
3b59e06cb1784d0b5f4977d4054c53c2f5a5064d
refs/heads/master
2022-11-14T08:00:03.564062
2020-07-09T18:44:09
2020-07-09T18:44:09
278,442,636
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7207392454147339, "alphanum_fraction": 0.7207392454147339, "avg_line_length": 36.53845977783203, "blob_id": "25cf91593f11b1060455b3250c4efbe72e681066", "content_id": "ed570a5ce0fbb91f43427d94de93368675eeb03c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/accounts/urls.py", "repo_name": "Mi7ai/Django", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom django.contrib.auth import views as auth_views\nfrom .views import ProfileView, SignupView, SignupDoneView\nfrom django.views.generic import base as generic_views\n\n# app_name = 'accounts'\n\nurlpatterns = [\n path('', include('django.contrib.auth.urls')),\n path('profile/', ProfileView.as_view(), name='profile'),\n path('signup/', SignupView.as_view(), name='signup'),\n path('signupdone/', SignupDoneView.as_view(), name='signupdone'),\n]" }, { "alpha_fraction": 0.7836363911628723, "alphanum_fraction": 0.7836363911628723, "avg_line_length": 31.41176414489746, "blob_id": "4186370ec5c83e75f660c9a05891197adae47e58", "content_id": "ecad308db591a6c5d071e040e7eab440eac1b2e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "no_license", "max_line_length": 57, "num_lines": 17, "path": "/accounts/views.py", "repo_name": "Mi7ai/Django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, CreateView\n# from django.views.generic import CreateView\nfrom .forms import UserSignUpForm\n# Create your views here.\n\nclass ProfileView(TemplateView):\n template_name = 'accounts/profile.html'\n\nclass SignupView(CreateView):\n form_class = UserSignUpForm\n success_url = reverse_lazy(\"signupdone\")\n template_name = 'accounts/signup.html'\n \nclass SignupDoneView(TemplateView):\n template_name = 'accounts/signup_done.html'" } ]
2
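accounts/views.py above imports UserSignUpForm from accounts/forms.py, which is not included in this record. A minimal sketch of what that module plausibly contains, assuming the conventional UserCreationForm subclass (an assumption, not the repository's actual file):

# accounts/forms.py -- hypothetical reconstruction
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User

class UserSignUpForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = User
        fields = ("username", "email")  # assumed field list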
kkonrad/troia-extras
https://github.com/kkonrad/troia-extras
584adac9b0b1837a7bb813201127e650c24f8ca4
c4fcdbbb4860716ef2f745ede9c740e5911f67d4
d1bb6cf18de4597f27d37308a290cfd6a1dd6945
refs/heads/master
2020-08-05T00:40:22.137831
2013-07-08T13:05:56
2013-07-08T13:05:56
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5373134613037109, "alphanum_fraction": 0.5477612018585205, "avg_line_length": 19.600000381469727, "blob_id": "6c6af7396e690e24686536b9a4507a2dbfd4c816", "content_id": "9eaac65649e6de3ba9b2c32515dfee440a9d1a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1340, "license_type": "no_license", "max_line_length": 78, "num_lines": 65, "path": "/big_file/job_from_files.py", "repo_name": "kkonrad/troia-extras", "src_encoding": "UTF-8", "text": "import sys\n\nfrom client.gal import TroiaClient\n\nTROIA_ADDRESS = 'http://localhost:8080/service'\n\nALGORITHM = \"BDS\"\n\nITERATIONS = 10\nAPACK_SIZE = 5000\n\n\ndef create():\n categories = [\"broken\", \"invalid\", \"matched\", \"new\", \"reconciled\", \"skip\"]\n tc = TroiaClient(TROIA_ADDRESS)\n tc.create(categories, algorithm=ALGORITHM, iterations=ITERATIONS)\n return tc\n\n\ndef wc(tc, resp):\n resp = tc.await_completion(resp)\n if resp['status'] != 'OK':\n import pprint\n pprint.pprint(resp)\n assert False\n\n\ndef load_assigns():\n with open('/home/konrad/troia/answers.out', 'r') as F:\n i = 0\n w = []\n for l in F:\n l = l.strip()\n i += 1\n object, worker, label = l.split(' ')\n w.append((worker, object, label))\n if i == APACK_SIZE:\n yield w\n w = []\n i = 0\n if w:\n yield w\n\n\ndef post_assigns(tc):\n assigns_packages = load_assigns()\n for i, package in enumerate(assigns_packages):\n print i * APACK_SIZE,\n sys.stdout.flush()\n wc(tc, tc.post_assigned_labels(package))\n\n\ndef compute(tc):\n wc(tc, tc.post_compute())\n\n\ndef main(args):\n tc = create()\n print tc.jid\n post_assigns(tc)\n compute(tc)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n" }, { "alpha_fraction": 0.5808966755867004, "alphanum_fraction": 0.5883690714836121, "avg_line_length": 23.615999221801758, "blob_id": "1256ee8a067aa3663430ba6bf7eb383b9febaefb", "content_id": "03af05a9cdb965bc2b2cfd03e996014983853dc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3078, "license_type": "no_license", "max_line_length": 86, "num_lines": 125, "path": "/job_from_files.py", "repo_name": "kkonrad/troia-extras", "src_encoding": "UTF-8", "text": "import sys\nimport pprint\n\nfrom client.gal import TroiaClient\n\nTROIA_ADDRESS = 'http://localhost:8080/service'\n\nALGORITHM = \"BDS\"\n\nITERATIONS = 10\nAPACK_SIZE = 5000\n\n\ndef iter_assigns(assigns_fname):\n with open(assigns_fname, 'r') as F:\n for l in F:\n l = l.strip()\n if l == \"\":\n break\n spl = l.split(' ')\n worker, object, label = spl[0], spl[1], ' '.join(spl[2:])\n yield worker, object, label\n\n\ndef get_categories(assigns_fname):\n categories = set()\n for _, __, label in iter_assigns(assigns_fname):\n categories.add(label)\n return categories\n\n\ndef create(categories):\n tc = TroiaClient(TROIA_ADDRESS)\n tc.create(categories, algorithm=ALGORITHM, iterations=ITERATIONS)\n return tc\n\n\ndef wc(tc, resp):\n resp = tc.await_completion(resp)\n if resp['status'] != 'OK':\n pprint.pprint(resp)\n assert False\n return resp\n\n\ndef load_assigns(fname):\n i = 0\n w = []\n for worker, object, label in iter_assigns(fname):\n i += 1\n w.append((worker, object, label))\n if i == APACK_SIZE:\n yield w\n w = []\n i = 0\n if w:\n yield w\n\n\ndef post_assigns(tc, fname):\n assigns_packages = load_assigns(fname)\n for i, package in enumerate(assigns_packages):\n print i * APACK_SIZE,\n sys.stdout.flush()\n wc(tc, tc.post_assigned_labels(package))\n\n\ndef post_golds(tc, fname):\n 
with open(fname, 'r') as F:\n golds = [f.strip().split(\" \") for f in F]\n golds = [(x[0], \" \".join(x[1:])) for x in golds]\n wc(tc, tc.post_gold_data(golds))\n\ndef compute(tc):\n return wc(tc, tc.post_compute())\n\n\ndef get_res(tc, resp):\n return wc(tc, resp)['result']\n\n\ndef get_objects_results(tc, cost_alg):\n return [(d['objectName'], d['categoryName']) for d in\n get_res(tc, tc.get_objects_prediction(cost_alg))]\n\n\ndef get_workers_results(tc, cost_alg):\n return [(d['workerName'], d['value']) for d in\n get_res(tc, tc.get_estimated_workers_quality(cost_alg))]\n\n\ndef sort_results(results):\n return sorted(results, key=lambda x: int(x[0]))\n\n\ndef save(results, kind, cost_alg):\n fname = \"%s_%d_%s_%s.tsv\" % (ALGORITHM, ITERATIONS, kind, cost_alg)\n with open(fname, 'w') as F:\n for r in results:\n F.write(\"%s\\t%s\\n\" % r)\n\n\ndef process_results(tc):\n for cost_alg in [\"MinCost\", \"MaxLikelihood\"]:\n short_save = lambda results, kind: save(sort_results(results), kind, cost_alg)\n obj_results = get_objects_results(tc, cost_alg)\n short_save(obj_results, \"objects\")\n work_results = get_workers_results(tc, cost_alg)\n short_save(work_results, \"workers\")\n\n\ndef main(args):\n assigns_fname = args[0]\n tc = create(list(get_categories(assigns_fname)))\n print tc.jid\n post_assigns(tc, assigns_fname)\n if len(args) > 1:\n golds_fname = args[1]\n post_golds(tc, golds_fname)\n pprint.pprint(compute(tc))\n process_results(tc)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n" }, { "alpha_fraction": 0.7763158082962036, "alphanum_fraction": 0.7763158082962036, "avg_line_length": 74, "blob_id": "4edabb5e10fe7d01c0d3bd68638a95f78d8d7234", "content_id": "311938772ec26c3c574a4b3ad56070eb81af18eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 76, "license_type": "no_license", "max_line_length": 74, "num_lines": 1, "path": "/requirements.txt", "repo_name": "kkonrad/troia-extras", "src_encoding": "UTF-8", "text": "-e git+https://github.com/kkonrad/troia-python-client.git#egg=troia_client\n\n" }, { "alpha_fraction": 0.42307692766189575, "alphanum_fraction": 0.42307692766189575, "avg_line_length": 12, "blob_id": "3bcd27fc8224340464ca7517be160e3768ca9127", "content_id": "1a09abf33b9d2ee283fe9fe804ec5002d0dc17a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "no_license", "max_line_length": 12, "num_lines": 2, "path": "/README.md", "repo_name": "kkonrad/troia-extras", "src_encoding": "UTF-8", "text": "troia-extras\n============\n" } ]
4
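job_from_files.py above expects a whitespace-separated assigns file (worker, object, then a label that may itself contain spaces) and an optional golds file (object, then label). A small sketch that writes toy inputs and shows the invocation; all file names and values here are made up:

# build toy inputs for job_from_files.py (file names and labels hypothetical)
assigns = [
    ("w1", "obj1", "cat A"),  # the label may span several words
    ("w2", "obj1", "cat B"),
    ("w1", "obj2", "cat A"),
]
with open("assigns.txt", "w") as f:
    for worker, obj, label in assigns:
        f.write(f"{worker} {obj} {label}\n")

with open("golds.txt", "w") as f:
    f.write("obj1 cat A\n")  # object first, then its correct label

# then run: python job_from_files.py assigns.txt golds.txt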
lorischl-otter/cs-module-project-hash-tables
https://github.com/lorischl-otter/cs-module-project-hash-tables
caa5653c4e6aabd0f7762f6f4d2df932699cee89
e36c93ed0aaeaf39fb26b006d24ee38f39619fe8
1689b2e8772cd37f1e9ad89405aeadf68fd0d143
refs/heads/master
2022-12-03T02:25:43.340631
2020-08-06T14:07:33
2020-08-06T14:07:33
285,011,458
0
0
null
2020-08-04T14:52:42
2020-08-05T23:51:49
2020-08-06T14:07:33
Python
[ { "alpha_fraction": 0.5382652878761292, "alphanum_fraction": 0.5420918464660645, "avg_line_length": 26.034482955932617, "blob_id": "ba2ad46cbe6d48cf75f096545f29ecc2aaeb41a4", "content_id": "3e2ad2e605d351df4a3dee43612388bdc8c60c9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 96, "num_lines": 29, "path": "/applications/word_count/word_count.py", "repo_name": "lorischl-otter/cs-module-project-hash-tables", "src_encoding": "UTF-8", "text": "def word_count(s):\n\n # initialize dictionary\n d = {}\n\n # split words from string on whitespace\n words = s.split()\n\n # loop through words\n for w in words:\n # strip characters to ignore\n w = w.strip('\":;,.-+=/\\\\|[]{}()*^&').lower()\n # pass on word if only ignored characters\n if w == \"\":\n continue\n # if word not already in dictionary, add to dict\n if w not in d:\n d[w] = 0\n # add 1 count to key in dict\n d[w] += 1\n\n return d\n\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count('This is a test of the emergency broadcast network. This is only a test.'))\n" } ]
1
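A worked trace of the __main__ demo above clarifies the stripping rule: characters in ":;,.-+=/\|[]{}()*^& are removed from each word, but apostrophes are kept, so doesn't survives intact. Assuming Python 3.7+ (dicts print in insertion order), the three calls should output:

{}
{'hello': 1}
{'hello': 2, 'my': 2, 'cat': 2, 'and': 1, "doesn't": 1, 'say': 1, 'back': 1}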
afcarl/MLP-Mixer-Pytorch-4
https://github.com/afcarl/MLP-Mixer-Pytorch-4
cb98f44cda9486499822988bd52dfbe72003dd13
793ccdeb73fd482e3fd5dacbb1cfc075af54fcd7
977a63fd8f0340034243377c3b11ec137e10264a
refs/heads/main
2023-04-20T22:39:49.121115
2021-05-11T15:33:25
2021-05-11T15:33:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5481033325195312, "alphanum_fraction": 0.5667949318885803, "avg_line_length": 34.90131759643555, "blob_id": "4163ceca645d819601472c9d13f119bfe3eff1e7", "content_id": "7e546d04c1b4ecb3e032ce1277678f8cb791a62b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5457, "license_type": "permissive", "max_line_length": 95, "num_lines": 152, "path": "/models/modeling.py", "repo_name": "afcarl/MLP-Mixer-Pytorch-4", "src_encoding": "UTF-8", "text": "import copy\n\nimport torch\n\nimport models.configs as configs\n\nfrom os.path import join as pjoin\n\nfrom torch import nn\nfrom torch.nn.modules.utils import _pair\n\nTOK_FC_0 = \"token_mixing/Dense_0\"\nTOK_FC_1 = \"token_mixing/Dense_1\"\nCHA_FC_0 = \"channel_mixing/Dense_0\"\nCHA_FC_1 = \"channel_mixing/Dense_1\"\nPRE_NORM = \"LayerNorm_0\"\nPOST_NORM = \"LayerNorm_1\"\n\n\ndef np2th(weights, conv=False):\n \"\"\"Possibly convert HWIO to OIHW.\"\"\"\n if conv:\n weights = weights.transpose([3, 2, 0, 1])\n return torch.from_numpy(weights)\n\n\nclass MlpBlock(nn.Module):\n def __init__(self, hidden_dim, ff_dim):\n super(MlpBlock, self).__init__()\n self.fc0 = nn.Linear(hidden_dim, ff_dim, bias=True)\n self.fc1 = nn.Linear(ff_dim, hidden_dim, bias=True)\n self.act_fn = nn.GELU()\n\n def forward(self, x):\n x = self.fc0(x)\n x = self.act_fn(x)\n x = self.fc1(x)\n return x\n\n\nclass MixerBlock(nn.Module):\n def __init__(self, config):\n super(MixerBlock, self).__init__()\n self.token_mlp_block = MlpBlock(config.n_patches, config.tokens_mlp_dim)\n self.channel_mlp_block = MlpBlock(config.hidden_dim, config.channels_mlp_dim)\n self.pre_norm = nn.LayerNorm(config.hidden_dim, eps=1e-6)\n self.post_norm = nn.LayerNorm(config.hidden_dim, eps=1e-6)\n\n def forward(self, x):\n h = x\n x = self.pre_norm(x)\n x = x.transpose(-1, -2)\n x = self.token_mlp_block(x)\n x = x.transpose(-1, -2)\n x = x + h\n\n h = x\n x = self.post_norm(x)\n x = self.channel_mlp_block(x)\n x = x + h\n return x\n\n def load_from(self, weights, n_block):\n ROOT = f\"MixerBlock_{n_block}\"\n with torch.no_grad():\n self.token_mlp_block.fc0.weight.copy_(\n np2th(weights[pjoin(ROOT, TOK_FC_0, \"kernel\")]).t())\n self.token_mlp_block.fc1.weight.copy_(\n np2th(weights[pjoin(ROOT, TOK_FC_1, \"kernel\")]).t())\n self.token_mlp_block.fc0.bias.copy_(\n np2th(weights[pjoin(ROOT, TOK_FC_0, \"bias\")]).t())\n self.token_mlp_block.fc1.bias.copy_(\n np2th(weights[pjoin(ROOT, TOK_FC_1, \"bias\")]).t())\n\n self.channel_mlp_block.fc0.weight.copy_(\n np2th(weights[pjoin(ROOT, CHA_FC_0, \"kernel\")]).t())\n self.channel_mlp_block.fc1.weight.copy_(\n np2th(weights[pjoin(ROOT, CHA_FC_1, \"kernel\")]).t())\n self.channel_mlp_block.fc0.bias.copy_(\n np2th(weights[pjoin(ROOT, CHA_FC_0, \"bias\")]).t())\n self.channel_mlp_block.fc1.bias.copy_(\n np2th(weights[pjoin(ROOT, CHA_FC_1, \"bias\")]).t())\n\n self.pre_norm.weight.copy_(np2th(weights[pjoin(ROOT, PRE_NORM, \"scale\")]))\n self.pre_norm.bias.copy_(np2th(weights[pjoin(ROOT, PRE_NORM, \"bias\")]))\n self.post_norm.weight.copy_(np2th(weights[pjoin(ROOT, POST_NORM, \"scale\")]))\n self.post_norm.bias.copy_(np2th(weights[pjoin(ROOT, POST_NORM, \"bias\")]))\n\n\nclass MlpMixer(nn.Module):\n def __init__(self, config, img_size=224, num_classes=1000, patch_size=16, zero_head=False):\n super(MlpMixer, self).__init__()\n self.zero_head = zero_head\n self.num_classes = num_classes\n patch_size = _pair(patch_size)\n n_patches = (img_size // patch_size[0]) * (img_size // 
patch_size[1])\n config.n_patches = n_patches\n\n self.stem = nn.Conv2d(in_channels=3,\n out_channels=config.hidden_dim,\n kernel_size=patch_size,\n stride=patch_size)\n self.head = nn.Linear(config.hidden_dim, num_classes, bias=True)\n self.pre_head_ln = nn.LayerNorm(config.hidden_dim, eps=1e-6)\n\n\n self.layer = nn.ModuleList()\n for _ in range(config.num_blocks):\n layer = MixerBlock(config)\n self.layer.append(copy.deepcopy(layer))\n\n def forward(self, x, labels=None):\n x = self.stem(x)\n x = x.flatten(2)\n x = x.transpose(-1, -2)\n\n for block in self.layer:\n x = block(x)\n x = self.pre_head_ln(x)\n x = torch.mean(x, dim=1)\n logits = self.head(x)\n\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_classes), labels.view(-1))\n return loss\n else:\n return logits\n\n def load_from(self, weights):\n with torch.no_grad():\n if self.zero_head:\n nn.init.zeros_(self.head.weight)\n nn.init.zeros_(self.head.bias)\n else:\n self.head.weight.copy_(np2th(weights[\"head/kernel\"]).t())\n self.head.bias.copy_(np2th(weights[\"head/bias\"]).t())\n self.stem.weight.copy_(np2th(weights[\"stem/kernel\"], conv=True))\n self.stem.bias.copy_(np2th(weights[\"stem/bias\"]))\n self.pre_head_ln.weight.copy_(np2th(weights[\"pre_head_layer_norm/scale\"]))\n self.pre_head_ln.bias.copy_(np2th(weights[\"pre_head_layer_norm/bias\"]))\n\n for bname, block in self.layer.named_children():\n block.load_from(weights, n_block=bname)\n\n\nCONFIGS = {\n 'Mixer-B_16': configs.get_mixer_b16_config(),\n 'Mixer-L_16': configs.get_mixer_l16_config(),\n 'Mixer-B_16-21k': configs.get_mixer_b16_config(),\n 'Mixer-L_16-21k': configs.get_mixer_l16_config()\n}\n" } ]
1
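The modeling.py record above contains everything needed to build a randomly initialized Mixer; pretrained use additionally requires calling load_from with a NumPy weight dictionary (see np2th). A minimal forward-pass sketch grounded in the constructor and CONFIGS shown above:

import torch
from models.modeling import CONFIGS, MlpMixer

config = CONFIGS["Mixer-B_16"]
model = MlpMixer(config, img_size=224, num_classes=1000, patch_size=16)

x = torch.randn(1, 3, 224, 224)  # one RGB image
logits = model(x)                # labels=None, so raw logits are returned
print(logits.shape)              # torch.Size([1, 1000])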
EvanHahn/brainfuck-interpreter
https://github.com/EvanHahn/brainfuck-interpreter
03dbf1de1ed393f5ec0a1ba96fe0556274806fa7
f946984cbad5f6c827bc65445544404b82474418
d3095fbe321c655de9fed400e35d3360a907516b
refs/heads/master
2020-05-30T11:23:38.726968
2014-04-15T02:36:43
2014-04-15T02:36:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5135135054588318, "alphanum_fraction": 0.5180180072784424, "avg_line_length": 17.5, "blob_id": "a42c1d118dfa9ff13065f3e83ed17d82a471a0a8", "content_id": "ecfc1d34ae1cdfb6d89b002224c065ef432a9256", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "permissive", "max_line_length": 46, "num_lines": 12, "path": "/brainfuck-shorten.py", "repo_name": "EvanHahn/brainfuck-interpreter", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom re import sub\nfrom sys import argv\n\nif __name__ == \"__main__\":\n\n filename = argv[1]\n\n with open(filename) as file:\n src = file.read()\n print sub(r\"[^<>\\[\\]\\+-\\.,]\", \"\", src)\n" }, { "alpha_fraction": 0.6699346303939819, "alphanum_fraction": 0.6699346303939819, "avg_line_length": 24.5, "blob_id": "e15718373e37de851f676bdc51602bd71a10893d", "content_id": "9fa2f91cc801abf264c18e342010e6c6527052e8", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 306, "license_type": "permissive", "max_line_length": 70, "num_lines": 12, "path": "/README.md", "repo_name": "EvanHahn/brainfuck-interpreter", "src_encoding": "UTF-8", "text": "the evan hahn brainfuck interpreter\n===================================\n\nnot too crazy.\n\n $ python brainfuck.py my_brainfuck_file.bf \"the input\"\n\nalso comes with a file to strip comments from a brainfuck file:\n\n $ python brainfuck-shorten.py my_brainfuck_file.bf > compressed.bf\n\nenjoy, brainfuckers\n" } ]
2
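The README above invokes a brainfuck.py that is not part of this two-file record. Purely for illustration, a minimal interpreter with the same command-line shape could look like the sketch below; this is hypothetical, not the repository's actual implementation (note the repo's own code targets Python 2, while this sketch is Python 3):

import sys

def run(src, inp=""):
    jumps, stack = {}, []
    for i, c in enumerate(src):  # pre-match the loop brackets
        if c == "[":
            stack.append(i)
        elif c == "]":
            j = stack.pop()
            jumps[i], jumps[j] = j, i
    tape, ptr, pc, ip, out = [0] * 30000, 0, 0, 0, []
    while pc < len(src):
        c = src[pc]
        if c == ">": ptr += 1
        elif c == "<": ptr -= 1
        elif c == "+": tape[ptr] = (tape[ptr] + 1) % 256
        elif c == "-": tape[ptr] = (tape[ptr] - 1) % 256
        elif c == ".": out.append(chr(tape[ptr]))
        elif c == ",":
            tape[ptr] = ord(inp[ip]) if ip < len(inp) else 0
            ip += 1
        elif c == "[" and tape[ptr] == 0:
            pc = jumps[pc]  # jump past the matching ]
        elif c == "]" and tape[ptr] != 0:
            pc = jumps[pc]  # jump back to the matching [
        pc += 1
    return "".join(out)

if __name__ == "__main__":
    with open(sys.argv[1]) as f:
        print(run(f.read(), sys.argv[2] if len(sys.argv) > 2 else ""))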
KatherineSeng/CECS450-Group3-Project1
https://github.com/KatherineSeng/CECS450-Group3-Project1
d2ae664a8d10746abcfdf5f1675ba61a5d96db6f
a898c832675cc7c02f3a796e0da3a3e9861862f1
e8d60e0af7e47ee25a6d079ba7c07fd65370a5d6
refs/heads/master
2022-12-31T18:51:07.588455
2020-10-19T22:09:10
2020-10-19T22:09:10
302,172,600
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6531873941421509, "alphanum_fraction": 0.6582099199295044, "avg_line_length": 38.41116714477539, "blob_id": "3febe3843ca46a1bcc70e1484b0aeca1b38d34e0", "content_id": "eb92a96deaffc1eb96a0e95b82de7e897866cd51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15530, "license_type": "permissive", "max_line_length": 239, "num_lines": 394, "path": "/parse.py", "repo_name": "KatherineSeng/CECS450-Group3-Project1", "src_encoding": "UTF-8", "text": "import codecs\nimport re\nimport nltk\n\n\ndef stripExtra(name):\n \"\"\"This function removes paranthesis from a string\n *Can later be implemented for other uses like removing other characters from string\n \n Args:\n name (string): character's name\n\n Returns:\n string: character's name without paranthesis\n \"\"\"\n startIndexPer=name.find('(')\n\n start = 0\n if(startIndexPer!=-1):\n start = startIndexPer\n\n if(start==0):\n return name\n else:\n return name[0:start-1]\n\ndef parseText(textFileName):\n \"\"\"This function parses through a txt file that is formated as a transcript from the website https://www.imsdb.com/ \n\n Args:\n textFileName (string): name/path of txt file of film transcript.\n\n Returns:\n (string,dictionary): a string of the dialogue text and\n a dictionary of each characters said words in dialogue with word counter.\n dictionary is formated as {character : {word : 2 , anotherword : 4} }\n \"\"\"\n\n # using https://www.imsdb.com/ as script. Highlight/copy script and save to a text file\n charWordDic = {}\n with codecs.open(textFileName, 'r', 'utf-8') as f:\n # read the file content\n f = f.read()\n # store all the clean text that's accumulated\n spoken_text = ''\n test = ''\n # once a character's name is found turn True in order to write the following lines of text into dialogue string\n currentlySpeaking = False\n # once a character's name is found turn True in order to write character's name in the beginning of dialogue string\n writtenName=False\n\n # string of current character that is speaking \n currentSpeaker = ''\n\n spacing = 0\n # split the file into a list of strings, with each line a member in the list\n for line in f.split('\\n'):\n # split the line into a list of words in the line\n words = line.split()\n # if there are no words, reset speaking and name booleans\n if not words:\n currentlySpeaking = False\n writtenName=False\n spacing = 0\n continue\n\n # if this line is a person identifier, save characters name into currentSpeaker string and adjust booleans \n #Strip the name of non alphebetic characters\n nameStriped = [word for word in words if word.isalpha()]\n \n #used to determine if the following line is continuing the dialogue\n newSpacing = (len(line) - len(line.lstrip()))\n if (spacing == 0):\n spacing = newSpacing\n\n #Keep track of person identifer and if its a new one or not\n #Name must be less than 3 words, length of name must not be less than 1, len of whitespace should be very long, name should be all uppercase, spacing and newSpacing should be the same since it is the start of the character's dialogue\n if len(nameStriped) > 0 and len(nameStriped) <= 3 and len(nameStriped[0]) > 1 and (len(line) - len(line.lstrip()) < 45) and all([i.isupper() for i in nameStriped]) and spacing == newSpacing:\n currentSpeaker=line.strip()\n writtenName=False\n currentlySpeaking = True\n continue\n\n\n # if there's a good amount of whitespace to the left and currentlySpeaking boolean is true, this is a spoken line\n # Note: 
the whitespace indentations in the text file may be tabs or spaces. Using integer 2 to satisfy both cases.\n            if (len(line) - len(line.lstrip()) >=2) and currentlySpeaking:\n                #strip extra characters such as parentheses in character's name\n                currentSpeaker=stripExtra(currentSpeaker)\n                if '(' in line or ')' in line:\n                    #strip parentheses from text since it's not dialogue \n                    continue\n                #if writtenName boolean is false write the name of the speaker and then turn the boolean true\n                if not writtenName:\n                    # spoken_text+=\"\\n\"+currentSpeaker + \": \"\n                    writtenName=True\n\n                #Needed to know count of words by each character\n                #Dictionary of Characters containing amount of words\n                entireWord = \"\"\n\n                #algorithm to join multiple words that should be one word. Like someone's First and Last Name said in a sentence\n                #Example in the Lego Movie: Black Falcon\n                #Similarly used in commonWords function\n                for word in words:\n                    if(word[0].isupper() and not word.isupper()):\n                        find = re.compile(\"['.!?,]\").search(word)\n                        if find != None:\n                            index=find.span()[0]\n                            entireWord+=word[:index]\n                            charWordDic=includeInCharacterDic(currentSpeaker,entireWord.strip().lower(), charWordDic)\n                            entireWord=\"\"\n                            continue\n                        else:\n                            entireWord+=word.strip()+\" \"\n                            continue\n                    else:\n                        find = re.compile(\"['.!?,]\").search(word)\n                        if find != None:\n                            index=find.span()[0]\n                            word=word[:index]\n\n                        if(entireWord.strip()!=\"\"):\n                            charWordDic=includeInCharacterDic(currentSpeaker,entireWord.strip().lower(), charWordDic)\n                            charWordDic=includeInCharacterDic(currentSpeaker,word.strip().lower(), charWordDic)\n                            entireWord=\"\"\n                        else:\n                            charWordDic=includeInCharacterDic(currentSpeaker,word.strip().lower(), charWordDic)\n                            entireWord=\"\"\n                #last case\n                if(entireWord.strip()!=\"\"):\n                    charWordDic=includeInCharacterDic(currentSpeaker,entireWord.strip().lower(), charWordDic)\n\n                # # write the dialogue after the character's name or continue dialogue. \n                # spoken_text += line.lstrip()\n\n                #strip all words that are in parentheses since it is not dialogue\n                spoken_text+=re.sub(r\"\\(.*?\\)|[^\\w\\s'.!?,]\", '', line.lstrip()) \n\n    #return only the dialogue text and a dictionary of each character's said words in dialogue with word counter. Example: {character : {word : 2 , anotherword : 4} }\n    return spoken_text, charWordDic\n\ndef includeInCharacterDic(currentSpeaker, word, charWordDic):\n    \"\"\"This function inserts the word into the dictionary under the character and increments the word count \n\n    Args:\n        currentSpeaker (string): name of character in script.\n        word (string): word that is being placed into dictionary\n        dic (dictionary): a dictionary of each character's said words in dialogue with word counter.\n                          dictionary is formatted as such {character : {word : 2 , anotherword : 4} }\n    \n    Returns:\n        charWordDic (dictionary): a dictionary of each character's said words in dialogue with word counter.\n                                  dictionary is formatted as such {character : {word : 2 , anotherword : 4} }\n    \"\"\"\n    word=word.strip()\n    if currentSpeaker not in charWordDic:\n        charWordDic[currentSpeaker]={}\n        #stripping useless characters from word such as -- , . ? 
!\n        word = re.sub(r\"[^\\w\\s']\", '', word) \n        charWordDic[currentSpeaker][word.lower()]=1\n    else:\n        word = re.sub(r\"[^\\w\\s']\", '', word) \n        if word.lower() not in charWordDic[currentSpeaker]:\n            charWordDic[currentSpeaker][word.lower()]=1\n        else: \n            #increment word count by one\n            charWordDic[currentSpeaker][word.lower()]+=1 \n    return charWordDic\n\n\ndef commonWords(text,amount,stopwords):\n    \"\"\"This function finds the common words of a dialogue-only script, excluding characters' names.\n    \n    Args:\n        text (string): a corpus of only dialogue \n\n        amount (int): number of common words you'd like to be returned.\n\n        stopwords (list): a list of your own stopwords\n\n    Returns:\n        list: a list of tuples that include the word and the number of times it was said.\n        list is formatted as such: [(word,2),(anotherword,4),(newword,3)]\n    \"\"\"\n\n    #splits the entire text into a list of words\n    words = text.split()\n\n    #second list of stopwords\n    stopwords2 = nltk.corpus.stopwords.words()\n\n\n    stopwordList=stopwords + list(set(stopwords2) - set(stopwords))\n\n    #removes all stopwords from the list of words\n\n    #algorithm to join multiple words that should be treated as one word, like someone's First and Last Name said in a sentence\n    #Example in the Lego movie: Black Falcon\n    #Similarly used in the parseText function\n    entireWord=\"\"\n    newWordsList=[]\n    for word in words:\n        if(word[0].isupper() and not word.isupper()):\n            find = re.compile(\"['.!?,]\").search(word)\n            if find != None:\n                index=find.span()[0]\n                entireWord+=word[:index]\n                newWordsList.append(entireWord.strip())\n                entireWord=\"\"\n                continue\n            else:\n                entireWord+=word+\" \"\n                continue\n        else:\n            find = re.compile(\"['.!?,]\").search(word)\n            if find != None:\n                index=find.span()[0]\n                word=word[:index]\n\n            if(entireWord.strip()!=\"\"):\n                newWordsList.append(entireWord.strip().lower())\n                newWordsList.append(word.strip().lower())\n                entireWord=\"\"\n            else:\n                newWordsList.append(word.strip().lower())\n                entireWord=\"\"\n    #last case\n    if(entireWord.strip()!=\"\"):\n        newWordsList.append(entireWord.strip())\n\n\n    # cleansed_words = [word.lower() for word in newWordsList if word.isalpha() and word.lower() not in stopwordList]\n    cleansed_words=[]\n    for word in newWordsList:\n        if(word.lower() not in stopwordList and not word.isdigit() and word):\n            cleansed_words.append(word.lower())\n\n    #using the nltk package, find the most common words in the list of words\n    fdist = nltk.FreqDist(cleansed_words)\n    # print(cleansed_words)\n    common=fdist.most_common(amount)\n\n    #returns a list of tuples that include the word and the number of times it was said\n    return common\n\ndef removeStopwordsDic(dic,stopwords):\n    \"\"\"This function removes stopwords from the dictionary used in the parseText(textFileName) function\n    \n    Args:\n        dic (dictionary): a dictionary of each character's said words in dialogue with word counter.\n        dictionary is formatted as such {character : {word : 2 , anotherword : 4} }\n\n    Returns:\n        dictionary: a dictionary of each character's said words in dialogue with word counter, with no stopwords\n        dictionary is formatted as such {character : {word : 2 , anotherword : 4} }\n    \"\"\"\n    stopwords2 = nltk.corpus.stopwords.words()\n    stopwordList=stopwords + list(set(stopwords2) - set(stopwords))\n    #create temp dictionary that will contain no stopwords\n    characterDic={}\n    \n    for character in dic:\n        #only include words not in stopword lists\n        # print(character)\n        characterDic[character]={}\n        for word in dic[character]:\n            if word.lower() not in stopwordList and not 
word.isdigit() and word.strip() and word:\n                characterDic[character][word]=dic[character][word]\n                # print(word)\n\n    #return a dictionary of each character's said words in dialogue with word counter, with no stopwords. \n    #formatted as such {character : {word : 2 , anotherword : 4} }\n    return characterDic\n\ndef keepInCommon(dic,common):\n    \"\"\"Keep only the entries of dic whose words appear in the common list, sorted by word count.\"\"\"\n    newDic = {}\n    sort_orders = sorted(dic.items(), key=lambda x: x[1], reverse=True)\n    \n    for i in sort_orders:\n        #only show the words that are in common throughout the text\n        #character may have said more words but we're only showing those that are most common throughout the film\n        if i[0] in [lis[0] for lis in common]:\n            newDic[i[0]]=i[1]\n    return newDic\n\ndef formatnSortByChar(dic,text,common):\n    \"\"\"This function returns a formatted string of the words each character has said in common with the text\n    \n    Args:\n        dic (dictionary): a dictionary of each character's said words in dialogue with word counter.\n        dictionary is formatted as such {character : {word : 2 , anotherword : 4} }\n\n        text (string): a corpus of only dialogue \n\n        common (list): a list of tuples that include the word and the number of times it was said.\n        list is formatted as such: [(word,2),(anotherword,4),(newword,3)]\n\n    Returns:\n        text (string): a string formatted to show only the words each character said that are commonly said throughout the text \n        Example: \n        Character1\n        word 4\n        newword 3\n\n        Character2 \n        word 2\n\n    \"\"\"\n    #remove any character that is not a letter or ' from text \n    # text = re.sub(r\"[^\\w\\s'-]\", ' ', text) \n\n    text = ''\n\n    for character in dic:\n        boolean = False\n        # characterDic = removeStopwordsDic(dic[character],stopwords)\n        sort_orders = sorted(dic[character].items(), key=lambda x: x[1], reverse=True)\n        \n        for i in sort_orders:\n            #only show the words that are in common throughout the text\n            #character may have said more words but we're only showing those that are most common throughout the film\n            if i[0] in [lis[0] for lis in common]:\n                if not boolean:\n                    text +=\"\\n\\n\"+ character\n                    boolean = True\n                text+=\"\\n\"+ str(i[0])+ \" \"+ str(i[1])\n    return text\n\n#give each word said by each character a ratio based on how often it was said and how many characters said it.\ndef computeWeightedRatio(dic):\n    \"\"\"This function should create a weighted ratio based on the number of times the word was said by the character, how often it was said throughout the film, and how many characters said that word\n    \n    Args:\n        dic (dictionary): a dictionary of each character's said words in dialogue with word counter.\n        dictionary is formatted as such {character : {word : 2 , anotherword : 4} }\n\n    Returns:\n        to be determined. 
probably best to return the same dictionary that will also include the ratio with the word count\n \"\"\"\n return\n\ndef createNewStopwords(textFileName):\n \"\"\"This function creates our own stopword list that we can use to remove from the dialogue text.\n \n Args:\n textFileName (string): name/path of txt file of stopwords where each word is in a new line.\n\n Returns:\n list: of stopword strings\n \"\"\"\n #download nltk stopwords list\n nltk.download('stopwords')\n stopwords = []\n with codecs.open(textFileName, 'r') as f:\n line = f.readlines()\n for word in line:\n stopwords.append(word.strip())\n return stopwords\n\ndef main():\n #download nltk stopwords list\n nltk.download('stopwords')\n\n #Our movie transcript string path of txt file using this transcript format only works so far https://www.imsdb.com/scripts/Kung-Fu-Panda.html/ \n textFileName = 'FilmScripts/LegoMovie.txt'\n\n #create our own stopword list since nltk's stopword list may not remove all stopwords we need.\n #stopwords from https://www.ranks.nl/stopwords\n stopwords = createNewStopwords('stopwords.txt')\n\n #parse the text and get the dialogue only text and also the character word counter dictionary\n spoken_text, charWordDic = parseText(textFileName)\n\n #remove stopwords from dictionary \n charWordDic = removeStopwordsDic(charWordDic,stopwords)\n\n #Get 150 most common words from dialogue text\n common = commonWords(spoken_text,100,stopwords)\n\n #string that is formated to show only the words that each character said that is commonly said throughout the text \n formatedString = formatnSortByChar(charWordDic,spoken_text,common)\n\n #Common Words said in all dialogue of film\n # print(common)\n newcommon = [i[1] for i in common]\n # print(newcommon)\n # dic = keepInCommon(charWordDic[\"EMMET\"],common)\n # print(dic)\n # print(len(dic))\n #formated string of each character's said words that are commonly said through the dialogue\n # print(formatedString)\n\n# main()\n\n\n" }, { "alpha_fraction": 0.7755905389785767, "alphanum_fraction": 0.7755905389785767, "avg_line_length": 22.090909957885742, "blob_id": "57c8fd008c4f40e772606574ff0acb21af44076b", "content_id": "eb5a343dd3ded0eef78f232f96e2e1dadd028d5a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 254, "license_type": "permissive", "max_line_length": 105, "num_lines": 11, "path": "/README.md", "repo_name": "KatherineSeng/CECS450-Group3-Project1", "src_encoding": "UTF-8", "text": "# Film Word Cloud\n\nMust install nltk library through pip:\npip install nltk\n\nThen clone the repo and run wordcloud.py\n\nTo run: \npython wordcloud.py\n\nThen enter the text file name of the film you'd like to view from the folder \"FilmScripts\" when prompted.\n" }, { "alpha_fraction": 0.5671863555908203, "alphanum_fraction": 0.5753526091575623, "avg_line_length": 22.964284896850586, "blob_id": "6579da8025556b5fecc81ccb25140a3d772fe025", "content_id": "1002e30ba5a6521c3ee9229f7c6bb795aca9c6d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "permissive", "max_line_length": 78, "num_lines": 56, "path": "/beemovie.py", "repo_name": "KatherineSeng/CECS450-Group3-Project1", "src_encoding": "UTF-8", "text": " \n\nimport codecs\nimport re\n\ndef reduceLine(dialogue):\n keepReducing = True\n spokenTxt = \"\"\n while(keepReducing):\n \n space=dialogue.find(\" \",34)\n newDialogue = dialogue[:space]\n\n spokenTxt+=\"\\n \" + 
newDialogue.strip()\n dialogue=dialogue[space:]\n\n if(len(dialogue)>34):\n keepReducing = True\n else:\n keepReducing = False\n return spokenTxt\nspokenTxt = ' BEE MOVIE\\n'\nwith codecs.open(\"FilmScripts/originalBeeMovie.txt\", 'r', 'utf-8') as f:\n # read the file content\n f = f.read()\n \n # split the file into a list of strings, with each line a member in the list\n for line in f.split('\\n'):\n # split the line into a list of words in the line\n name = ''\n dialogue =''\n index = line.find(\":\")\n nameFound = False\n if(line.strip()==\"\"):\n continue\n if(index!=-1):\n nameFound = True\n name = line[0:index]\n dialogue = line[index+1:]\n # print(name)\n # print(dialogue)\n else:\n dialogue=line.strip()\n\n if(nameFound): \n spokenTxt+=\"\\n\\n \"+name.upper()\n\n if(len(dialogue)>34):\n spokenTxt+=reduceLine(dialogue)\n\n else:\n spokenTxt+=\"\\n \" + dialogue.strip()\n\n print(spokenTxt)\n\n\nwith codecs.open(\"FilmScripts/NewBeeMovie.txt\", 'w', 'utf-8') as f:\n f.write(spokenTxt)\n\n" }, { "alpha_fraction": 0.6129941344261169, "alphanum_fraction": 0.6239330172538757, "avg_line_length": 31.060274124145508, "blob_id": "7a74ca2c96862e459d2a8f356021141b99a881cd", "content_id": "c1b2c2cd0be01c524921494c80f9912389860d10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12067, "license_type": "permissive", "max_line_length": 197, "num_lines": 365, "path": "/wordcloud.py", "repo_name": "KatherineSeng/CECS450-Group3-Project1", "src_encoding": "UTF-8", "text": "import tkinter as tk\r\nimport random\r\nimport parse\r\nfrom tkinter import simpledialog\r\n\r\n\r\n\r\ncolors = [\"blue\",\"red\",\"orange\",\"green\",\"purple\"]\r\n\r\n\r\n#word:[label,place]\r\nlabelDic = {}\r\n#label placement coordinates\r\nplacements = []\r\nclass Text(tk.Label):\r\n \"\"\"This class is used to generate the words in the word cloud. 
Also is used to create hover over text in second frame.\"\"\"\r\n def __init__(self,mainFrame,secondFrame, word, count, charDic,hoverLabel,individualChar=False,char=None):\r\n self.label = tk.Label(mainFrame, text=word)\r\n self.count = count\r\n self.word = word\r\n self.individualChar = individualChar\r\n self.char = char\r\n self.hoverLabel = hoverLabel\r\n self.charDic = charDic\r\n\r\n\r\n\r\n self.label.bind(\"<Enter>\", self.on_enter)\r\n self.label.bind(\"<Leave>\", self.on_leave)\r\n\r\n def hoverText(self):\r\n hoverString = \"Word: \"+self.word + \"\\nTotal : \"+ str(self.count)+\"\\n\"\r\n if(self.individualChar):\r\n hoverString=self.char+\"\\nWord: \"+self.word + \"\\nTotal : \"+ str(self.count)+\"\\n\"\r\n return hoverString\r\n\r\n tupleList = []\r\n\r\n for char in self.charDic: \r\n if(self.word in self.charDic[char]):\r\n tupleList.append( (char,self.charDic[char][self.word]) )\r\n sort_orders = sorted(tupleList, key=lambda x: x[1], reverse=True)\r\n for i in sort_orders:\r\n char = i[0]\r\n charCount = i[1]\r\n hoverString+=\"{0:20} : {1:2} \\n\".format(char,charCount)\r\n\r\n \r\n return hoverString\r\n\r\n def on_enter(self,event):\r\n hoverString = self.hoverText()\r\n self.hoverLabel.configure(text=hoverString)\r\n\r\n def on_leave(self, enter):\r\n if(self.individualChar):\r\n self.hoverLabel.configure(text=self.char+\"\\nHOVER OVER A WORD TO VIEW DETAILS\")\r\n else:\r\n self.hoverLabel.configure(text=\"HOVER OVER A WORD TO VIEW DETAILS\")\r\n\r\ndef place_label(root, label, word,fontSize):\r\n \"\"\"This function is the algorithm to generate the word cloud word placements.\r\n \r\n Args:\r\n root: tk root\r\n label: tk label for word\r\n word: string of word\r\n fontSize: int for font size of word to be used\r\n dic: dictionary that will contain all the labels that will be made.\r\n we will use this dictionary to edit existing labels for when we want to create new word cloud based on character's words\r\n \"\"\"\r\n redo = True\r\n tries = 0\r\n \r\n \r\n #algorithm to make sure word is not placed in same location as another word\r\n # print(word)\r\n while redo:\r\n colorIndex=random.randint(0,len(colors))-1\r\n if(tries>500):\r\n fontSize=10\r\n elif(tries>10000):\r\n fontSize=3\r\n\r\n tries+=1\r\n # print(tries)\r\n label.config(font=(\"Courier\", fontSize),fg=colors[colorIndex])\r\n # print(tries)\r\n # print(fontSize)\r\n root.update()\r\n width = label.winfo_reqwidth()\r\n height = label.winfo_reqheight()\r\n \r\n try:\r\n x = random.randint(0, 812-width)\r\n y = random.randint(0, 750-height)\r\n except ValueError:\r\n if(tries>100000):\r\n redo=False\r\n return\r\n else:\r\n continue\r\n\r\n\r\n # print(placements)\r\n\r\n x2=x+width\r\n y2=y+height\r\n\r\n xmid = (x+x2)/2\r\n ymid = (y+y2)/2\r\n for placement in placements:\r\n #check if x is between a word that is already placed x,x2 coordinates, also check if the word is between our new word for safety measure.\r\n if(x > placement[0] and x < placement[1]) or (x2 > placement[0] and x2 < placement[1]) or (xmid > placement[0] and xmid < placement[1]) or (placement[2] > x and placement[2] < x2):\r\n if (y > placement[3] and y < placement[4]) or (y2 > placement[3] and y2 < placement[4]) or (ymid > placement[3] and ymid < placement[4]) or (placement[5] > y and placement[5] < y2):\r\n redo = True\r\n break\r\n else:\r\n redo = False\r\n\r\n else:\r\n redo = False\r\n if(len(placements)==0):\r\n redo = False\r\n\r\n label.place(x=x,y=y) \r\n root.update()\r\n\r\n place = [x, label.winfo_width()+x, xmid, y, 
label.winfo_height()+y, ymid]\r\n\r\n placements.append(place)\r\n labelDic[word]=[label,place]\r\n\r\n \r\ndef createWordCloud(root,mainFrame,secondFrame, tuples, charDic, hoverLabel,tupleFontSizeList,individualChar=False,char=None):\r\n \"\"\"This function generates words and places the words in the frames\r\n \r\n Args:\r\n root: tk root\r\n mainFrame: tk frame\r\n secondFrame: tk frame\r\n tuples: list of tuples of word and count\r\n charDic: dictionary of character and word with count\r\n hoverLabel: tk label of hover text \r\n newTupleSizes: list of tuple of wordcount and font size\r\n \"\"\"\r\n\r\n # test = {}\r\n #tuples is a list of tuples. example: [(word, count), (word2, count)]\r\n\r\n for word in labelDic:\r\n label = labelDic[word][0]\r\n label.destroy()\r\n labelDic.clear()\r\n placements.clear()\r\n\r\n\r\n # print(tuples)\r\n index = 0\r\n # print(labelDic)\r\n for tup in tuples:\r\n # print(index)\r\n word = tup[0]\r\n count = tup[1]\r\n text = Text(mainFrame,secondFrame,word,count,charDic,hoverLabel,individualChar,char)\r\n\r\n \r\n size = tupleFontSizeList[index][1]\r\n \r\n place_label(root, text.label, word,size)\r\n index+=1\r\n\r\ndef createWordCloudChar(char,root,mainFrame,secondFrame,charWordDic, hoverLabel,common,sizes):\r\n \"\"\"This function generates words and places the words in the frames based on individual character\r\n \r\n Args:\r\n char : string of name\r\n root: tk root\r\n mainFrame: tk frame\r\n secondFrame: tk frame\r\n charWordDic: dictionary of character and word with count\r\n hoverLabel: tk label of hover text \r\n common: list of tuple of word and wordcount \r\n sizez: list of default sizes \r\n \"\"\"\r\n tupleList = []\r\n\r\n # charDic=parse.keepInCommon(charWordDic[char],common)\r\n for word in charWordDic[char]:\r\n tupleList.append ( (word,charWordDic[char][word]) )\r\n tupleList = sorted(tupleList, key=lambda x: x[1], reverse=True)\r\n tupleFontSizeList = generateNewSizes(tupleList,sizes,True)\r\n # print(tupleList)\r\n\r\n # createWordCloud(root,mainFrame,secondFrame,common, newChar, hoverLabel, tupleFontSizeList)\r\n hoverLabel.configure(text=char+\"\\nHOVER OVER A WORD TO VIEW DETAILS\")\r\n createWordCloud(root,mainFrame,secondFrame, tupleList, charWordDic, hoverLabel,tupleFontSizeList,True,char)\r\n\r\ndef parseFunction(fileName,amountOfCommon):\r\n #Our movie transcript string path of txt file using this transcript format only works so far https://www.imsdb.com/scripts/Kung-Fu-Panda.html/ \r\n textFileName = fileName\r\n\r\n \r\n \r\n #create our own stopword list since nltk's stopword list may not remove all stopwords we need.\r\n #stopwords from https://www.ranks.nl/stopwords\r\n stopwords = parse.createNewStopwords('stopwords.txt')\r\n\r\n #parse the text and get the dialogue only text and also the character word counter dictionary\r\n spoken_text, charWordDic = parse.parseText(textFileName)\r\n\r\n #remove stopwords from dictionary \r\n charWordDic = parse.removeStopwordsDic(charWordDic,stopwords)\r\n\r\n #Get amount most common words from dialogue text\r\n common = parse.commonWords(spoken_text,amountOfCommon,stopwords)\r\n\r\n # #string that is formated to show only the words that each character said that is commonly said throughout the text \r\n # formatedString = parse.formatnSortByChar(charWordDic,spoken_text,common)\r\n\r\n return charWordDic, common\r\n\r\ndef createRangeList(countList,spreadAmount):\r\n newSet=[]\r\n maxNum = max(countList)\r\n for num in countList:\r\n if(num in 
range(maxNum-spreadAmount,maxNum)):\r\n            continue\r\n        else:\r\n            if(num not in newSet):\r\n                newSet.append(num)\r\n                maxNum = num\r\n\r\n    return newSet\r\n\r\ndef generateNewSizes(tupleList,sizes,individualChar=False):\r\n    tupleList = sorted(tupleList, key=lambda x: x[1], reverse=True)\r\n\r\n    countList = [i[1] for i in tupleList]\r\n    if(individualChar):\r\n        rangeList = createRangeList(countList,1)\r\n    else:\r\n        rangeList = createRangeList(countList,6)\r\n\r\n\r\n    if(len(rangeList)>len(sizes)):\r\n        rangeList=rangeList[0:len(sizes)-1]\r\n    newSizeList = []\r\n    newTupleList = []\r\n    for count in countList:\r\n        added = False\r\n        for index in range(0,len(rangeList)-1):\r\n            if(count in range(rangeList[index+1],rangeList[index]+1)):\r\n                added = True\r\n                newSizeList.append(sizes[index])\r\n                break\r\n        #hard-coded case: if all word counts are the same, set to the default size of sizes[1]. \r\n        if(len(rangeList)==1):\r\n            newSizeList.append(sizes[1])\r\n            continue\r\n        #last case for iteration of loop\r\n        if(not added):\r\n            newSizeList.append(sizes[len(sizes)-1])\r\n\r\n    for count in range(0,len(newSizeList)):\r\n        newTupleList.append( (tupleList[count][1],newSizeList[count] ) )\r\n\r\n    return newTupleList\r\n\r\ndef movieSelection():\r\n\r\n\r\n    userinput = tk.Tk()\r\n    userinput.withdraw()\r\n    USER_INP = simpledialog.askstring(title=\"Test\",prompt=\"Enter Movie Title: \")\r\n    return USER_INP\r\n\r\n\r\ndef main():\r\n    \r\n    \r\n    #run parse function\r\n    fileName = (\"FilmScripts/\"+movieSelection()+\".txt\")\r\n    amountOfCommon = 100\r\n    charWordDic , common = parseFunction(fileName,amountOfCommon)\r\n\r\n    #Default Sizes\r\n    sizes=[60,35,20,15,10]\r\n\r\n    #generate wordCloud text sizes using the default sizes. returns a list of (count, FontSize) tuples\r\n    tupleFontSizeList = generateNewSizes(common,sizes)\r\n\r\n    #Generate UI\r\n    root = tk.Tk()\r\n    root.geometry(\"1124x768\")\r\n\r\n    mainFrame = tk.Frame(root, width=824, height=750)\r\n    secondFrame = tk.Frame(root, width=192, height=750)\r\n\r\n    mainFrame.config(bd=4, relief=tk.SOLID)\r\n    secondFrame.config(bd=4, relief=tk.SOLID)\r\n\r\n    newChar = {}\r\n    for char in charWordDic:\r\n        # charDic=parse.keepInCommon(charWordDic[char],common)\r\n        # charDic=charWordDic[char]\r\n        if(not charWordDic[char]):\r\n            continue\r\n        else:\r\n            newChar[char]=charWordDic[char]\r\n    \r\n\r\n\r\n\r\n    #Hover-overable label\r\n    hoverLabel = tk.Label(secondFrame, text=\"HOVER OVER A WORD TO VIEW DETAILS\", width=192)\r\n    hoverLabel.config(font=(\"Courier\", 10))\r\n\r\n\r\n    #Create Main Word Cloud\r\n    #using common as a list of tuples that contain word and count. 
\r\n\r\n\r\n text = tk.Text(secondFrame, wrap=\"none\")\r\n vsb = tk.Scrollbar(orient=\"vertical\", command=text.yview)\r\n text.configure(yscrollcommand=vsb.set)\r\n\r\n\r\n text.insert(\"end\", \"Characters: \\n\")\r\n\r\n button = tk.Button (secondFrame, text = \"ALL\",command= lambda: createWordCloud(root,mainFrame,secondFrame,common, newChar, hoverLabel, tupleFontSizeList))\r\n text.window_create(\"end\", window=button)\r\n text.insert(\"end\", \"\\n\")\r\n #this will be the character buttons will probably create for loop and generate multiple buttons\r\n num = 0 \r\n for char in sorted(newChar):\r\n if(len(newChar[char])<3):\r\n continue\r\n name = char\r\n button = tk.Button (secondFrame, text = name,command= lambda name=char: createWordCloudChar(name,root,mainFrame,secondFrame,newChar,hoverLabel,common, sizes))\r\n text.window_create(\"end\", window=button)\r\n text.insert(\"end\", \"\\n\")\r\n\r\n\r\n\r\n text.configure(state=\"disabled\")\r\n\r\n\r\n \r\n createWordCloud(root,mainFrame,secondFrame,common, newChar, hoverLabel, tupleFontSizeList)\r\n\r\n #left frame and right frame\r\n mainFrame.pack(side=\"left\", fill=\"both\")\r\n secondFrame.pack(side=\"right\", fill=\"both\")\r\n hoverLabel.pack(side=\"top\", fill=\"both\")\r\n vsb.pack(side=\"right\", fill=\"y\")\r\n text.pack(fill=\"both\", expand=True)\r\n\r\n root.mainloop()\r\n\r\n\r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n" } ]
4
TNFlexso/photoboothdiy
https://github.com/TNFlexso/photoboothdiy
76f79ca5f71ffacc0811ead7ec2eac54735f0316
361611d7e2b980c9b1ed22e9e5abd57bbf32d1fc
ad1747ade28a786c926dcc0bc079392725be0597
refs/heads/master
2020-07-01T23:16:57.073940
2019-09-12T10:45:50
2019-09-12T10:45:50
201,338,415
0
0
null
2019-08-08T21:12:51
2019-07-22T13:41:08
2019-07-15T17:56:32
null
[ { "alpha_fraction": 0.609388530254364, "alphanum_fraction": 0.6228997111320496, "avg_line_length": 30.489089965820312, "blob_id": "401007bf0fade1d126985dc71e98515298289560", "content_id": "4deaa354447df84e404793abb8a71c8853624d39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17319, "license_type": "no_license", "max_line_length": 112, "num_lines": 550, "path": "/camera.py", "repo_name": "TNFlexso/photoboothdiy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport pygame\nimport pygame.camera\nimport time\nimport os\nimport PIL.Image\nimport cups\nimport RPi.GPIO as GPIO\nimport subprocess\n\nfrom shutil import copyfile\nfrom threading import Thread\nfrom pygame.locals import *\nfrom time import sleep\nfrom PIL import Image, ImageDraw\nfrom subprocess import *\n\n# initialise global variables\nNumeral = \"\" # Numeral is the number display\nMessage = \"\" # Message is a fullscreen message\nSubMessage = \"\"\nLongMessage = \"\"\nBackgroundColor = \"\"\nCountDownPhoto = \"\"\nCountPhotoOnCart = \"\"\nSmallMessage = \"\" # SmallMessage is a lower banner message\nTotalImageCount = 0 # Counter for Display and to monitor paper usage\nPhotosPerCart = 30 # Selphy takes 16 sheets per tray\nimagecounter = 0\nimagefolder = 'Photos'\nbackupfolder = '/media/pi/90B6-124C/trouw'\ntemplatePath = os.path.join('Photos', 'Template', \"template.png\") #Path of template image\nImageShowed = False\nPrinting = False\nBUTTON_PIN = 25\nIMAGE_WIDTH = 594\nIMAGE_HEIGHT = 445\nimage_size = (IMAGE_WIDTH, IMAGE_HEIGHT)\nos.environ[\"SDL_AUDIODRIVER\"] = \"dsp\"\n\n# Load the background template\nbgimage = PIL.Image.open(templatePath)\n\n#Setup GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n# initialise pygame\npygame.init() # Initialise pygame\npygame.font.init()\npygame.camera.init()\npygame.mouse.set_visible(False)\n# pygame.mouse.set_visible(False) #hide the mouse cursor\ninfoObject = pygame.display.Info()\nscreen_size = (infoObject.current_w,infoObject.current_h)\nscreen = pygame.display.set_mode(screen_size, pygame.FULLSCREEN) # Full screen , pygame.FULLSCREEN\nbackground = pygame.Surface(screen_size) # Create the background object\nbackground = background.convert() # Convert it to a background\n\nscreenPicture = pygame.display.set_mode(screen_size, pygame.FULLSCREEN) # Full screen\nbackgroundPicture = pygame.Surface(screen_size) # Create the background object\nbackgroundPicture = background.convert() # Convert it to a background\n\ntransform_x = infoObject.current_w # how wide to scale the jpg when replaying\ntransfrom_y = infoObject.current_h # how high to scale the jpg when replaying\n\n# Initialise the camera object\n# camera.resolution = (infoObject.current_w, infoObject.current_h)\ncamera_resolution = (IMAGE_WIDTH, IMAGE_HEIGHT)\n#(infoObject.current_w, infoObject.current_h)\ncamera_devices = pygame.camera.list_cameras()\ncamera = pygame.camera.Camera(camera_devices[0],camera_resolution)\n# camera.set_controls(True, False, 50)\n\n# A function to handle keyboard/mouse/device input events\ndef input(events):\n for event in events: # Hit the ESC key to quit the slideshow.\n if (event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE)):\n pygame.quit()\n\n \n# set variables to properly display the image on screen at right ratio\ndef set_demensions(img_w, img_h):\n # Note this only works when in booting in desktop mode. 
\n # When running in terminal, the size is not correct (it displays small). Why?\n\n # connect to global vars\n global transform_y, transform_x, offset_y, offset_x\n\n # based on output screen resolution, calculate how to display\n ratio_h = (infoObject.current_w * img_h) / img_w \n\n if (ratio_h < infoObject.current_h):\n #Use horizontal black bars\n #print \"horizontal black bars\"\n transform_y = ratio_h\n transform_x = infoObject.current_w\n offset_y = (infoObject.current_h - ratio_h) / 2\n offset_x = 0\n elif (ratio_h > infoObject.current_h):\n #Use vertical black bars\n #print \"vertical black bars\"\n transform_x = (infoObject.current_h * img_w) / img_h\n transform_y = infoObject.current_h\n offset_x = (infoObject.current_w - transform_x) / 2\n offset_y = 0\n else:\n #No need for black bars as photo ratio equals screen ratio\n #print \"no black bars\"\n transform_x = infoObject.current_w\n transform_y = infoObject.current_h\n offset_y = offset_x = 0\n\ndef InitFolder():\n global imagefolder\n global Message\n global LongMessage\n global SubMessage\n \n Message = 'Folder Check...'\n UpdateDisplay()\n Message = ''\n\n #check image folder existing, create if not exists\n if not os.path.isdir(imagefolder): \n os.makedirs(imagefolder) \n \n imagefolder2 = os.path.join(imagefolder, 'images')\n if not os.path.isdir(imagefolder2):\n os.makedirs(imagefolder2)\n \ndef DisplayText(fontSize, textToDisplay):\n global Numeral\n global Message\n global LongMessage\n global SubMessage\n global screen\n global background\n global pygame\n global ImageShowed\n global screenPicture\n global backgroundPicture\n global CountDownPhoto\n\n if (BackgroundColor != \"\"):\n background.fill(pygame.Color(\"black\"))\n if (textToDisplay != \"\"):\n font = pygame.font.Font(None, fontSize)\n text = font.render(textToDisplay, 1, (227, 157, 200))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = background.get_rect().centery\n if(ImageShowed):\n backgroundPicture.blit(text, textpos)\n else:\n background.blit(text, textpos)\n\ndef UpdateDisplay():\n # init global variables from main thread\n global Numeral\n global Message\n global SubMessage\n global LongMessage\n global screen\n global background\n global pygame\n global ImageShowed\n global screenPicture\n global backgroundPicture\n global CountDownPhoto\n \n background.fill(pygame.Color(\"white\")) # White background\n\n if (BackgroundColor != \"\"):\n background.fill(pygame.Color(\"black\"))\n \n if (Message != \"\"):\n font = pygame.font.SysFont(\"Brandon Grotesque Bold\", 100)\n text = font.render(Message, True, (0, 0, 0))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = background.get_rect().centery\n if(ImageShowed):\n backgroundPicture.blit(text, textpos)\n else:\n background.blit(text, textpos)\n \n if(LongMessage != \"\"):\n font = pygame.font.SysFont(\"Brandon Grotesque Bold\", 80)\n text = font.render(LongMessage, True, (0, 0, 0))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = background.get_rect().centery - 40\n if(ImageShowed):\n backgroundPicture.blit(text, textpos)\n else:\n background.blit(text, textpos)\n font = pygame.font.SysFont(\"Brandon Grotesque Bold\", 100)\n text = font.render(SubMessage, True, (0, 0, 0))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = background.get_rect().centery + 40\n if(ImageShowed):\n backgroundPicture.blit(text, 
textpos)\n else:\n background.blit(text, textpos)\n \n if (Numeral != \"\"):\n font = pygame.font.SysFont(\"Brandon Grotesque Bold\", 200)\n text = font.render(Numeral, True, (255, 255, 255))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = background.get_rect().centery\n if(ImageShowed):\n backgroundPicture.blit(text, textpos)\n else:\n background.blit(text, textpos)\n\n if (CountDownPhoto != \"\"):\n #print(displaytext)\n font = pygame.font.SysFont(\"Brandon Grotesque Bold\", 200)\n text = font.render(CountDownPhoto, True, (0, 0, 0))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = background.get_rect().centery\n if(ImageShowed):\n backgroundPicture.blit(text, textpos)\n else:\n background.blit(text, textpos)\n\n if(ImageShowed == True):\n screenPicture.blit(backgroundPicture, (0, 0)) \n else:\n screen.blit(background, (0, 0))\n \n pygame.display.flip()\n return\n\n\ndef ShowPicture(file, delay):\n global pygame\n global screenPicture\n global backgroundPicture\n global ImageShowed\n backgroundPicture.fill((0, 0, 0))\n img = pygame.image.load(file)\n img = pygame.transform.scale(img, (1020,780)) # Make the image full screen\n #backgroundPicture.set_alpha(200)\n backgroundPicture.blit(img, (120,0))\n screen.blit(backgroundPicture, (0, 0))\n pygame.display.flip() # update the display\n ImageShowed = True\n time.sleep(delay)\n \ndef ShowResult(file, delay):\n global pygame\n global screenPicture\n global backgroundPicture\n global ImageShowed\n backgroundPicture.fill((255, 255, 255))\n img = pygame.image.load(file)\n img = pygame.transform.scale(img, (320,720)) # Make the image fit screen\n #backgroundPicture.set_alpha(200)\n backgroundPicture.blit(img, (480,0))\n screen.blit(backgroundPicture, (0, 0))\n pygame.display.flip() # update the display\n ImageShowed = True\n time.sleep(delay)\n \n# display one image on screen\ndef show_image(image_path): \n screen.fill(pygame.Color(\"white\")) # clear the screen \n img = pygame.image.load(image_path) # load the image\n img = img.convert() \n set_demensions(img.get_width(), img.get_height()) # set pixel dimensions based on image \n x = (infoObject.current_w / 2) - (img.get_width() / 2)\n y = (infoObject.current_h / 2) - (img.get_height() / 2)\n screen.blit(img,(x,y))\n pygame.display.flip()\n\ndef CapturePicture():\n global imagecounter\n global imagefolder\n global Numeral\n global Message\n global LongMessage\n global SubMessage\n global screen\n global background\n global screenPicture\n global backgroundPicture\n global pygame\n global ImageShowed\n global CountDownPhoto\n global BackgroundColor\n \n image = None\n\n BackgroundColor = \"\"\n Numeral = \"\"\n Message = \"\"\n UpdateDisplay()\n time.sleep(1)\n CountDownPhoto = \"\"\n UpdateDisplay()\n background.fill(pygame.Color(\"black\"))\n screen.blit(background, (0, 0))\n pygame.display.flip()\n BackgroundColor = \"black\"\n \n streaming = True\n countdown = 0\n x = 3\n Numeral = str(x)\n Message = \"\" \n \n while streaming:\n if image:\n if camera.query_image():\n image = camera.get_image(image)\n else:\n image = camera.get_image()\n \n image = pygame.transform.flip(image, True, False) \n image2 = pygame.transform.scale(image, screen_size)\n background.blit(image2, (0, 0))\n font = pygame.font.SysFont(\"Brandon Grotesque Bold\", 200)\n text = font.render(str(x), True, (255, 255, 255))\n textpos = text.get_rect()\n textpos.centerx = background.get_rect().centerx\n textpos.centery = 
background.get_rect().centery\n background.blit(text, textpos)\n screen.blit(background, (0,0))\n pygame.display.flip() # update the display\n countdown = countdown + 1\n if countdown == 10:\n x = x - 1\n if x == 0:\n streaming = False\n else:\n countdown = 0 \n BackgroundColor = \"\"\n Numeral = \"\"\n LongMessage = \"\"\n Message = \"\"\n UpdateDisplay()\n photo_ready = camera.query_image()\n time.sleep(0.5)\n while not photo_ready:\n photo_ready = camera.query_image()\n image = camera.get_image(image)\n image = pygame.transform.flip(image, True, False)\n image2 = pygame.transform.scale(image,screen_size)\n image = pygame.transform.scale(image,camera_resolution)\n background.blit(image2, (0, 0))\n screen.blit(background, (0,0))\n imagecounter = imagecounter + 1\n ts = time.time()\n filename = os.path.join(imagefolder, 'images', str(imagecounter)+\"_\"+str(ts) + '.png')\n pygame.image.save(image, filename)\n #time.sleep(1)\n ShowPicture(filename, 1)\n ImageShowed = False\n return filename\n \ndef TakePictures():\n global imagecounter\n global imagefolder\n global backupfolder\n global Numeral\n global Message\n global LongMessage\n global SubMessage\n global screen\n global background\n global pygame\n global ImageShowed\n global CountDownPhoto\n global BackgroundColor\n global Printing\n global PhotosPerCart\n global TotalImageCount\n \n LongMessage = \"\"\n\n input(pygame.event.get())\n \n camera.start()\n \n CountDownPhoto = \"Foto 1/3\" \n filename1 = CapturePicture()\n\n CountDownPhoto = \"Foto 2/3\"\n filename2 = CapturePicture()\n\n CountDownPhoto = \"Foto 3/3\"\n filename3 = CapturePicture()\n \n camera.stop()\n\n CountDownPhoto = \"\"\n Message = \"Even geduld...\"\n UpdateDisplay()\n\n image1 = PIL.Image.open(filename1)\n image2 = PIL.Image.open(filename2)\n image3 = PIL.Image.open(filename3) \n TotalImageCount = TotalImageCount + 1\n\n bgimage.paste(image1, (57, 194))\n bgimage.paste(image2, (57, 664))\n bgimage.paste(image3, (57, 1137))\n \n # Create the final filename\n ts = time.time()\n Final_Image_Name = os.path.join(imagefolder, \"Final_\" + str(TotalImageCount)+\"_\"+str(ts) + \".png\")\n Backup_Image_Name = os.path.join(backupfolder, \"Final_\" + str(TotalImageCount)+\"_\"+str(ts) + \".png\")\n # Save it to the raspberry SD card and backup to usb drive\n bgimage.save(Final_Image_Name)\n try:\n copyfile(Final_Image_Name, Backup_Image_Name)\n except Exception as filex:\n print(\"USB not found\")\n # Save a temp file, its faster to print from the pi than usb\n temppath = os.path.join('Temp', 'tempprint.png')\n bgimage.save(temppath)\n ShowResult(temppath,3)\n #bgimage2 = bgimage.rotate(90)\n #bgimage2.save(temppath)\n ImageShowed = False\n Message = \"\"\n Printing = False\n WaitForPrintingEvent()\n Numeral = \"\"\n Message = \"\"\n SubMessage = \"\"\n LongMessage = \"\"\n if Printing:\n if os.path.isfile(temppath):\n # Open a connection to cups\n conn = cups.Connection()\n # get a list of printers\n # printers = conn.getPrinters()\n # select printer 0\n printer_name = \"Canon_MP250_series\"\n Message = \"Aan het printen...\"\n UpdateDisplay()\n time.sleep(1)\n # print the buffer file\n printqueuelength = len(conn.getJobs())\n if printqueuelength > 1:\n ShowPicture(temppath,3)\n conn.enablePrinter(printer_name)\n Message = \"Probleem met de printer... 
:(\" \n UpdateDisplay()\n time.sleep(1)\n else:\n conn.printFile(printer_name, temppath, \"PhotoBooth\", {})\n time.sleep(40) \n Message = \"\"\n Numeral = \"\"\n ImageShowed = False\n UpdateDisplay()\n os.remove(filename1)\n os.remove(filename2)\n os.remove(filename3) \n\ndef MyCallback(channel):\n global Printing\n GPIO.remove_event_detect(BUTTON_PIN)\n pygame.event.clear()\n Printing=True\n\ndef WaitForPrintingEvent():\n global BackgroundColor\n global Numeral\n global Message\n global LongMessage\n global SubMessage\n global Printing\n global pygame\n countDown = 5\n GPIO.add_event_detect(BUTTON_PIN, GPIO.RISING)\n GPIO.add_event_callback(BUTTON_PIN, MyCallback)\n \n while Printing == False and countDown > 0:\n if(Printing == True):\n return\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_DOWN:\n GPIO.remove_event_detect(BUTTON_PIN)\n Printing = True\n return \n BackgroundColor = \"\"\n Numeral = \"\"\n SubMessage = str(countDown)\n LongMessage = \"Druk op de knop om je foto af te drukken\"\n UpdateDisplay() \n countDown = countDown - 1\n time.sleep(1)\n\n GPIO.remove_event_detect(BUTTON_PIN)\n pygame.event.clear()\n \ndef uploadToGP(filename):\n p = Popen(['/usr/bin/share/gpup','-a \"trouw\"', filename], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n \ndef WaitForEvent():\n global pygame\n NotEvent = True\n while NotEvent:\n input_state = GPIO.input(BUTTON_PIN)\n if input_state == False:\n NotEvent = False\n return\n \n try:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n if event.key == pygame.K_DOWN:\n NotEvent = False\n return\n if event.type == QUIT:\n pygame.quit()\n except:\n pygame.quit()\n \n time.sleep(0.2)\n pygame.event.clear()\n\ndef main(threadName, *args):\n InitFolder()\n \n try:\n while True:\n show_image('images/start_camera.jpg')\n WaitForEvent()\n time.sleep(0.2)\n TakePictures()\n except Exception as e:\n print(e)\n GPIO.cleanup()\n pygame.quit()\n \n\n# launch the main thread\nThread(target=main, args=('Main', 1)).start()\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.739130437374115, "avg_line_length": 13, "blob_id": "db966f22ddadc52b728555893cb690fa0414ff97", "content_id": "32b7ec3ed1555369295b7ef47acfaa4faa2db688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 69, "license_type": "no_license", "max_line_length": 30, "num_lines": 5, "path": "/photobooth-script.sh", "repo_name": "TNFlexso/photoboothdiy", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ncd /\ncd home/pi/Partage/Photobooth/\nsudo python3 camera.py" } ]
2
hdodenhof/NuimoSonosController
https://github.com/hdodenhof/NuimoSonosController
7831b1be035ab3e8983ac67cffcdec3be625ad04
1d22041f230c80ea6c22de2222c2389773e82460
165fd34b6e7ab3fe840414ca83bd900775192f1e
refs/heads/master
2023-05-26T10:08:38.287627
2017-01-05T20:50:24
2017-01-05T21:55:03
62,498,258
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6042047739028931, "alphanum_fraction": 0.6051188111305237, "avg_line_length": 23.584270477294922, "blob_id": "708514062a2dbec06364185d6d451941dbb6fee3", "content_id": "219a35be4edbcafd3308769eef55b97bbf82611a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2188, "license_type": "no_license", "max_line_length": 83, "num_lines": 89, "path": "/sonos.py", "repo_name": "hdodenhof/NuimoSonosController", "src_encoding": "UTF-8", "text": "import logging\nimport threading\nfrom Queue import Empty\n\nimport soco\nfrom soco.events import event_listener\n\n\nclass SonosAPI:\n\n STATE_PLAYING = 'PLAYING'\n STATE_PAUSED = 'PAUSED_PLAYBACK'\n STATE_TRANSITIONING = 'TRANSITIONING'\n\n def __init__(self):\n self.players = soco.discover()\n\n for player in self.players:\n if player.is_coordinator:\n self.coordinator = player\n\n self.state = 'UNKNOWN'\n\n self.eventReceiver = EventReceiver(self.coordinator, self._on_state_change)\n self.eventReceiver.start()\n\n def _on_state_change(self, new_state):\n logging.debug(\"New transport state: {}\".format(new_state))\n\n if (new_state == self.STATE_TRANSITIONING):\n return\n\n self.state = new_state\n\n def disconnect(self):\n self.eventReceiver.stop()\n\n def is_playing(self):\n return self.state == self.STATE_PLAYING\n\n def get_volume(self):\n return self.coordinator.volume\n\n def play(self):\n self.coordinator.play()\n\n def pause(self):\n self.coordinator.pause()\n\n def next(self):\n self.coordinator.next()\n\n def prev(self):\n self.coordinator.previous()\n\n def vol_up(self, value):\n self._set_volume(self.coordinator.volume + value)\n\n def vol_down(self, value):\n self._set_volume(self.coordinator.volume - value)\n\n def _set_volume(self, value):\n for player in self.players:\n player.volume = value\n\n\nclass EventReceiver(threading.Thread):\n\n def __init__(self, coordinator, state_callback):\n super(EventReceiver, self).__init__()\n self.subscription = coordinator.avTransport.subscribe()\n self.state_callback = state_callback\n self.terminate = False\n\n def run(self):\n while True:\n if self.terminate:\n self.subscription.unsubscribe()\n event_listener.stop()\n break\n\n try:\n event = self.subscription.events.get(timeout=0.5)\n self.state_callback(event.transport_state)\n except Empty:\n pass\n\n def stop(self):\n self.terminate = True\n" }, { "alpha_fraction": 0.6294742822647095, "alphanum_fraction": 0.6342281699180603, "avg_line_length": 27.607999801635742, "blob_id": "753544e69c76c1ad5220045e74b74f187a1feaa0", "content_id": "171c2d192bee9fb0cb450da511b36676ce4f4089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3576, "license_type": "no_license", "max_line_length": 93, "num_lines": 125, "path": "/controller.py", "repo_name": "hdodenhof/NuimoSonosController", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom __future__ import division\n\nimport logging\nimport math\nimport signal\nimport sys\nimport time\nfrom threading import Timer\n\nimport led_configs\nfrom nuimo import Nuimo, NuimoDelegate\nfrom sonos import SonosAPI\n\n\nnuimo_sonos_controller = None\n\n\nclass NuimoSonosController(NuimoDelegate):\n\n def __init__(self, bled_com, nuimo_mac):\n NuimoDelegate.__init__(self)\n self.nuimo = Nuimo(bled_com, nuimo_mac, self)\n self.sonos = SonosAPI()\n self.default_led_timeout = 3\n self.max_volume = 42 # should be dividable by 7\n self.volume_bucket_size = int(self.max_volume / 7)\n 
self.last_vol_matrix = None\n        self.vol_reset_timer = None\n        self.stop_pending = False\n\n    def start(self):\n        self.nuimo.connect()\n\n        while not self.stop_pending:\n            time.sleep(0.1)\n\n        self.sonos.disconnect()\n        self.nuimo.disconnect()\n        self.nuimo.terminate()\n\n    def stop(self):\n        self.stop_pending = True\n\n    def on_button(self):\n        if self.sonos.is_playing():\n            self.sonos.pause()\n            self.nuimo.display_led_matrix(led_configs.pause, self.default_led_timeout)\n        else:\n            self.sonos.play()\n            self.nuimo.display_led_matrix(led_configs.play, self.default_led_timeout)\n\n    def on_swipe_right(self):\n        self.sonos.next()\n        self.nuimo.display_led_matrix(led_configs.next, self.default_led_timeout)\n\n    def on_swipe_left(self):\n        self.sonos.prev()\n        self.nuimo.display_led_matrix(led_configs.previous, self.default_led_timeout)\n\n    def on_fly_right(self):\n        self.on_swipe_right()\n\n    def on_fly_left(self):\n        self.on_swipe_left()\n\n    def on_wheel_right(self, value):\n        self.sonos.vol_up(self._calculate_volume_delta(value))\n        self._show_volume()\n\n    def on_wheel_left(self, value):\n        self.sonos.vol_down(self._calculate_volume_delta(value))\n        self._show_volume()\n\n    def on_connect(self):\n        self.nuimo.display_led_matrix(led_configs.default, self.default_led_timeout)\n\n    def _calculate_volume_delta(self, value):\n        return min(value / 20 + 1, 5)\n\n    def _show_volume(self):\n        volume = self.sonos.get_volume()\n        if volume is None: volume = 0\n\n        bucket = min(int(math.ceil(volume / self.volume_bucket_size)), 7)\n        matrix = getattr(led_configs, 'vol' + str(bucket))\n\n        if matrix != self.last_vol_matrix:\n            self.last_vol_matrix = matrix\n            self.nuimo.display_led_matrix(matrix, self.default_led_timeout)\n            if self.vol_reset_timer is not None:\n                self.vol_reset_timer.cancel()\n            # Timer.start() returns None, so keep a reference to the Timer itself or it can never be cancelled\n            self.vol_reset_timer = Timer(self.default_led_timeout+1, self._reset_vol)\n            self.vol_reset_timer.start()\n\n    def _reset_vol(self):\n        self.last_vol_matrix = None\n        self.vol_reset_timer = None\n\n\ndef signal_term_handler(signal, frame):\n    logging.info('Received SIGTERM signal!')\n    nuimo_sonos_controller.stop()\n\n\ndef signal_int_handler(signal, frame):\n    logging.info('Received SIGINT signal. This makes Panda sad! 
:(')\n nuimo_sonos_controller.stop()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(message)s')\n\n signal.signal(signal.SIGTERM, signal_term_handler)\n signal.signal(signal.SIGINT, signal_int_handler)\n\n if len(sys.argv) != 3:\n raise RuntimeError('Invalid number of arguments')\n\n com = sys.argv[1]\n mac = sys.argv[2]\n\n nuimo_sonos_controller = NuimoSonosController(com, mac)\n nuimo_sonos_controller.start()\n" }, { "alpha_fraction": 0.5320642590522766, "alphanum_fraction": 0.5655460953712463, "avg_line_length": 30.653846740722656, "blob_id": "f4733fccefcbac83f232680dc2c6e689fc4534d7", "content_id": "98da244a394fce07c1ea70dd86e8cd775c6434a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7407, "license_type": "no_license", "max_line_length": 280, "num_lines": 234, "path": "/nuimo.py", "repo_name": "hdodenhof/NuimoSonosController", "src_encoding": "UTF-8", "text": "from __future__ import division\n\nimport threading\n\nfrom bled112 import Bled112Com\nfrom gatt import BleManager, BleRemoteTimeout, BleLocalTimeout\nimport logging\nimport time\n\n\nSERVICE_UUIDS = [\n '180f', # Battery\n 'f29b1525-cb19-40f3-be5c-7241ecb82fd2', # Sensors\n 'f29b1523-cb19-40f3-be5c-7241ecb82fd1' # LED Matrix\n]\n\nCHARACTERISTIC_UUIDS = {\n '2a19': 'BATTERY',\n 'f29b1529-cb19-40f3-be5c-7241ecb82fd2': 'BUTTON',\n 'f29b1528-cb19-40f3-be5c-7241ecb82fd2': 'ROTATION',\n 'f29b1527-cb19-40f3-be5c-7241ecb82fd2': 'SWIPE',\n 'f29b1526-cb19-40f3-be5c-7241ecb82fd2': 'FLY',\n 'f29b1524-cb19-40f3-be5c-7241ecb82fd1': 'LED_MATRIX'\n}\n\nNOTIFICATION_CHARACTERISTIC_UUIDS = [\n 'BATTERY',\n 'BUTTON',\n 'ROTATION',\n 'SWIPE',\n 'FLY'\n]\n\n\nclass Nuimo:\n def __init__(self, com, address, delegate):\n self.com = com\n self.address = address\n self.delegate = delegate\n self.bled112 = None\n self.ble = None\n self.characteristics_handles = {}\n self.message_handler = MessageHandler()\n self.message_handler.start()\n\n def connect(self):\n self.bled112 = Bled112Com(self.com)\n self.bled112.start()\n self.ble = BleManager(self.bled112, self.address, self)\n\n while not self.ble.isConnected():\n try:\n self.ble.connect()\n\n self._discover_characteristics()\n self._setup_notifications()\n\n self.delegate.on_connect()\n except (BleRemoteTimeout, BleLocalTimeout):\n time.sleep(5)\n\n def disconnect(self):\n self.bled112.reset()\n self.bled112.close()\n\n def terminate(self):\n self.message_handler.terminate()\n\n def _discover_characteristics(self):\n logging.debug(\"Reading service groups\")\n groups = self.ble.readAll()\n\n handles = {}\n for group in groups.values():\n if group.uuid not in SERVICE_UUIDS:\n continue\n group_handles = self.ble.findInformation(group.start, group.end)\n for uuid, handle in group_handles.iteritems():\n if uuid not in CHARACTERISTIC_UUIDS:\n continue\n logging.debug(\"Found handle {} for {}\".format(handle, uuid))\n handles[uuid] = handle\n\n self.characteristics_handles = dict((name, handles[uuid]) for uuid, name in CHARACTERISTIC_UUIDS.items())\n\n def _setup_notifications(self):\n for name in NOTIFICATION_CHARACTERISTIC_UUIDS:\n logging.debug(\"Setup notifications for {}\".format(name))\n self.ble.configClientCharacteristic(self.characteristics_handles[name] + 1, notify=True)\n\n\n def display_led_matrix(self, matrix, timeout):\n try:\n matrix = '{:<81}'.format(matrix[:81])\n bytes = list(map(lambda leds: reduce(lambda acc, led: acc + (1 << led if leds[led] not in [' ', '0'] else 0), range(0, 
len(leds)), 0), [matrix[i:i+8] for i in range(0, len(matrix), 8)]))\n self.ble.writeAttributeByHandle(self.characteristics_handles['LED_MATRIX'], [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9], bytes[10], max(0, min(255, int(255.0 * 1))), max(0, min(255, int(timeout * 10.0)))], False)\n except Exception as e:\n logging.exception(e)\n\n def on_message(self, message):\n if message.attHandle == self.characteristics_handles['BATTERY']:\n logging.debug('Battery state')\n level = int(message.data[0] / 255 * 100)\n MessageHandler.queue((self.delegate.on_battery_state, level))\n if message.attHandle == self.characteristics_handles['BUTTON']:\n if (message.data[0] == 1):\n logging.debug('Button pressed')\n MessageHandler.queue(self.delegate.on_button)\n else:\n logging.debug('Button released')\n elif message.attHandle == self.characteristics_handles['SWIPE']:\n if (message.data[0] == 0):\n logging.debug('Swipe left')\n MessageHandler.queue(self.delegate.on_swipe_left)\n elif (message.data[0] == 1):\n logging.debug('Swipe right')\n MessageHandler.queue(self.delegate.on_swipe_right)\n elif (message.data[0] == 2):\n logging.debug('Swipe up')\n else:\n logging.debug('Swipe down')\n elif message.attHandle == self.characteristics_handles['ROTATION']:\n if (message.data[1] == 0):\n value = message.data[0]\n logging.debug('Wheel right, value: {}'.format(value))\n MessageHandler.queue((self.delegate.on_wheel_right, value))\n else:\n value = 255 - message.data[0]\n logging.debug('Wheel left, value: {}'.format(value))\n MessageHandler.queue((self.delegate.on_wheel_left, value))\n elif message.attHandle == self.characteristics_handles['FLY']:\n if (message.data[0] == 0):\n logging.debug('Fly left')\n MessageHandler.queue(self.delegate.on_fly_left)\n elif (message.data[0] == 1):\n logging.debug('Fly right')\n MessageHandler.queue(self.delegate.on_fly_right)\n elif (message.data[0] == 2):\n logging.debug('Fly towards')\n MessageHandler.queue(self.delegate.on_fly_towards)\n elif (message.data[0] == 3):\n logging.debug('Fly backwards')\n MessageHandler.queue(self.delegate.on_fly_backwards)\n else:\n logging.debug('Fly up/down, value {}'.format(message.data[1]))\n\n def on_disconnect(self):\n self.bled112.close()\n time.sleep(5)\n logging.debug('Reconnecting...')\n self.connect()\n\nclass NuimoDelegate:\n def __init__(self):\n pass\n\n def on_connect(self):\n pass\n\n def on_battery_state(self, value):\n pass\n\n def on_button(self):\n pass\n\n def on_swipe_right(self):\n pass\n\n def on_swipe_left(self):\n pass\n\n def on_swipe_up(self):\n pass\n\n def on_swipe_down(self):\n pass\n\n def on_wheel_right(self, value):\n pass\n\n def on_wheel_left(self, value):\n pass\n\n def on_fly_right(self):\n pass\n\n def on_fly_left(self):\n pass\n\n def on_fly_towards(self):\n pass\n\n def on_fly_backwards(self):\n pass\n\nclass MessageHandler(threading.Thread):\n\n next_msg = None\n\n def __init__(self):\n super(MessageHandler, self).__init__()\n self.stop = False\n\n def run(self):\n while True:\n if self.stop:\n break\n\n if not MessageHandler.next_msg:\n time.sleep(0.01)\n continue\n\n try:\n msg = MessageHandler.next_msg\n if isinstance(msg, tuple):\n func = msg[0]\n args = msg[1:]\n func(*args)\n else:\n msg()\n except Exception as e:\n logging.exception(e)\n\n MessageHandler.next_msg = None\n\n def terminate(self):\n self.stop = True\n\n @staticmethod\n def queue(msg):\n if (MessageHandler.next_msg):\n return\n\n MessageHandler.next_msg = msg\n" }, { "alpha_fraction": 
0.6585635542869568, "alphanum_fraction": 0.6662983298301697, "avg_line_length": 36.940120697021484, "blob_id": "95166a9666d695e2a487392700e6bc78b8bc7c29", "content_id": "7f00d417ffa0f3bc01ef5b1d0bab77a5d2f72fc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6335, "license_type": "no_license", "max_line_length": 98, "num_lines": 167, "path": "/gatt.py", "repo_name": "hdodenhof/NuimoSonosController", "src_encoding": "UTF-8", "text": "from bled112 import *\n\nDEBUG = True\nINFO = True\n\ndef macString(mac):\n return '%02X:%02X:%02X:%02X:%02X:%02X' % (mac[5], mac[4], mac[3], mac[2], mac[1], mac[0])\n\nclass Timeout:\n \"\"\"Simplify timeout interval management\"\"\"\n def __init__(self, interval):\n self.start = time.time()\n self.interval = interval\n\n def isExpired(self):\n return (time.time() - self.start >= self.interval)\n\n# Custom BLED112 exceptions\nclass BleException(Exception): pass\nclass BleProcedureFailure(BleException): pass\nclass BleLocalTimeout(BleException): pass\nclass BleRemoteTimeout(BleException): pass\nclass BleValueError(BleException): pass\n\nclass BleConnection:\n def __init__(self, mac=None):\n self.id = None\n self.address = mac\n\nclass AttributeGroup:\n \"\"\"Encapsulate a group of GATT attribute/descriptor handles.\n uuid -- UUID of the containing characteristic for the group\n start -- first handle in the group\n end -- last handle in the group\n \"\"\"\n def __init__(self, uuid=None, start=None, end=None):\n self.uuid = uuid\n self.start = start\n self.end = end\n\nclass BleManager:\n def __init__(self, com, address, delegate = None):\n self.reactions = {\n ConnectionStatusEvent : self.onConnectionStatusEvent,\n ConnectionDisconnectedEvent : self.onConnectionDisconnectedEvent,\n AttClientGroupFoundEvent : self.onAttClientGroupFoundEvent,\n AttClientFindInformationFoundEvent: self.onAttClientFindInformationFoundEvent,\n AttClientAttributeValueEvent : self.onAttClientAttributeValueEvent\n }\n mac = [int(i, 16) for i in reversed(address.split(':'))]\n self.connection = BleConnection(mac)\n self.com = com\n self.delegate = delegate\n self.expectedMessage = None\n com.listener = self\n self.localTimeout = 5\n self.remoteTimeout = 10\n\n # Called by BLED112 thread\n def onMessage(self, message):\n if self.expectedMessage and message.__class__ == self.expectedMessage.__class__:\n self.actualMessage = message\n self.expectedMessage = None\n else:\n reaction = self.reactions.get(message.__class__)\n if reaction: reaction(message)\n\n def onConnectionDisconnectedEvent(self, message):\n logging.info('Disconnected')\n self.connection.id = None\n if self.delegate is not None: self.delegate.on_disconnect()\n\n def onConnectionStatusEvent(self, message):\n self.connection.id = message.connection\n\n def waitForMessage(self, message, timeout):\n t = Timeout(timeout)\n self.expectedMessage = message\n self.actualMessage = None\n while self.expectedMessage and not t.isExpired(): time.sleep(0.01)\n return self.actualMessage\n\n def waitLocal(self, message):\n msg = self.waitForMessage(message, self.localTimeout)\n if not msg: raise BleLocalTimeout()\n return msg\n\n def waitRemote(self, message, timeout=None):\n msg = self.waitForMessage(message, timeout if timeout is not None else self.remoteTimeout)\n if not msg: raise BleRemoteTimeout()\n return msg\n\n def connect(self):\n logging.info('Connecting to %s...' 
% macString(self.connection.address))\n self.com.send(ConnectDirectCommand(self.connection.address))\n self.waitLocal(ConnectDirectResponse())\n try:\n msg = self.waitRemote(ConnectionStatusEvent())\n except BleRemoteTimeout:\n logging.error('Failed connecting to %s' % macString(self.connection.address))\n raise\n logging.info('Connected to %s' % macString(self.connection.address))\n self.connection.id = msg.connection\n\n def writeAttribute(self, uuid, data):\n logging.debug('Write attribute %s = %s' % (uuid, str(data)))\n handle = self.connection.handleByUuid(uuid)\n self.writeAttributeByHandle(handle, data)\n\n def writeAttributeByHandle(self, handle, data, wait=True):\n self.com.send(AttClientAttributeWriteCommand(self.connection.id, handle, data))\n if wait:\n self.waitLocal(AttClientAttributeWriteResponse())\n self.completeProcedure()\n\n def completeProcedure(self):\n msg = self.waitRemote(AttClientProcedureCompleted())\n logging.debug('Procedure completed')\n return msg.result == 0\n\n def configClientCharacteristic(self, handle, notify=False, indicate=False):\n NOTIFY_ENABLE = 1\n INDICATE_ENABLE = 2\n flags = 0\n if notify: flags = flags | NOTIFY_ENABLE\n if indicate: flags = flags | INDICATE_ENABLE\n self.writeAttributeByHandle(handle, [flags])\n\n def isConnected(self): return self.connection.id is not None\n\n def waitValue(self, uuid):\n handle = self.connection.handleByUuid(uuid)\n return self.waitRemote(AttClientAttributeValueEvent()).data\n\n def readAttribute(self, uuid):\n logging.info('Reading attribute %s' % uuid)\n handle = self.connection.handleByUuid(uuid)\n self.com.send(AttClientReadByHandleCommand(self.connection.id,\n handle))\n self.waitLocal(AttClientReadByHandleResponse())\n return self.waitValue(uuid)\n\n def readAll(self):\n return self.readByGroupType(1, 0xFFFF, Uint16(int('2800',16)).serialize())\n\n def readByGroupType(self, start, end, uuid):\n self.groups = {}\n self.com.send(ReadByGroupTypeCommand(self.connection.id, start, end, uuid))\n self.waitLocal(ReadByGroupTypeResponse())\n self.completeProcedure()\n return self.groups\n\n def onAttClientGroupFoundEvent(self, message):\n self.groups[message.uuid] = AttributeGroup(message.uuid, message.start, message.end)\n\n def findInformation(self, start, end):\n self.handles = {}\n self.com.send(AttClientFindInformationCommand(self.connection.id, start, end))\n self.waitLocal(AttClientFindInformationResponse())\n self.completeProcedure()\n return self.handles\n\n def onAttClientFindInformationFoundEvent(self, message):\n self.handles[message.uuid] = message.chrHandle\n\n def onAttClientAttributeValueEvent(self, message):\n if self.delegate is not None: self.delegate.on_message(message)" }, { "alpha_fraction": 0.6214098930358887, "alphanum_fraction": 0.6788511872291565, "avg_line_length": 24.53333282470703, "blob_id": "18366dff4a9bf8a17c6f142f38d633ee17a17d08", "content_id": "95574fc202914a623f5907a9bfb8ae6a6d7dd80b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 383, "license_type": "no_license", "max_line_length": 93, "num_lines": 15, "path": "/README.md", "repo_name": "hdodenhof/NuimoSonosController", "src_encoding": "UTF-8", "text": "NuimoSonosController\n====================\n\nControl your Sonos system using a Nuimo connected to your Raspberry Pi.\n\nRequirements\n------------\n\n* Bluegiga BLED112 Bluetooth Dongle\n* [Sonos HTTP API](https://github.com/jishi/node-sonos-http-api) running on your Raspberry Pi\n\nRun the 
script\n--------------\n\n`./controller.py /dev/ttyACM0 A1:B2:C3:D4:E5:F6 127.0.0.1 5005 Living%20Room`\n" } ]
5
ArmindoFlores/fenixapi
https://github.com/ArmindoFlores/fenixapi
283454ebe70efa3aff23a66116e50cee829007e2
c5e972649bf8522a4ace5d34796da541a3bb2a84
f73c08bce2546f91f38e97c4ca2b6141f209d9af
refs/heads/master
2021-02-15T05:47:37.186007
2020-05-18T12:48:38
2020-05-18T12:48:38
244,868,656
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 22.33333396911621, "blob_id": "e6bdbb74550f3f6675fc5423e3a17a83b0c72989", "content_id": "e4a58179033c895b7fdf0ba28f476403fe9f8c61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "permissive", "max_line_length": 25, "num_lines": 3, "path": "/fenix/__init__.py", "repo_name": "ArmindoFlores/fenixapi", "src_encoding": "UTF-8", "text": "from .session import *\nfrom .exceptions import *\nfrom .utils import *\n" }, { "alpha_fraction": 0.506766140460968, "alphanum_fraction": 0.5183899998664856, "avg_line_length": 35.8684196472168, "blob_id": "bf2e47e8de9290f6d3a28d094e91b0ec7aed2207", "content_id": "03a6c42e955ff66b36741f7d2edb84c5db8054e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5764, "license_type": "permissive", "max_line_length": 167, "num_lines": 152, "path": "/fenix/session.py", "repo_name": "ArmindoFlores/fenixapi", "src_encoding": "UTF-8", "text": "import requests\r\nimport datetime\r\nfrom bs4 import BeautifulSoup\r\nfrom .exceptions import *\r\nfrom operator import itemgetter\r\n\r\n__all__ = [\r\n \"Session\"\r\n ]\r\n\r\nFENIX_SERVICE = \"aHR0cHM6Ly9mZW5peC50ZWNuaWNvLnVsaXNib2EucHQvbG9naW4uZG8=\" # https://fenix.tecnico.ulisboa.pt/login.do (b64 encoded)\r\nFENIX_SERVICE_URL = f\"https://fenix.tecnico.ulisboa.pt/api/cas-client/login/{FENIX_SERVICE}\"\r\n\r\nclass Session:\r\n def __init__(self):\r\n self._logged_in = False\r\n self._session = None\r\n\r\n def __enter__(self):\r\n return self\r\n\r\n def __exit__(self, *args):\r\n if self._session is not None:\r\n self._session.close()\r\n\r\n @property\r\n def logged_in(self):\r\n return self._logged_in\r\n\r\n @property\r\n def session(self):\r\n return self._session\r\n\r\n def login(self, username, password):\r\n if self._session is None:\r\n self._session = requests.Session()\r\n self._session.headers[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36\"\r\n\r\n # Get session cookie and login codes\r\n req = self._session.get(\"https://id.tecnico.ulisboa.pt/cas/login\")\r\n jsessionid = self._session.cookies.get_dict()[\"JSESSIONID\"]\r\n soup = BeautifulSoup(req.content, features=\"html.parser\")\r\n # Find lt and event id fields\r\n lt = soup.find(\"input\", {\"name\": \"lt\"})[\"value\"]\r\n eventid = soup.find(\"input\", {\"name\": \"execution\"})[\"value\"]\r\n\r\n # Login\r\n data = {\"service\": FENIX_SERVICE_URL,\r\n \"username\": username,\r\n \"password\": password,\r\n \"submit-istid\": \"Entrar\",\r\n \"lt\": lt,\r\n \"execution\": eventid,\r\n \"_eventId\": \"submit\"}\r\n \r\n req = self._session.post(f\"https://id.tecnico.ulisboa.pt/cas/login;{jsessionid}?service={FENIX_SERVICE_URL}\", data=data, allow_redirects=False)\r\n if req.status_code != 200:\r\n raise UndefinedError(\"Something unexpected has happened. 
Are you on the latest version of this API?\")\r\n else:\r\n if \"error-message\" in req.text: # Login failed\r\n raise LoginError(\"Wrong username or password\")\r\n else:\r\n self._logged_in = True\r\n\r\n # Get rid of the inquiry presented\r\n soup = self.get(\"\")\r\n url = soup.find(\"a\", {\"class\": \"btn btn-default\"})[\"href\"][1:]\r\n self._get(url)\r\n\r\n def get(self, url, prefix=\"https://fenix.tecnico.ulisboa.pt\"):\r\n \"\"\"Returns a BeautifulSoup object with the contents of the desired website.\"\"\"\r\n r = self._get(url, prefix)\r\n soup = BeautifulSoup(r.content, features=\"html.parser\")\r\n return soup\r\n\r\n def _get(self, url, prefix=\"https://fenix.tecnico.ulisboa.pt\"):\r\n if prefix != \"\":\r\n return self._session.get(f\"{prefix}/{url}\")\r\n else:\r\n return self._session.get(url)\r\n\r\n def get_user_image(self, istid, size=100):\r\n \"\"\"Retrieves the profile picture of the specified user.\r\nAuthentication required.\"\"\"\r\n if not self.logged_in:\r\n raise AuthenticationError(\"Authentication required\")\r\n \r\n r = self._get(f\"user/photo/ist{istid}?s={size}\")\r\n if r.content[1:4].decode() == \"PNG\": # Check magic number\r\n return r.content\r\n else:\r\n raise NotFoundError(f\"User ist{istid} not found\")\r\n\r\n def get_course(self):\r\n \"\"\"Returns the user's course\"\"\"\r\n s = self.get(\"student\")\r\n return s.h3.contents[0].strip()\r\n\r\n def get_courses(self):\r\n \"\"\"Returns every course the user is enrolled in\"\"\"\r\n courses = []\r\n s = self.get(\"student\")\r\n tables = s.table\r\n cs = tables.findAll(\"h4\", {\"class\": \"mtop025\"})\r\n for c in cs:\r\n courses.append([c.text.strip(), c.a[\"href\"]])\r\n return courses\r\n\r\n def get_student_name(self, istid):\r\n \"\"\"Returns the name of a student, provided he is enrolled in\\\r\none of the user's courses\"\"\"\r\n courses = self.get_courses()\r\n for course in courses:\r\n students = self.get(course[1]+\"/notas\", \"\")\r\n entries = students.table.findAll(\"tr\")\r\n for entry in entries:\r\n l = entry.findAll(\"td\")\r\n if l is not None:\r\n if len(l) >= 2:\r\n if l[0].text == f\"ist{istid}\":\r\n return l[2].text.strip()\r\n raise NotFoundError(f\"Couldn't find user ist{istid}\")\r\n\r\n def get_tests(self):\r\n url = \"/student/enroll/evaluations\"\r\n soup = self.get(url)\r\n table = soup.find(\"table\", {\"class\": \"evallist\"})\r\n tests = []\r\n tr = table.findAll(\"tr\")\r\n for cell in tr:\r\n type_ = cell.find(\"td\")\r\n if type_ == None:\r\n continue\r\n type_ = type_.getText()\r\n if type_ == \"Exame\" or type_ == \"Teste\":\r\n test = []\r\n c = 0\r\n for td in cell.findAll(\"td\"):\r\n if c == 4:\r\n break\r\n if c != 3:\r\n test.append(td.getText().strip())\r\n else:\r\n date_string = td.getText().strip().split(\" \")[0]\r\n date_list = date_string.split(\"/\")\r\n date_list.reverse()\r\n number = int(''.join(date_list))\r\n test.append(number)\r\n c += 1\r\n tests.append(test)\r\n tests = sorted(tests, key = itemgetter(3))\r\n return tests\r\n \r\n\r\n" }, { "alpha_fraction": 0.8032786846160889, "alphanum_fraction": 0.8032786846160889, "avg_line_length": 29.5, "blob_id": "1b25a92dd4de81fbc3f6058a666f55d59eedcf15", "content_id": "64d340bcbaafc4de5d235612908914099664a02e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "permissive", "max_line_length": 49, "num_lines": 2, "path": "/README.md", "repo_name": "ArmindoFlores/fenixapi", "src_encoding": "UTF-8", "text": "# 
fenixapi\nAn API to interface with fenix.tecnico.ulisboa.pt\n" }, { "alpha_fraction": 0.5985801815986633, "alphanum_fraction": 0.6027750968933105, "avg_line_length": 32.29999923706055, "blob_id": "2f75201163adf6ffd8733973816910ea7b80da5d", "content_id": "f1d7362c4b343734c34ad2166831d65d1527a372", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3099, "license_type": "permissive", "max_line_length": 137, "num_lines": 90, "path": "/fenix/utils.py", "repo_name": "ArmindoFlores/fenixapi", "src_encoding": "UTF-8", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom .exceptions import *\r\n\r\n__all__ = [\r\n \"exists_user\",\r\n \"get_name\",\r\n \"search_space\",\r\n \"Space\"\r\n ]\r\n\r\nclass Space:\r\n def __init__(self, location, name, description, link):\r\n self._location = location\r\n self._name = name\r\n self._description = description\r\n self._link = link\r\n \r\n @property\r\n def location(self):\r\n return self._location\r\n\r\n @property\r\n def name(self):\r\n return self._name\r\n\r\n @property\r\n def description(self):\r\n return self._description\r\n\r\n @property\r\n def loaded(self):\r\n return self._loaded\r\n\r\n @property\r\n def link(self):\r\n return self._link\r\n \r\n \r\ndef exists_user(istid):\r\n \"\"\"Returns true if the specified user exists.\"\"\"\r\n r = requests.get(f\"https://fenix.tecnico.ulisboa.pt/user/photo/ist{istid}\")\r\n return r.content[1:4].decode() == \"PNG\" # Check magic number\r\n\r\ndef get_name(istid):\r\n \"\"\"Tries to fetch the real name of the user.\"\"\"\r\n if not exists_user(istid):\r\n raise NotFoundError(f\"User ist{istid} does not exist\")\r\n r = requests.get(f\"https://fenix.tecnico.ulisboa.pt/homepage/ist{istid}\")\r\n soup = BeautifulSoup(r.content, features=\"html.parser\")\r\n h2 = soup.find(\"h2\", {\"class\": \"site-header\"})\r\n if h2 is None:\r\n raise NotFoundError(f\"User ist{istid} does not have a public webpage\")\r\n name = soup.find(\"h2\", {\"class\": \"site-header\"}).a\r\n if name is None:\r\n raise NotFoundError(f\"User ist{istid} does not have a public webpage\")\r\n return name.text.replace(chr(183), \"\").strip()\r\n \r\n\r\ndef search_space(space):\r\n r = requests.get(\"https://fenix.tecnico.ulisboa.pt/conteudos-publicos/pesquisa-de-espacos\")\r\n soup = BeautifulSoup(r.content, features=\"html.parser\")\r\n viewstate = soup.find(\"input\", {\"name\": \"pt.ist.fenixWebFramework.renderers.components.state.LifeCycleConstants.VIEWSTATE\"})[\"value\"]\r\n search_type = soup.find(\"label\", {\"class\": \"control-label col-sm-2\"})[\"for\"]\r\n search_label = soup.find(\"input\", {\"class\": \"form-control\", \"type\": \"text\"})[\"id\"]\r\n data = {\"method\": \"search\",\r\n search_type: \"SPACE\",\r\n search_label: space,\r\n \"pt.ist.fenixWebFramework.renderers.components.state.LifeCycleConstants.VIEWSTATE\": viewstate}\r\n\r\n r = requests.post(\"https://fenix.tecnico.ulisboa.pt/publico/findSpaces.do\", data=data)\r\n soup = BeautifulSoup(r.content, features=\"html.parser\")\r\n table = soup.table\r\n if table is None:\r\n return\r\n table = table.tbody\r\n entries = table.findAll(\"tr\")\r\n spaces = []\r\n for entry in entries:\r\n defs = entry.findAll(\"td\")\r\n location = []\r\n for a in defs[0].findAll(\"a\"):\r\n location.append(a.text)\r\n name = a.text\r\n link = \"https://fenix.tecnico.ulisboa.pt\" + a[\"href\"]\r\n location = \" > \".join(location)\r\n classification = defs[1].text.strip()\r\n \r\n 
spaces.append(Space(location, name, classification, link))\r\n return spaces\r\n \r\n \r\n" } ]
4
yirkkiller/Python
https://github.com/yirkkiller/Python
a05cdaab89c2ca32ca1ceea3b1501781887f36cc
8f503df1a630d9014b2c4a3501278467f8cec482
65e5131e648223120806a471867bed241c8d362f
refs/heads/master
2021-04-15T09:36:54.997134
2018-04-13T12:25:32
2018-04-13T12:25:32
126,456,206
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6153226494789124, "alphanum_fraction": 0.6317732930183411, "avg_line_length": 41.3125, "blob_id": "96e4561e58e11815cd0564e2064f52e34b3802ce", "content_id": "e8c56210fb06a9aef2eee4d08774b4139f941962", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14893, "license_type": "no_license", "max_line_length": 174, "num_lines": 352, "path": "/badgesRepartition-NEW.py", "repo_name": "yirkkiller/Python", "src_encoding": "UTF-8", "text": "import itertools\nfrom collections import Counter\n\nPATH = \"D:\\\\Badges\\\\\"\n\n# Priority of badge effects per class of survivor\ndictPriorities = {\n \"Hunter\" : {\n \"Critical Damage\" : 2,\n \"Damage\" : 3,\n \"Health\" : 1,\n \"Critical Chance\" : 0,\n \"Damage Reduction\" : 0\n },\n \"Assault\" : {\n \"Critical Damage\" : 0,\n \"Damage\" : 0,\n \"Health\" : 3,\n \"Critical Chance\" : 1,\n \"Damage Reduction\" : 2\n },\n \"Shooter\" : {\n \"Critical Damage\" : 2,\n \"Damage\" : 3,\n \"Health\" : 0,\n \"Critical Chance\" : 1,\n \"Damage Reduction\" : 0\n },\n \"Warrior\" : {\n \"Critical Damage\" : 0,\n \"Damage\" : 2,\n \"Health\" : 3,\n \"Critical Chance\" : 0,\n \"Damage Reduction\" : 1\n },\n \"Bruiser\" : {\n \"Critical Damage\" : 0,\n \"Damage\" : 1,\n \"Health\" : 3,\n \"Critical Chance\" : 0,\n \"Damage Reduction\" : 2\n },\n \"Scout\" : {\n \"Critical Damage\" : 2,\n \"Damage\" : 3,\n \"Health\" : 0,\n \"Critical Chance\" : 1,\n \"Damage Reduction\" : 0\n }\n \n }\n\n# Read CSV class\ndef readCSVFile(filename):\n csv_path = str(PATH)+str(filename)+'.csv'\n file_csv = open(csv_path,'r')\n result = file_csv.read().split(\"\\n\")\n file_csv.close()\n return result\n\n# Set the list of badges onto a dictionary\ndef setOnDictBadges(mappingBadges, listOfBadges):\n dictBadges = {}\n for elt in listOfBadges:\n splittedBadge = elt.split(\";\")\n if splittedBadge[0] != \"\":\n dictBadges[splittedBadge[0]] = {\n mappingBadges[1] : splittedBadge[1],\n mappingBadges[2] : splittedBadge[2],\n mappingBadges[3] : splittedBadge[3],\n mappingBadges[4] : splittedBadge[4],\n mappingBadges[5] : splittedBadge[5],\n mappingBadges[6] : splittedBadge[6],\n mappingBadges[7] : splittedBadge[7],\n mappingBadges[8] : splittedBadge[8],\n mappingBadges[9] : splittedBadge[9],\n }\n return dictBadges\n\n# Set the list of survivors and their traits onto a dictionary\ndef setOnDictSurvivors(survivor1, survivor2, survivor3):\n dictSurvivor = {}\n for survivor in [survivor1, survivor2, survivor3]:\n survivor.remove(\"Field;Value\")\n name_survivor = survivor[0].split(\";\")[1]\n for elt in survivor:\n splittedSurvivor = elt.split(\";\")\n try:\n dictSurvivor[name_survivor][splittedSurvivor[0]] = splittedSurvivor[1]\n except:\n if not elt == \"\":\n dictSurvivor[name_survivor] = { splittedSurvivor[0] : splittedSurvivor[1] }\n dictSurvivor[name_survivor].pop(\"Name\")\n return dictSurvivor\n\n# Eliminate badges with priority = 0 (related to GLOBAL dictionary dictPriorities)\ndef eliminateBadgesNotIntersting(dictSurvivor, dictBadges):\n listOfClasses = []\n for elt in dictSurvivor.keys():\n if not dictSurvivor[elt][\"Class\"] in listOfClasses:\n listOfClasses.append(dictSurvivor[elt][\"Class\"])\n \n priorityByClass = {}\n for badge in dictBadges.keys():\n for class_survivor in listOfClasses:\n type_effect = dictBadges[badge][\"Effect\"]\n priority_badge = int(dictPriorities[class_survivor][type_effect])\n if not priority_badge == 0:\n if not class_survivor in priorityByClass.keys():\n 
priorityByClass[class_survivor] = [badge]\n else:\n if badge not in priorityByClass[class_survivor]:\n priorityByClass[class_survivor].append(badge)\n\n return priorityByClass, listOfClasses\n\n# Affect a list of possible badges per survivor related to his class\ndef createCombosPerSurvivor(priorityByClass, listOfClasses, dictBadges, dictSurvivor):\n badges_survivor1 = []\n badges_survivor2 = []\n badges_survivor3 = []\n \n survivor1 = dictSurvivor.keys()[0]\n class_survivor1 = dictSurvivor[survivor1][\"Class\"]\n survivor2 = dictSurvivor.keys()[1]\n class_survivor2 = dictSurvivor[survivor2][\"Class\"]\n survivor3 = dictSurvivor.keys()[2]\n class_survivor3 = dictSurvivor[survivor3][\"Class\"]\n badges_survivor1 = priorityByClass[class_survivor1]\n badges_survivor2 = priorityByClass[class_survivor2]\n badges_survivor3 = priorityByClass[class_survivor3]\n\n return badges_survivor1, badges_survivor2, badges_survivor3, survivor1, survivor2, survivor3\n\n\ndef getPossibleCombos(survivor, badges_survivor, dictBadges, dictSurvivor):\n # In a 6 badges combination, each badge should have a unique orientation.\n liste_west = []\n liste_south_west = []\n liste_north_west = []\n liste_east = []\n liste_north_east = []\n liste_south_east = []\n \n # Set the badges into a list created by its orientation\n for elt in badges_survivor:\n if dictBadges[elt][\"Orientation\"] == \"W\":\n liste_west.append(elt)\n elif dictBadges[elt][\"Orientation\"] == \"NW\":\n liste_north_west.append(elt)\n elif dictBadges[elt][\"Orientation\"] == \"SW\":\n liste_south_west.append(elt)\n elif dictBadges[elt][\"Orientation\"] == \"E\":\n liste_east.append(elt)\n elif dictBadges[elt][\"Orientation\"] == \"NE\":\n liste_north_east.append(elt)\n elif dictBadges[elt][\"Orientation\"] == \"SE\":\n liste_south_east.append(elt)\n \n # Get the list of all possible combinations with right orientation (one of each orientation)\n listCombinaisons_survivor = list(itertools.product(liste_west, liste_south_west, liste_north_west, liste_east, liste_north_east, liste_south_east))\n \n # Eliminate combinations of badges where more than 3 badges on 6 have the same effect\n newlist_combinations = []\n for elt in listCombinaisons_survivor: \n effects = []\n for idBadge in elt:\n effects.append(dictBadges[idBadge][\"Effect\"])\n cpt = Counter(effects)\n for type_effect in cpt.keys():\n cpt_effect = cpt[type_effect]\n if cpt_effect > 3:\n break\n if not cpt_effect > 3:\n newlist_combinations.append(elt)\n \n \n # Affect a priority to the badge based on the badge rarety (rarer = better)\n prioritySortedCombos = []\n for elt in newlist_combinations:\n rarety = []\n for idBadge in elt:\n rarety.append(dictBadges[idBadge][\"Rarety\"])\n cpt = Counter(rarety)\n priority = 0\n for rarety in cpt.keys():\n priority += int(cpt[rarety])*int(rarety)\n prioritySortedCombos.append((elt,priority))\n \n # Increase the badge priority related to the class of the survivor (related to the GLOBAL dictionary dictPriority)\n priorityCombosByClass = [] \n class_survivor = dictSurvivor[survivor][\"Class\"]\n\n for badgeSetWithPriority in prioritySortedCombos:\n badgeSet = badgeSetWithPriority[0]\n priority = 0\n for idBadge in badgeSet:\n type_effect = dictBadges[idBadge][\"Effect\"]\n priority_badge = int(dictPriorities[class_survivor][type_effect])\n priority += priority_badge\n priority = priority + badgeSetWithPriority[1]\n priorityCombosByClass.append((priority, badgeSet))\n \n # Increase the badge set priority if at least 4 badges have the same letter (20% bonus) 
\n priorityCombosPerBonus = []\n for badgeSet in priorityCombosByClass:\n letter = []\n for idBadge in badgeSet[1]:\n letter.append(dictBadges[idBadge][\"Letter\"])\n cpt = Counter(letter)\n for letter in cpt.keys():\n if cpt[letter] > 3:\n priority = 10 + badgeSet[0]\n else:\n priority = badgeSet[0]\n priorityCombosPerBonus.append((priority,badgeSet[1]))\n \n return priorityCombosPerBonus\n\n# Eliminate sets of badge with a priority lower than the total average of all sets priority.\ndef eliminateLowAverageSets(badgeSet, numberOfBadgesSets):\n newbadgeSet = badgeSet\n while len(newbadgeSet) > numberOfBadgesSets:\n priorities_survivor = 0\n temp_badgeSet = []\n for elt in newbadgeSet:\n priorities_survivor+= elt[0]\n moyenne_survivor = priorities_survivor/len(newbadgeSet)\n \n for elt in newbadgeSet:\n if elt[0] > moyenne_survivor:\n temp_badgeSet.append(elt)\n newbadgeSet = temp_badgeSet\n \n return newbadgeSet\n \n# Create the combination of 3 badges sets, one for each survivor\n# PRE-REQUISITE : One badge is unique and can only be used by only one survivor at the time\ndef createSetsOfBadgesFor3Survivors(name_survivor1, name_survivor2, name_survivor3, listPossibleCombos_survivor1, listPossibleCombos_survivor2, listPossibleCombos_survivor3):\n list_CombosSurvivors = []\n \n for elt_survivor3 in sorted(listPossibleCombos_survivor3, reverse=True):\n if list_CombosSurvivors != [] :\n break\n set_badge_survivor3 = set(elt_survivor3[1])\n for elt_survivor2 in sorted(listPossibleCombos_survivor2, reverse=True):\n if list_CombosSurvivors != [] :\n break\n set_badge_survivor2 = set(elt_survivor2[1])\n for elt_survivor1 in sorted(listPossibleCombos_survivor1, reverse=True):\n set_badge_survivor1 = set(elt_survivor3[1])\n liste_badges = elt_survivor1[1]+elt_survivor2[1]+elt_survivor3[1]\n set_badges = set(liste_badges)\n if len(liste_badges) == len(set_badges):\n priority_survivor1 = int(elt_survivor1[0])\n priority_survivor2 = int(elt_survivor2[0])\n priority_survivor3 = int(elt_survivor3[0])\n priority = priority_survivor1+priority_survivor2+priority_survivor3\n list_CombosSurvivors.append((priority, ((name_survivor1, elt_survivor1[1]), (name_survivor2, elt_survivor2[1]), (name_survivor3, elt_survivor3[1]))))\n if list_CombosSurvivors != [] :\n break\n return list_CombosSurvivors\n\n# Prepare string to write in the CSV\ndef prepareCSV(listSetsOfBadges, dictBadges):\n setsOfBadges_survivor1 = listSetsOfBadges[0][1][0][1]\n setsOfBadges_survivor2 = listSetsOfBadges[0][1][1][1]\n setsOfBadges_survivor3 = listSetsOfBadges[0][1][2][1]\n name_survivor1 = listSetsOfBadges[0][1][0][0]\n name_survivor2 = listSetsOfBadges[0][1][1][0]\n name_survivor3 = listSetsOfBadges[0][1][2][0]\n \n csvString = \"\"\n firstLine = \"Survivor;BadgeID;Letter;Rarety;Effect;Orientation;Value;%/Value;Bonus;%/Value Bonus;Activation Bonus\\n\"\n csvString += firstLine\n \n for badge in setsOfBadges_survivor1:\n csvString += createLineBadge(badge, dictBadges, name_survivor1)\n for badge in setsOfBadges_survivor2:\n csvString += createLineBadge(badge, dictBadges, name_survivor2)\n for badge in setsOfBadges_survivor3:\n csvString += createLineBadge(badge, dictBadges, name_survivor3)\n \n return csvString\n\ndef createLineBadge(badge, dictBadges, nameSurvivor):\n letter = dictBadges[badge][\"Letter\"]\n rarety = dictBadges[badge][\"Rarety\"]\n effect = dictBadges[badge][\"Effect\"]\n orientation = dictBadges[badge][\"Orientation\"]\n value = dictBadges[badge][\"Value\"]\n type_value = dictBadges[badge][\"%/Value\"]\n bonus = 
dictBadges[badge][\"Bonus\"]\n type_value_bonus = dictBadges[badge][\"%/Value Bonus\"]\n activation_bonus = dictBadges[badge][\"Activation Bonus\"]\n \n lineString = nameSurvivor+\";\"\n lineString += badge+\";\"\n lineString += letter+\";\"\n lineString += rarety+\";\"\n lineString += effect+\";\"\n lineString += orientation+\";\"\n lineString += value+\";\"\n lineString += type_value+\";\"\n lineString += bonus+\";\"\n lineString += type_value_bonus+\";\"\n lineString += activation_bonus+\"\\n\"\n \n return lineString\n\ndef createCSV(csv_string):\n csv_path = str(PATH)+'output.csv'\n file_csv = open(csv_path,'w')\n file_csv.write(csv_string)\n file_csv.close()\n\n\nif __name__ == \"__main__\":\n # Read CSV Files : list of Badges, Traits of the 3 characters\n listOfBadges = readCSVFile(\"badges\")\n survivor1 = readCSVFile(\"survivor1\")\n survivor2 = readCSVFile(\"survivor2\")\n survivor3 = readCSVFile(\"survivor3\")\n # Retrieve name of the fields from badges CSV-extracted list\n mappingBadges = listOfBadges[0].split(\";\")\n listOfBadges.remove(listOfBadges[0])\n # Format the list of badges into dictionary\n dictBadges = setOnDictBadges(mappingBadges, listOfBadges)\n # Format the survivors and their traits into a dictionary\n dictSurvivor = setOnDictSurvivors(survivor1, survivor2, survivor3)\n # Eliminate the badges not interesting for the survivor, related to the GLOBAL dict dictPriorities \n badgesPerClass, listOfClasses = eliminateBadgesNotIntersting(dictSurvivor, dictBadges)\n # Affect the badges to a survivor\n badges_survivor1, badges_survivor2, badges_survivor3, survivor1, survivor2, survivor3 = createCombosPerSurvivor(badgesPerClass, listOfClasses, dictBadges, dictSurvivor)\n # Process the list of badges to get the possible combinations of badges filtered by several criterias\n listPossibleCombos_survivor1 = getPossibleCombos(survivor1, badges_survivor1, dictBadges, dictSurvivor)\n listPossibleCombos_survivor2 = getPossibleCombos(survivor2, badges_survivor2, dictBadges, dictSurvivor)\n listPossibleCombos_survivor3 = getPossibleCombos(survivor3, badges_survivor3, dictBadges, dictSurvivor)\n # Eliminate the not interesting combinations (priority lower than the average priority)\n numberOfBadgesSets = 560\n listSetsOfBadges = None\n while listSetsOfBadges == None:\n listCombos_survivor1 = eliminateLowAverageSets(listPossibleCombos_survivor1, numberOfBadgesSets)\n listCombos_survivor2 = eliminateLowAverageSets(listPossibleCombos_survivor2, numberOfBadgesSets)\n listCombos_survivor3 = eliminateLowAverageSets(listPossibleCombos_survivor3, numberOfBadgesSets)\n # Get the better combination of badges for each of the 3 survivors\n listSetsOfBadges = createSetsOfBadgesFor3Survivors(survivor1, survivor2, survivor3, listCombos_survivor1, listCombos_survivor2, listCombos_survivor3)\n numberOfBadgesSets += 10\n # Prepare the CSV\n csv_string = prepareCSV(listSetsOfBadges, dictBadges)\n # Write the Output CSV\n createCSV(csv_string)" } ]
1
dalaomai/stuInfoManag
https://github.com/dalaomai/stuInfoManag
e6ff6fb75832f813840b420e186032d6162ee378
40daef68dde31b3c9296ed97d4d18d5e03745aa5
e0b3857c09c9ef6f68db856efe9b05daa3becc16
refs/heads/master
2020-09-03T10:14:28.566667
2019-06-26T03:11:14
2019-06-26T03:11:14
219,442,863
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6623488664627075, "alphanum_fraction": 0.6623488664627075, "avg_line_length": 14.86301326751709, "blob_id": "b48187ba9d59e70da231aa887be69577a97bb8bd", "content_id": "fca6d0cda58faf14b53701431d81037b9c3376fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1158, "license_type": "no_license", "max_line_length": 41, "num_lines": 73, "path": "/triggers.sql", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "DELIMITER $\ncreate trigger verify_insert_data\nbefore insert on course\nfor each row\nBEGIN\n\tif new.name='' then\n set new.name=null;\n\tend if ;\n if new.id='' then\n set new.id=null;\n\tend if ;\n if new.college='' then\n set new.college=null;\n\tend if ;\nend\n$\nDELIMITER ;\n\nDELIMITER $\ncreate trigger verify_update_data\nbefore update on course\nfor each row\nBEGIN\n\tif new.name='' then\n set new.name=null;\n\tend if ;\n if new.id='' then\n set new.id=null;\n\tend if ;\n if new.college='' then\n set new.college=null;\n\tend if ;\nend\n$\nDELIMITER ;\n\nDELIMITER $\ncreate trigger verify_student_insert_data\nbefore insert on student\nfor each row\nBEGIN\n\tif new.name='' then\n set new.name=null;\n\tend if ;\n if new.id='' then\n set new.id=null;\n\tend if ;\n if new._class='' then\n set new._class=null;\n\tend if ;\nend\n$\nDELIMITER ;\n\nDELIMITER $\ncreate trigger verify_student_update_data\nbefore update on student\nfor each row\nBEGIN\n\tif new.name='' then\n set new.name=null;\n\tend if ;\n if new.id='' then\n set new.id=null;\n\tend if ;\n if new._class='' then\n set new._class=null;\n\tend if ;\nend\n$\nDELIMITER ;\n\ndrop trigger verify_update_data\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 28.5, "blob_id": "ac2e6aa47f694ac2aeb8dbf660a45acc7b2675f4", "content_id": "d3fbf1aaa6df0e20b2ed1c0a4969145ed6b1fd7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/app/statistic/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nstatistic = Blueprint('statistic',__name__)\nfrom . import views\nfrom ..main import errors" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 27, "blob_id": "c36e5ba35cef229b2c4bd9e8ed47ab7afb1bc7ac", "content_id": "479467ef702a191d4e76e01dbed8178cef5a570c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/app/course/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\ncourse = Blueprint('course',__name__)\nfrom . 
import views\nfrom ..main import errors" }, { "alpha_fraction": 0.7366459369659424, "alphanum_fraction": 0.7366459369659424, "avg_line_length": 33.23404312133789, "blob_id": "845e2ac1e13f894560bca4d5f3e136f5015f28d3", "content_id": "56e006a4b4c1a2be681f95181fdbb9336938585a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1610, "license_type": "no_license", "max_line_length": 69, "num_lines": 47, "path": "/app/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_moment import Moment\nfrom flask_login import LoginManager\nfrom config import config\n\nbootstrap = Bootstrap()\ndb = SQLAlchemy()\nmoment = Moment()\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'auth.login'\n\n\ndef create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n\n bootstrap.init_app(app)\n db.init_app(app)\n moment.init_app(app)\n login_manager.init_app(app)\n\n from app.main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n from app.auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint,url_prefix='/auth')\n from app.personal import personal as personal_blueprint\n app.register_blueprint(personal_blueprint,url_prefix='/personal')\n from app.course import course \n app.register_blueprint(course,url_prefix='/course')\n from app.student import student \n app.register_blueprint(student,url_prefix='/student')\n from app.source import source \n app.register_blueprint(source,url_prefix='/source')\n from app.teacher import teacher \n app.register_blueprint(teacher,url_prefix='/teacher')\n from app.admin import admin \n app.register_blueprint(admin,url_prefix='/admin')\n from app.aclass import aclass \n app.register_blueprint(aclass,url_prefix='/aclass')\n from app.statistic import statistic \n app.register_blueprint(statistic,url_prefix='/statistic')\n\n app.app_context().push()\n return app\n\n" }, { "alpha_fraction": 0.6754550933837891, "alphanum_fraction": 0.7055869698524475, "avg_line_length": 27.96363639831543, "blob_id": "8bf5853e8bb660bec906b27d308b7889ff0fbb36", "content_id": "e3257b08be1c695e2f4441e50884467349a14f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1593, "license_type": "no_license", "max_line_length": 190, "num_lines": 55, "path": "/config.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nclass Permission:\n PERSONAL_INFO=0b0\n COURSE_INFO=0b1\n STUDENT_INFO=0b10\n SOURCE_INFO=0b100\n TEACHER_INFO=0b1000\n ADMIN_INFO=0b10000\n CLASS_INFO=0b100000\n STATISTIC_INFO=0b1000000\n\nclass RolePermission:\n STUDENT = Permission.PERSONAL_INFO | Permission.COURSE_INFO\n TEACHER = Permission.PERSONAL_INFO | Permission.COURSE_INFO | Permission.STUDENT_INFO | Permission.SOURCE_INFO\n ADMIN = Permission.PERSONAL_INFO | Permission.COURSE_INFO | Permission.STUDENT_INFO | Permission.SOURCE_INFO | Permission.TEACHER_INFO | Permission.CLASS_INFO | Permission.STATISTIC_INFO\n ROOT = ADMIN | Permission.ADMIN_INFO \n\n\n\nclass Config:\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'\n POSTS_PER_PAGE = 10\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n @staticmethod\n def init_app(app):\n pass\n\n\nclass 
DevelopmentConfig(Config):\n DEBUG = True\n #SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'StuInfoMange.sqlite')\n SQLALCHEMY_DATABASE_URI = \"mysql://root:[email protected]/StuInfoMange\"\n\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \\\n 'sqlite://'\n\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\n\n\nconfig = {\n 'development': DevelopmentConfig,\n 'testing': TestingConfig,\n 'production': ProductionConfig,\n 'default': DevelopmentConfig\n}\n" }, { "alpha_fraction": 0.76106196641922, "alphanum_fraction": 0.76106196641922, "avg_line_length": 27.5, "blob_id": "2b1ef42d949f0f6dba220ac96e1c5b99cf7df88e", "content_id": "30b311156223a02526c565eb73318a93a3c1d145", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/app/teacher/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nteacher = Blueprint('teacher',__name__)\nfrom . import views\nfrom ..main import errors" }, { "alpha_fraction": 0.7652173638343811, "alphanum_fraction": 0.7652173638343811, "avg_line_length": 28, "blob_id": "67acd2dd4df570a2d4e79e99b3bede61a1c0e18c", "content_id": "77638ac249041359aa332bbe6f6a4a25d2633c21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 41, "num_lines": 4, "path": "/app/personal/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\npersonal = Blueprint('personal',__name__)\nfrom . 
import views\nfrom ..main import errors" }, { "alpha_fraction": 0.7232635021209717, "alphanum_fraction": 0.7254685759544373, "avg_line_length": 28.29032325744629, "blob_id": "e2eb831ef0a515d2fc7bacceb970c5ef89f12584", "content_id": "b595e6658893017cb3a3e1e3c9854d5f5b31ceab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "no_license", "max_line_length": 95, "num_lines": 31, "path": "/flasky.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "import os\nimport click\nfrom flask_migrate import Migrate\nfrom app import create_app, db\nfrom app.models import Teacher,Student,Course,Course_Teach_Stu,Admin\nfrom flask_script import Manager,Shell\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\n\[email protected]_context_processor\ndef make_shell_context():\n return dict(db=db,stu=Student,teach=Teacher,admin=Admin,course=Course,stc=Course_Teach_Stu)\n\[email protected]()\[email protected]('test_names', nargs=-1)\ndef test(test_names):\n \"\"\"Run the unit tests.\"\"\"\n import unittest\n if test_names:\n tests = unittest.TestLoader().loadTestsFromNames(test_names)\n else:\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n\nmanager.add_command(\"shell\",Shell(make_context=make_shell_context))\n\nif __name__=='__main__':\n manager.run()" }, { "alpha_fraction": 0.752293586730957, "alphanum_fraction": 0.752293586730957, "avg_line_length": 26.5, "blob_id": "f47a3a77731d8b90cd0b6cf06f329bc7fab7ab9c", "content_id": "9da9fcba5bd14149dee7aabc3892dc1bb3c5dada", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/app/admin/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nadmin = Blueprint('admin',__name__)\nfrom . 
import views\nfrom ..main import errors" }, { "alpha_fraction": 0.6444622874259949, "alphanum_fraction": 0.6497592329978943, "avg_line_length": 33.236263275146484, "blob_id": "0803ffc059e1acd70aae792cbb9dc257e3bdffb9", "content_id": "ed60f96d333bcebb022aea5c7431c3a7b7e3660f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6244, "license_type": "no_license", "max_line_length": 227, "num_lines": 182, "path": "/app/models.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import UserMixin\nfrom app import login_manager\nfrom app import db\nfrom config import RolePermission\nfrom sqlalchemy import and_\n\nfrom app.decorators import permission_required\nfrom config import Permission\n\n\nclass User(UserMixin):\n type_id = []\n type = -1\n\n __tablename__ = 'User'\n _id = db.Column(db.Integer, primary_key=True)\n passwd_hash = db.Column(db.String(128),nullable=False)\n name = db.Column(db.String(64),nullable=False)\n id = db.Column(db.Integer,unique=True,nullable=False) \n permission = db.Column(db.Integer,default=0,nullable=False)\n sex = db.Column(db.Boolean)\n\n def query_user(type_id):\n try:\n if isinstance(type_id,str):\n type_id = eval(type_id)\n\n if not isinstance(type_id,list) or len(type_id)!=2:\n result = None\n if int(type_id[0]) == 0:\n result = Student.query.filter_by(id=int(type_id[1])).first()\n if int(type_id[0]) == 1:\n result = Teacher.query.filter_by(id=int(type_id[1])).first()\n if int(type_id[0]) == 2:\n result = Admin.query.filter_by(id=int(type_id[1])).first()\n if result != None :\n result.type_id = type_id\n result.type = type_id[0]\n except Exception as e:\n print(e)\n return None\n \n return result\n\n def get_id(self):\n return str(self.type_id)\n\n @property\n def passwd(self):\n raise AttributeError('password is not a readable attribute')\n\n @passwd.setter\n def passwd(self, passwd):\n if(len(passwd)<6):\n raise Exception('密码修改失败')\n return 0\n self.passwd_hash = generate_password_hash(passwd)\n\n def verify_passwd(self, passwd):\n return check_password_hash(self.passwd_hash, passwd)\n\n def can(self,permission):\n return (self.permission&permission)==permission\n\n def __repr__(self):\n return '<{} : {}>'.format(self.__tablename__,self.name)\n\n @permission_required(Permission.PERSONAL_INFO)\n def modifyBaseInfo(self,passwd=None):\n if passwd:\n self.passwd = passwd\n db.session.add(self)\n return db.session.commit()\n\n @permission_required(RolePermission.ADMIN)\n def getAllCourse(self):\n result = db.session.query(Course)\n return result\n\n @permission_required(RolePermission.ADMIN)\n def getAllStudent(self):\n result = db.session.query(Student,_class).filter(Student._class==_class._id)\n return result\n\n @permission_required(RolePermission.ADMIN)\n def getAllTeacher(self):\n result = db.session.query(Teacher)\n return result\n\n @permission_required(RolePermission.ADMIN)\n def getAllClass(self):\n result = db.session.query(_class)\n return result\n\n\n @permission_required(RolePermission.ROOT)\n def getAllAdmin(self):\n result = db.session.query(Admin)\n return result\n\n def getCoursesInfo(self):\n return db.session.query(Student,Teacher,Course,Course_Teach_Stu,_class).filter(and_(Student.id == Course_Teach_Stu.stu,Teacher.id == Course_Teach_Stu.teach,Course.id==Course_Teach_Stu.course,_class._id==Student._class))\n \nclass Student(User,db.Model):\n __tablename__ = 'student'\n\n 
permission = db.Column(db.Integer,default=RolePermission.STUDENT,nullable=False)\n _class = db.Column(db.Integer,db.ForeignKey('_class._id'),default=0,nullable=False)\n courses = db.relationship(\"Course_Teach_Stu\",backref='student')\n\n @permission_required(RolePermission.STUDENT)\n def modifyBaseInfo(self,passwd=None):\n if passwd:\n self.passwd = passwd\n db.session.add(self)\n return db.session.commit()\n\n @permission_required(RolePermission.STUDENT)\n def getCoursesInfo(self):\n result = super().getCoursesInfo().filter(Student.id==self.id)\n return result\n \nclass Teacher(User,db.Model):\n __tablename__ = 'teacher'\n permission = db.Column(db.Integer,default=RolePermission.TEACHER,nullable=False)\n courses = db.relationship(\"Course_Teach_Stu\",backref='teacher')\n\n @permission_required(RolePermission.TEACHER)\n def modifyBaseInfo(self,passwd=None):\n if passwd:\n self.passwd = passwd\n db.session.add(self)\n return db.session.commit()\n\n @permission_required(RolePermission.TEACHER)\n def getCoursesInfo(self):\n result = super().getCoursesInfo().filter(Teacher.id==self.id)\n return result\n\nclass Admin(User,db.Model):\n permission = db.Column(db.Integer,default=RolePermission.ADMIN,nullable=False)\n __tablename__ = 'admin'\n\n @permission_required(RolePermission.ADMIN)\n def modifyBaseInfo(self,passwd=None):\n if passwd:\n self.passwd = passwd\n db.session.add(self)\n return db.session.commit()\n\n @permission_required(RolePermission.ADMIN)\n def getCoursesInfo(self):\n result = super().getCoursesInfo()\n return result\n\nclass Course(db.Model):\n __tablename__ = 'course'\n _id = db.Column(db.Integer, primary_key=True)\n id = db.Column(db.String(64),unique=True,nullable=False)\n name = db.Column(db.String(64),nullable=False)\n college = db.Column(db.String(64),nullable=False)\n courses = db.relationship(\"Course_Teach_Stu\",backref='cour')\n\nclass _class(db.Model):\n __tablename__ = '_class'\n _id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64),nullable=False,unique=True)\n students = db.relationship(\"Student\",backref='aclass')\n \nclass Course_Teach_Stu(db.Model):\n __tablename__ = 'course_teach_stu'\n _id = db.Column(db.Integer, primary_key=True)\n stu = db.Column(db.Integer,db.ForeignKey('student.id'),nullable=False)\n teach = db.Column(db.Integer,db.ForeignKey('teacher.id'),nullable=False)\n course = db.Column(db.String(64),db.ForeignKey('course.id'),nullable=False)\n source = db.Column(db.Integer,nullable=True)\n semester = db.Column(db.String(64),nullable=False)\n \n@login_manager.user_loader\ndef load_user(type_id):\n return User.query_user(type_id)" }, { "alpha_fraction": 0.6047156453132629, "alphanum_fraction": 0.6074895858764648, "avg_line_length": 33.90322494506836, "blob_id": "58a31a7330357fea2d884da26dc945294d762562", "content_id": "7b7cef28c0ddd95b6ca86489626a1104f405e88c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2283, "license_type": "no_license", "max_line_length": 103, "num_lines": 62, "path": "/app/personal/forms.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import IntegerField,StringField,PasswordField,SubmitField,SelectField,BooleanField,widgets\nfrom wtforms.validators import Required,Length,EqualTo\n\nclass StuForm(FlaskForm):\n stype = StringField(\"角色\",render_kw={'readonly':'readonly'})\n id = StringField(\"学号\",render_kw={'readonly':'readonly'})\n aclass = 
StringField(\"班级\",render_kw={'readonly':'readonly'})\n sex = StringField(\"性别\",render_kw={'readonly':'readonly'})\n\n passwd = PasswordField(\"Password\")\n passwd2 = PasswordField(\"Confirm Password\",validators=[EqualTo('passwd',message='密码不一致')])\n\n submit = SubmitField(\"修改信息\")\n \n def __init__(self,stu):\n super().__init__()\n self.stype.data = \"学生\"\n self.id.data= stu.id\n self.aclass.data = stu.aclass.name\n if stu.sex == 0:\n self.sex.data = '男'\n if stu.sex:\n self.sex.data = '女'\n \nclass TeachForm(FlaskForm):\n stype = StringField(\"角色\",render_kw={'readonly':'readonly'})\n id = StringField(\"工号\",render_kw={'readonly':'readonly'},)\n sex = StringField(\"性别\",render_kw={'readonly':'readonly'})\n\n passwd = PasswordField(\"Password\")\n passwd2 = PasswordField(\"Confirm Password\",validators=[EqualTo('passwd',message='密码不一致')])\n\n submit = SubmitField(\"修改信息\")\n \n def __init__(self,user):\n super().__init__()\n self.stype.data = \"老师\"\n self.id.data= user.id\n if user.sex == 0:\n self.sex.data = '男'\n if user.sex:\n self.sex.data = '女'\n\nclass AdminForm(FlaskForm):\n stype = StringField(\"角色\",render_kw={'readonly':'readonly'})\n id = StringField(\"工号\",render_kw={'readonly':'readonly'},)\n sex = StringField(\"性别\",render_kw={'readonly':'readonly'})\n\n passwd = PasswordField(\"Password\")\n passwd2 = PasswordField(\"Confirm Password\",validators=[EqualTo('passwd',message='密码不一致')])\n\n submit = SubmitField(\"修改信息\")\n \n def __init__(self,user):\n super().__init__()\n self.stype.data = \"管理员\"\n self.id.data= user.id\n if user.sex == 0:\n self.sex.data = '男'\n if user.sex:\n self.sex.data = '女'" }, { "alpha_fraction": 0.6792559027671814, "alphanum_fraction": 0.6854565739631653, "avg_line_length": 40.22093200683594, "blob_id": "64720a278aa0d015e86a517691c1b25c23aa8c37", "content_id": "5de89c78566e55afd2b872ffdc4bda293cd8d2d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3620, "license_type": "no_license", "max_line_length": 204, "num_lines": 86, "path": "/app/statistic/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . 
import statistic\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu,Admin\nfrom app import db\n\[email protected]('/student')\n@login_required\n@permission_required(Permission.STATISTIC_INFO)\ndef studentStatistic():\n return render_template('statistic/index.html',mainUrl='mainStudentData')\n\[email protected]('/mainStudentData')\n@login_required\n@permission_required(Permission.STATISTIC_INFO)\ndef mainStudentData():\n data = {'dataUrl':'studentDataForAdmin','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n if current_user.type == 2:\n data['dataTitles'] = ['学号','姓名','班级','学期','平均分']\n data['dataFieldes'] = ['StudentId','StudentName','ClassName','Semester','GAvg']\n return json.dumps(data)\n\[email protected]('/studentDataForAdmin')\n@login_required\n@permission_required(RolePermission.ADMIN)\ndef studentDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','StudentId')\n sortOrder = request.args.get('sortOrder','asc')\n \n selectResult = db.session.execute('select * from stu_semes order by ' + sort + ' ' + sortOrder + ' limit ' + str(rows*(page-1)) + ',' + str(rows))\n\n datas = []\n oldItem = []\n for item in selectResult :\n\n temp = {'StudentId':item[0],'StudentName':item[1],'ClassName':item[2],'Semester':item[3],'GAvg':str(item[4])}\n datas.append(temp)\n\n datas = {'total':next(db.session.execute('select count(*) from stu_semes'))[0],'rows':datas}\n return str(json.dumps(datas)) \n\n\[email protected]('/class')\n@login_required\n@permission_required(Permission.STATISTIC_INFO)\ndef classStatistic():\n return render_template('statistic/index.html',mainUrl='mainClassData')\n\[email protected]('/mainClassData')\n@login_required\n@permission_required(Permission.STATISTIC_INFO)\ndef mainClassData():\n data = {'dataUrl':'classDataForAdmin','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n if current_user.type == 2:\n data['dataTitles'] = ['班级ID','班级','学期','课程名','平均分','最高分','最低分','及格人数','及格率(%)']\n data['dataFieldes'] = ['ClassId','ClassName','Semester','CourseName','GAvg','GMax','GMin','PassNumber','PassRate']\n return json.dumps(data)\n\[email protected]('/classDataForAdmin')\n@login_required\n@permission_required(RolePermission.ADMIN)\ndef classDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','ClassId')\n sortOrder = request.args.get('sortOrder','asc')\n \n selectResult = db.session.execute('select * from class_semes order by ' + sort + ' ' + sortOrder + ' limit ' + str(rows*(page-1)) + ',' + str(rows))\n\n datas = []\n oldItem = []\n for item in selectResult :\n\n temp = {'ClassId':item[0],'ClassName':item[1],'Semester':item[2],'CourseName':item[3],'GAvg':str(item[4]),'GMax':str(item[5]),'GMin':str(item[6]),'PassNumber':str(item[7]),'PassRate':str(item[8])}\n datas.append(temp)\n\n datas = {'total':next(db.session.execute('select count(*) from class_semes'))[0],'rows':datas}\n return str(json.dumps(datas)) " }, { "alpha_fraction": 0.6972677707672119, "alphanum_fraction": 
0.7005464434623718, "avg_line_length": 32.85185241699219, "blob_id": "e35a9bb805e82e7b967c65a12c80eade13937603", "content_id": "6be7f59978456d59e95739beef996acf6cb80faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 74, "num_lines": 27, "path": "/app/personal/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . import personal\nfrom .forms import StuForm,TeachForm,AdminForm\nfrom flask import render_template,flash,redirect,url_for\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom app.decorators import permission_required\nfrom config import Permission\nfrom app.models import Student,Teacher,Admin\n\[email protected]('/index',methods=['GET','POST'])\n@login_required\n@permission_required(Permission.PERSONAL_INFO)\ndef index():\n if current_user.type == 0:\n form = StuForm(current_user)\n elif current_user.type == 1:\n form = TeachForm(current_user)\n elif current_user.type == 2:\n form = AdminForm(current_user)\n\n if form.validate_on_submit():\n result = current_user.modifyBaseInfo(form.passwd.data)\n if result == None:\n flash(\"修改成功\")\n else:\n flash(\"修改失败\")\n\n return render_template('personal/index.html',form=form)\n\n" }, { "alpha_fraction": 0.6622007489204407, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 39.50251388549805, "blob_id": "c044171239247a33c674013832e452f083c08513", "content_id": "b6d82f0fcbe7afc4045c46422fb27db6529c4321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8189, "license_type": "no_license", "max_line_length": 305, "num_lines": 199, "path": "/app/source/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . 
import source\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc,and_\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu,_class\nfrom app import db\n\[email protected]('/index')\n@login_required\n@permission_required(Permission.SOURCE_INFO)\ndef index():\n\n return render_template('source/index.html',mainUrl='mainData')\n\[email protected]('/mainData')\n@login_required\n@permission_required(Permission.SOURCE_INFO)\ndef mainData():\n data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n\n if current_user.type == 1:\n data['operateUrls'] = {'addUrl':'','editUrl':'editSource','delUrl':''}\n data['dataTitles'] = ['Id','姓名','学号','性别','班级','班级ID','课程名','课程ID','开课学期','成绩']\n data['dataFieldes'] = ['Id','StudentName','StudentId','Sex','ClassName','ClassId','CourseName','CourseId','Semester','Source']\n data['editFieldes'] = ['Source']\n if current_user.type == 2:\n data['operateUrls'] = {'addUrl':'addSource','editUrl':'editSource','delUrl':'delSource'}\n data['dataTitles'] = ['Id','姓名','学号','性别','班级','班级ID','老师','老师工号','课程名','课程ID','开课学期','成绩']\n data['dataFieldes'] = ['Id','StudentName','StudentId','Sex','ClassName','ClassId','TeacherName','TeacherId','CourseName','CourseId','Semester','Source']\n data['addFieldes'] = ['StudentId','TeacherId','CourseId','Semester','Source']\n data['editFieldes'] = ['StudentId','TeacherId','CourseId','Semester','Source']\n\n return json.dumps(data)\n\[email protected]('/data')\n@login_required\n@permission_required(Permission.SOURCE_INFO)\ndef data():\n\n if current_user.type == 1:\n return getDataForTeacher()\n if current_user.type == 2:\n return getDataForAdmin()\n\n return None\n\n@permission_required(RolePermission.TEACHER)\ndef getDataForTeacher():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','StudentName')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getCoursesInfo()\n\n targetDict = {'StudentName':Student.name,'StudentId':Student.id,'ClassId':_class._id,'CourseName':Course.name,'CourseId':Course.id,'Source':Course_Teach_Stu.source,'Semester':Course_Teach_Stu.semester,'ClassName':_class.name,'Id':Course_Teach_Stu._id}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'StudentName')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'StudentName')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n\n datas = []\n for item in pagination.items :\n\n temp = {'StudentName':item[0].name,'StudentId':item[0].id,'ClassId':item[4]._id,'CourseName':item[2].name,'CourseId':item[2].id,'Source':item[3].source,'Semester':item[3].semester,'ClassName':item[4].name,'Id':item[3]._id}\n datas.append(temp)\n\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\n@permission_required(RolePermission.ADMIN)\ndef getDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','Id')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getCoursesInfo()\n\n targetDict = 
{'StudentName':Student.name,'StudentId':Student.id,'ClassId':_class._id,'CourseName':Course.name,'CourseId':Course.id,'Source':Course_Teach_Stu.source,'Id':Course_Teach_Stu._id,'TeacherId':Teacher.id,'TeacherName':Teacher.name,'Semester':Course_Teach_Stu.semester,'ClassName':_class.name}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'name')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'name')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n\n datas = []\n for item in pagination.items :\n\n temp = {'StudentName':item[0].name,'StudentId':item[0].id,'ClassId':item[4]._id,'CourseName':item[2].name,'CourseId':item[2].id,'Source':item[3].source,'Id':item[3]._id,'TeacherId':item[1].id,'TeacherName':item[1].name,'Semester':item[3].semester,'ClassName':item[4].name}\n datas.append(temp)\n\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\[email protected]('/editSource',methods=['POST'])\n@login_required \n@permission_required(RolePermission.TEACHER)\ndef editSource():\n if(current_user.type==1):\n return editSourceForTeacher()\n if(current_user.type==2):\n return editSourceForAdmin()\n\n@permission_required(RolePermission.TEACHER)\ndef editSourceForTeacher():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n\n course_teach_stu = db.session.query(Course_Teach_Stu).filter(and_(Course_Teach_Stu._id==id,Course_Teach_Stu.teach==current_user.id)).first()\n\n course_teach_stu.source = request.form.get('Source',course_teach_stu.source)\n \n db.session.add(course_teach_stu)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '修改失败'\n print(e)\n return str(json.dumps(result)) \n\n@permission_required(RolePermission.ADMIN)\ndef editSourceForAdmin():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n course_teach_stu = db.session.query(Course_Teach_Stu).filter(Course_Teach_Stu._id==id).first()\n course_teach_stu.source = request.form.get('Source',course_teach_stu.source)\n course_teach_stu.stu = request.form.get('StudentId',course_teach_stu.stu)\n course_teach_stu.teach = request.form.get('TeacherId',course_teach_stu.teach)\n course_teach_stu.course = request.form.get('CourseId',course_teach_stu.course)\n course_teach_stu.semester = request.form.get('Semester',course_teach_stu.semester)\n \n db.session.add(course_teach_stu)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '修改失败'\n print(e)\n return str(json.dumps(result)) \n\[email protected]('/addSource',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef addSource():\n result={'code':1,'result':'success'}\n try:\n course_teach_stu = Course_Teach_Stu()\n\n course_teach_stu.stu = request.form.get('StudentId',course_teach_stu.stu)\n course_teach_stu.teach = request.form.get('TeacherId',course_teach_stu.teach)\n course_teach_stu.course = request.form.get('CourseId',course_teach_stu.course)\n course_teach_stu.source = request.form.get('Source',course_teach_stu.source)\n course_teach_stu.semester = request.form.get('Semester',course_teach_stu.semester)\n\n if(course_teach_stu.source==''):\n course_teach_stu.source=None\n\n\n \n db.session.add(course_teach_stu)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '添加失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/delSource',methods=['POST'])\n@login_required 
\n@permission_required(RolePermission.ADMIN)\ndef delSource():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n course_teach_stu = db.session.query(Course_Teach_Stu).filter(Course_Teach_Stu._id==id).first()\n db.session.delete(course_teach_stu)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '删除失败'\n print(e)\n return str(json.dumps(result))\n\ndef str_to_bool(str):\n if str.lower() == 'true':\n return True\n if str.lower() == 'false':\n return False\n return None\n\n" }, { "alpha_fraction": 0.6513460278511047, "alphanum_fraction": 0.6533158421516418, "avg_line_length": 31.628570556640625, "blob_id": "80a87a8799c7e5649fb6832e8c460ee3d2116e85", "content_id": "bde6c04120291e9fcc4cb3e2e9c6598a7eaef9a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4609, "license_type": "no_license", "max_line_length": 112, "num_lines": 140, "path": "/app/teacher/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . import teacher\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu\nfrom app import db\n\[email protected]('/index')\n@login_required\n@permission_required(Permission.TEACHER_INFO)\ndef index():\n\n return render_template('teacher/index.html',mainUrl='mainData')\n\[email protected]('/mainData')\n@login_required\n@permission_required(Permission.TEACHER_INFO)\ndef mainData():\n data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n\n if current_user.type == 2:\n data['operateUrls'] = {'addUrl':'addTeacher','editUrl':'editTeacher','delUrl':'delTeacher'}\n data['dataTitles'] = ['Id','姓名','工号','性别','密码']\n data['dataFieldes'] = ['Id','TeacherName','TeacherId','Sex','Passwd']\n data['addFieldes'] = ['TeacherName','TeacherId','Sex','Passwd']\n data['editFieldes'] = ['TeacherName','TeacherId','Sex','Passwd']\n\n return json.dumps(data)\n\[email protected]('/data')\n@login_required\n@permission_required(Permission.TEACHER_INFO)\ndef data():\n\n if current_user.type == 2:\n return getDataForAdmin()\n\n return None\n\n\n@permission_required(RolePermission.ADMIN)\ndef getDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','TeacherName')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getAllTeacher()\n\n targetDict = {'TeacherName':Teacher.name,'TeacherId':Teacher.id,'Sex':Teacher.sex,'Id':Teacher._id}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'TeacherName')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'TeacherName')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n\n datas = []\n for item in pagination.items :\n\n temp = {'TeacherName':item.name,'TeacherId':item.id,'Sex':item.sex,'Id':item._id,'Passwd':''}\n datas.append(temp)\n\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\[email protected]('/editTeacher',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef 
editTeacher():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n teacher = db.session.query(Teacher).filter(Teacher._id==id).first()\n\n teacher.id = request.form.get('TeacherId',teacher.id)\n teacher.name = request.form.get('TeacherName',teacher.name)\n teacher.sex = str_to_bool(request.form.get('Sex',teacher.sex))\n\n if(request.form.get('Passwd','')!=''):\n teacher.passwd = request.form.get('Passwd')\n\n\n db.session.add(teacher)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '修改失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/addTeacher',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef addTeacher():\n result={'code':1,'result':'success'}\n try:\n teacher = Teacher()\n teacher.id = request.form.get('TeacherId',teacher.id)\n teacher.name = request.form.get('TeacherName',teacher.name)\n teacher.sex = str_to_bool(request.form.get('Sex',teacher.sex))\n\n if(request.form.get('Passwd','')!=''):\n teacher.passwd = request.form.get('Passwd')\n \n db.session.add(teacher)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '添加失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/delTeacher',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef delTeacher():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n teacher = db.session.query(Teacher).filter(Teacher._id==id).first()\n db.session.delete(teacher)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '删除失败'\n print(e)\n return str(json.dumps(result))\n\ndef str_to_bool(str):\n if str.lower() == 'true':\n return True\n if str.lower() == 'false':\n return False\n return None\n\n" }, { "alpha_fraction": 0.645659327507019, "alphanum_fraction": 0.6479372382164001, "avg_line_length": 28.477611541748047, "blob_id": "f677110dcaa6be98d7d4fea875801d31d60ec502", "content_id": "c4cffc1f0bfd9416c76e244f91eb6e107aaec631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3979, "license_type": "no_license", "max_line_length": 112, "num_lines": 134, "path": "/app/aclass/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . 
import aclass\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu,_class\nfrom app import db\n\[email protected]('/index')\n@login_required\n@permission_required(Permission.CLASS_INFO)\ndef index():\n\n    return render_template('class/index.html',mainUrl='mainData')\n\[email protected]('/mainData')\n@login_required\n@permission_required(Permission.CLASS_INFO)\ndef mainData():\n    data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n\n    if current_user.type == 2:\n        data['operateUrls'] = {'addUrl':'addClass','editUrl':'editClass','delUrl':'delClass'}\n        data['dataTitles'] = ['Id','班级']\n        data['dataFieldes'] = ['Id','ClassName']\n        data['addFieldes'] = ['ClassName']\n        data['editFieldes'] = ['ClassName']\n\n    return json.dumps(data)\n\[email protected]('/data')\n@login_required\n@permission_required(Permission.CLASS_INFO)\ndef data():\n\n    if current_user.type == 2:\n        return getDataForAdmin()\n\n    return None\n\n\n@permission_required(RolePermission.ADMIN)\ndef getDataForAdmin():\n    page = request.args.get('page',1,type=int)\n    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n    sort = request.args.get('sort','ClassName')\n    sortOrder = request.args.get('sortOrder','asc')\n    queryResult = current_user.getAllClass()\n\n    targetDict = {'ClassName':_class.name,'Id':_class._id}\n    if sortOrder=='asc':\n        queryResult = queryResult.order_by(asc(targetDict.get(sort,'ClassName')))\n    else:\n        queryResult = queryResult.order_by(desc(targetDict.get(sort,'ClassName')))\n    \n\n    pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n\n    datas = []\n    for item in pagination.items :\n\n        temp = {'ClassName':item.name,'Id':item._id}\n        datas.append(temp)\n\n    datas = {'total':pagination.total,'rows':datas}\n    return str(json.dumps(datas))\n\[email protected]('/editClass',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef editClass():\n    result={'code':1,'result':'success'}\n    try:\n        id = request.form.get('Id',None)\n        aclass = db.session.query(_class).filter(_class._id==id).first()\n\n        aclass.name = request.form.get('ClassName',aclass.name)\n\n\n\n        db.session.add(aclass)\n        db.session.commit()\n    except Exception as e:\n        result['code'] = 0\n        result['result'] = '修改失败'\n        print(e)\n    return str(json.dumps(result))\n\[email protected]('/addClass',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef addClass():\n    result={'code':1,'result':'success'}\n    try:\n        aclass = _class()\n        aclass.name = request.form.get('ClassName',aclass.name)\n\n        db.session.add(aclass)\n        db.session.commit()\n    except Exception as e:\n        result['code'] = 0\n        result['result'] = '添加失败'\n        print(e)\n    return str(json.dumps(result))\n\[email protected]('/delClass',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef delClass():\n    result={'code':1,'result':'success'}\n    try:\n        id = request.form.get('Id',None)\n        aclass = db.session.query(_class).filter(_class._id==id).first()\n        db.session.delete(aclass)\n        db.session.commit()\n    except Exception as e:\n        result['code'] = 0\n        result['result'] = '删除失败'\n        
print(e)\n return str(json.dumps(result))\n\ndef str_to_bool(str):\n if str.lower() == 'true':\n return True\n if str.lower() == 'false':\n return False\n return None\n\n" }, { "alpha_fraction": 0.6555445194244385, "alphanum_fraction": 0.6596620678901672, "avg_line_length": 37.07567596435547, "blob_id": "e66424e2be7fff33936c0b431ec1d52b4898c70e", "content_id": "9b78fdc5a7f47a5ddad7b94ff3891f9e58b8f704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7143, "license_type": "no_license", "max_line_length": 159, "num_lines": 185, "path": "/app/course/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from app.course import course\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu,_class\nfrom app import db\n\[email protected]('/index')\n@login_required\n@permission_required(Permission.COURSE_INFO)\ndef index():\n\n return render_template('course/index.html',mainUrl='mainData')\n\[email protected]('/data')\n@login_required\n@permission_required(Permission.COURSE_INFO)\ndef data():\n if current_user.type == 0:\n return getDataForStudent()\n if current_user.type == 1:\n return getDataForTeacher()\n if current_user.type == 2:\n return getDataForAdmin()\n\n return None\n\[email protected]('/mainData')\n@login_required\n@permission_required(Permission.COURSE_INFO)\ndef mainData():\n data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n if current_user.type == 0:\n data['dataTitles'] = ['课程名','课程号','开课学院','学期','成绩']\n data['dataFieldes'] = ['CourseName','CourseId','College','Semester','Source']\n if current_user.type == 1:\n data['dataTitles'] = ['课程名','课程号','开课学院','学期','班级']\n data['dataFieldes'] = ['CourseName','CourseId','College','Semester','ClassName']\n if current_user.type == 2:\n data['operateUrls'] = {'addUrl':'addCourse','editUrl':'editCourse','delUrl':'delCourse'}\n data['dataTitles'] = ['Id','课程名','课程号','开课学院']\n data['dataFieldes'] = ['Id','CourseName','CourseId','College']\n data['addFieldes'] = ['CourseName','CourseId','College']\n data['editFieldes'] = ['CourseName','CourseId','College']\n\n return json.dumps(data)\n\[email protected]('/delCourse',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef delCourse():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n course = db.session.query(Course).filter(Course._id==id).first()\n db.session.delete(course)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '删除失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/editCourse',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef editCourse():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n course = db.session.query(Course).filter(Course._id==id).first()\n course.id = request.form.get('CourseId',course.id)\n course.name = request.form.get('CourseName',course.name)\n course.college = request.form.get('College',course.college)\n\n\n db.session.add(course)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = 
'修改失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/addCourse',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef addCourse():\n result={'code':1,'result':'success'}\n try:\n course = Course()\n course.id = request.form.get('CourseId')\n course.name = request.form.get('CourseName')\n course.college = request.form.get('College')\n\n db.session.add(course)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '添加失败'\n print(e)\n return str(json.dumps(result))\n\n@permission_required(RolePermission.STUDENT)\ndef getDataForStudent():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','CourseName')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getCoursesInfo()\n\n targetDict = {'CourseName':Course.name,'CourseId':Course.id,'College':Course.college,'Semester':Course_Teach_Stu.semester,'Source':Course_Teach_Stu.source}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'CourseName')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'CourseName')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n datas = []\n oldItem = []\n for item in pagination.items :\n if oldItem != item:\n temp = {'CourseName':item[2].name,'CourseId':item[2].id,'College':item[2].college,'Semester':item[3].semester,'Source':item[3].source}\n datas.append(temp)\n oldItem = item\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\n@permission_required(RolePermission.TEACHER)\ndef getDataForTeacher():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','CourseName')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getCoursesInfo()\n\n targetDict = {'CourseName':Course.name,'CourseId':Course.id,'College':Course.college,'Semester':Course_Teach_Stu.semester,'ClassName':_class.name}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'CourseName')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'CourseName')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n datas = []\n oldItem = []\n for item in pagination.items :\n if oldItem==[] or (oldItem[4].name != item[4].name and oldItem[2].name != item[2].name):\n temp = {'CourseName':item[2].name,'CourseId':item[2].id,'College':item[2].college,'Semester':item[3].semester,'ClassName':item[4].name}\n datas.append(temp)\n oldItem = item\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\n@permission_required(RolePermission.ADMIN)\ndef getDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','CourseName')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getAllCourse()\n\n targetDict = {'CourseName':Course.name,'CourseId':Course.id,'College':Course.college,'Id':Course._id}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'CourseName')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'CourseName')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n datas = []\n oldItem = []\n for item in pagination.items :\n if 
oldItem != item:\n            temp = {'CourseName':item.name,'CourseId':item.id,'College':item.college,'Id':item._id}\n            datas.append(temp)\n            oldItem = item\n    datas = {'total':pagination.total,'rows':datas}\n    return str(json.dumps(datas))" }, { "alpha_fraction": 0.649330198764801, "alphanum_fraction": 0.6539372382164001, "avg_line_length": 36.04545593261719, "blob_id": "ced7076ca2e470e62c38364c3e206a8e3fa39c3a", "content_id": "393e9f858a890f9a279ffe7b8b57939e864e8225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6419, "license_type": "no_license", "max_line_length": 163, "num_lines": 176, "path": "/app/student/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . import student\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu,_class\nfrom app import db\n\[email protected]('/index')\n@login_required\n@permission_required(Permission.STUDENT_INFO)\ndef index():\n\n    return render_template('student/index.html',mainUrl='mainData')\n\[email protected]('/mainData')\n@login_required\n@permission_required(Permission.STUDENT_INFO)\ndef mainData():\n    data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n    if current_user.type == 0:\n        return getDataForStudent()\n    if current_user.type == 1:\n        data['dataTitles'] = ['姓名','学号','性别','班级','课程名','学期']\n        data['dataFieldes'] = ['StudentName','StudentId','Sex','ClassName','CourseName','Semester']\n    if current_user.type == 2:\n        data['operateUrls'] = {'addUrl':'addStudent','editUrl':'editStudent','delUrl':'delStudent'}\n        data['dataTitles'] = ['Id','姓名','学号','性别','班级','班级ID','密码']\n        data['dataFieldes'] = ['Id','StudentName','StudentId','Sex','ClassName','ClassId','Passwd']\n        data['addFieldes'] = ['StudentName','StudentId','Sex','ClassId','Passwd']\n        data['editFieldes'] = ['StudentName','StudentId','Sex','ClassId','Passwd']\n\n    return json.dumps(data)\n\[email protected]('/data')\n@login_required\n@permission_required(Permission.STUDENT_INFO)\ndef data():\n\n    if current_user.type == 1:\n        return getDataForTeacher()\n    if current_user.type == 2:\n        return getDataForAdmin()\n\n    return None\n\n@permission_required(RolePermission.TEACHER)\ndef getDataForTeacher():\n    page = request.args.get('page',1,type=int)\n    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n    sort = request.args.get('sort','StudentName')\n    sortOrder = request.args.get('sortOrder','asc')\n    queryResult = current_user.getCoursesInfo()\n\n    targetDict = {'StudentName':Student.name,'StudentId':Student.id,'Sex':Student.sex,'ClassName':_class.name,'CourseName':Course.name,'Semester':Course_Teach_Stu.semester}\n    if sortOrder=='asc':\n        queryResult = queryResult.order_by(asc(targetDict.get(sort,'StudentName')))\n    else:\n        queryResult = queryResult.order_by(desc(targetDict.get(sort,'StudentName')))\n    \n\n    pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n    datas = []\n    oldItem = []\n    for item in pagination.items :\n        temp = {'StudentName':item[0].name,'StudentId':item[0].id,'Sex':item[0].sex,'ClassName':item[4].name,'CourseName':item[2].name,'Semester':item[3].semester}\n        datas.append(temp)\n\n    datas = 
{'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\n@permission_required(RolePermission.ADMIN)\ndef getDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','name')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getAllStudent()\n\n targetDict = {'StudentName':Student.name,'StudentId':Student.id,'Sex':Student.sex,'Id':Student._id,'ClassId':_class._id,'ClassName':_class.name}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'name')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'name')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n\n datas = []\n for item in pagination.items :\n\n temp = {'StudentName':item[0].name,'StudentId':item[0].id,'Sex':item[0].sex,'Id':item[0]._id,'ClassId':item[1]._id,'ClassName':item[1].name,'Passwd':''}\n datas.append(temp)\n\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\[email protected]('/editStudent',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef editStudent():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n student = db.session.query(Student).filter(Student._id==id).first()\n\n student.id = request.form.get('StudentId',student.id)\n student.name = request.form.get('StudentName',student.name)\n student.sex = str_to_bool(request.form.get('Sex',student.sex))\n student._class = request.form.get('ClassId',student._class)\n\n\n if(request.form.get('Passwd','')!=''):\n student.passwd = request.form.get('Passwd')\n\n\n db.session.add(student)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '修改失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/addStudent',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef addStudent():\n result={'code':1,'result':'success'}\n try:\n student = Student()\n\n student.id = request.form.get('StudentId',student.id)\n student.name = request.form.get('StudentName',student.name)\n student.sex = str_to_bool(request.form.get('Sex',student.sex))\n\n\n if(request.form.get('ClassId','')!=''):\n student._class = int(request.form.get('ClassId'))\n if(request.form.get('Passwd','')!=''):\n student.passwd = request.form.get('Passwd')\n \n db.session.add(student)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '添加失败'\n print(e)\n return str(json.dumps(result))\n\[email protected]('/delStudent',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ADMIN)\ndef delStudent():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n student = db.session.query(Student).filter(Student._id==id).first()\n db.session.delete(student)\n db.session.commit()\n except Exception as e:\n result['code'] = 0\n result['result'] = '删除失败'\n print(e)\n return str(json.dumps(result))\n\ndef str_to_bool(str):\n if str.lower() == 'true':\n return True\n if str.lower() == 'false':\n return False\n return None\n\n" }, { "alpha_fraction": 0.76106196641922, "alphanum_fraction": 0.76106196641922, "avg_line_length": 27.5, "blob_id": "7faf498a66668522e7209121869ef41b27ba1622", "content_id": "28474a275307c1a683bd3bb8df37c03a5c6d03fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, 
"license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/app/student/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nstudent = Blueprint('student',__name__)\nfrom . import views\nfrom ..main import errors" }, { "alpha_fraction": 0.633253276348114, "alphanum_fraction": 0.6356542706489563, "avg_line_length": 31.660131454467773, "blob_id": "514a26ee5342a68d94929ab1689bfe1452fe826d", "content_id": "8ff4c818878f440cf719c71daccbfb90462b2035", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5056, "license_type": "no_license", "max_line_length": 126, "num_lines": 153, "path": "/app/admin/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from . import admin\n\nfrom flask import render_template,flash,redirect,url_for,request\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom config import Config\nfrom app.decorators import permission_required\nfrom config import Permission,RolePermission\nimport json\nfrom sqlalchemy import desc,asc\nfrom app.models import Student,Teacher,Course,Course_Teach_Stu,Admin\nfrom app import db\n\[email protected]('/index')\n@login_required\n@permission_required(Permission.ADMIN_INFO)\ndef index():\n\n return render_template('admin/index.html',mainUrl='mainData')\n\[email protected]('/mainData')\n@login_required\n@permission_required(Permission.ADMIN_INFO)\ndef mainData():\n data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}\n if current_user.type == 0:\n return getDataForStudent()\n if current_user.type == 1:\n return getDataForTeacher()\n if current_user.type == 2:\n data['operateUrls'] = {'addUrl':'addAdmin','editUrl':'editAdmin','delUrl':'delAdmin'}\n data['dataTitles'] = ['Id','姓名','工号','性别','权限','密码']\n data['dataFieldes'] = ['Id','AdminName','AdminId','Sex','Permission','Passwd']\n data['addFieldes'] = ['AdminName','AdminId','Sex','Passwd']\n data['editFieldes'] = ['AdminName','AdminId','Sex','Passwd']\n\n return json.dumps(data)\n\[email protected]('/data')\n@login_required\n@permission_required(Permission.ADMIN_INFO)\ndef data():\n\n if current_user.type == 2:\n return getDataForAdmin()\n\n return None\n\n\n@permission_required(RolePermission.ROOT)\ndef getDataForAdmin():\n page = request.args.get('page',1,type=int)\n rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)\n sort = request.args.get('sort','AdminName')\n sortOrder = request.args.get('sortOrder','asc')\n queryResult = current_user.getAllAdmin()\n\n targetDict = {'AdminName':Admin.name,'AdminId':Admin.id,'Sex':Admin.sex,'Id':Admin._id,'Permission':Admin.permission}\n if sortOrder=='asc':\n queryResult = queryResult.order_by(asc(targetDict.get(sort,'AdminName')))\n else:\n queryResult = queryResult.order_by(desc(targetDict.get(sort,'AdminName')))\n \n\n pagination = queryResult.paginate(page,per_page=rows,error_out=False)\n\n datas = []\n for item in pagination.items :\n\n temp = {'AdminName':item.name,'AdminId':item.id,'Sex':item.sex,'Id':item._id,'Passwd':'','Permission':item.permission}\n datas.append(temp)\n\n datas = {'total':pagination.total,'rows':datas}\n return str(json.dumps(datas))\n\[email protected]('/editAdmin',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ROOT)\ndef editAdmin():\n result={'code':1,'result':'success'}\n try:\n id = request.form.get('Id',None)\n admin = 
db.session.query(Admin).filter(Admin._id==id).first()\n\n        admin.id = request.form.get('AdminId',admin.id)\n        admin.name = request.form.get('AdminName',admin.name)\n        admin.sex = str_to_bool(request.form.get('Sex',admin.sex))\n\n\n        if(request.form.get('Passwd','')!=''):\n            admin.passwd = request.form.get('Passwd')\n        if(request.form.get('Permission','')!=''):\n            admin.permission = request.form.get('Permission')\n\n\n        db.session.add(admin)\n        db.session.commit()\n    except Exception as e:\n        result['code'] = 0\n        result['result'] = '修改失败'\n        print(e)\n    return str(json.dumps(result))\n\[email protected]('/addAdmin',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ROOT)\ndef addAdmin():\n    result={'code':1,'result':'success'}\n    try:\n        admin = Admin()\n        admin.id = request.form.get('AdminId',admin.id)\n        admin.name = request.form.get('AdminName',admin.name)\n        admin.sex = str_to_bool(request.form.get('Sex',admin.sex))\n\n        if(request.form.get('Passwd','')!=''):\n            admin.passwd = request.form.get('Passwd')\n        if(request.form.get('Permission','')!=''):\n            admin.permission = request.form.get('Permission')\n        \n        db.session.add(admin)\n        db.session.commit()\n    except Exception as e:\n        result['code'] = 0\n        result['result'] = '添加失败'\n        print(e)\n    return str(json.dumps(result))\n\[email protected]('/delAdmin',methods=['POST'])\n@login_required \n@permission_required(RolePermission.ROOT)\ndef delAdmin():\n    result={'code':1,'result':'success'}\n    try:\n        id = request.form.get('Id',None)\n        if(id==str(current_user._id)):\n            result['code'] = 0\n            result['result'] = '不能把自己删了'\n            return str(json.dumps(result))\n\n        admin = db.session.query(Admin).filter(Admin._id==id).first()\n        db.session.delete(admin)\n        db.session.commit()\n    except Exception as e:\n        result['code'] = 0\n        result['result'] = '删除失败'\n        print(e)\n    return str(json.dumps(result))\n\ndef str_to_bool(str):\n    if str.lower() == 'true':\n        return True\n    if str.lower() == 'false':\n        return False\n    return None\n\n" }, { "alpha_fraction": 0.7851372957229614, "alphanum_fraction": 0.7940226197242737, "avg_line_length": 60.849998474121094, "blob_id": "770d7327c2d522c6b6dd9f11b415d68ce3fb5836", "content_id": "fe63509f09230fdfaf12a5d656f347b982866ae6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 224, "num_lines": 20, "path": "/views.sql", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "create view c_t_s(StudentId,StudentName,CourseId,CourseName,TeacherId,TeacherName,Semester,ClassId,ClassName,Source)\nas\nselect student.id,student.name,course.id,course.name,teacher.id,teacher.name,course_teach_stu.semester,_class._id,_class.Name,course_teach_stu.source\nfrom _class,course,teacher,student,course_teach_stu\nwhere course.id = course_teach_stu.course and teacher.id = course_teach_stu.teach and student.id = course_teach_stu.stu and student._class = _class._id;\n\n\ncreate view stu_semes(StudentId,StudentName,ClassName,Semester,GAvg)\nas\nselect StudentId,StudentName,ClassName,Semester,Avg(Source) \nfrom c_t_s \ngroup by Semester,StudentId;\n\ncreate view class_semes(ClassId,ClassName,Semester,CourseName,GAvg,GMax,GMin,PassNumber,PassRate)\nas\nselect ClassId,c_t_s.ClassName,Semester,c_t_s.CourseName,Avg(Source) as GAvg,MAX(Source) as GMax,MIN(Source) as GMin,PassNumber,PassRate\nfrom c_t_s,\n(select ClassName,c_t_s.CourseName,sum(case when source >=60 then 1 else 0 end) as PassNumber,(100*(sum(case when source >=60 then 1 else 0 end)/count(*))) as PassRate from 
c_t_s group by Semester,CourseName,ClassName) as a\nwhere a.ClassName = c_t_s.ClassName and a.CourseName=c_t_s.CourseName\ngroup by Semester,CourseName,ClassName;\n\n" }, { "alpha_fraction": 0.7244898080825806, "alphanum_fraction": 0.7244898080825806, "avg_line_length": 20, "blob_id": "354622a8f12e578b9d8873246bd4af25b414153f", "content_id": "11bba7c8942e19a72a3fc8f92f2770c334e252c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "no_license", "max_line_length": 74, "num_lines": 14, "path": "/app/main/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import render_template, session, redirect, url_for, current_app\nfrom flask_login import current_user,login_required\nfrom app import db\n\n\n\nfrom app.main import main\n\n\[email protected]('/', methods=['GET', 'POST'])\n@login_required\ndef index():\n \n return render_template('index.html')\n" }, { "alpha_fraction": 0.681664764881134, "alphanum_fraction": 0.681664764881134, "avg_line_length": 31.925926208496094, "blob_id": "b68feb25a6996363f7ebfaf4f9e2a36ccf6bfb73", "content_id": "30ec026eb4726c4687565951ef098741b76b37aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 74, "num_lines": 27, "path": "/app/auth/views.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import render_template,flash,redirect,url_for\nfrom flask_login import login_user,current_user,login_required,logout_user\nfrom . import auth\nfrom app.auth.forms import LoginForm\nfrom app.models import User\n\[email protected]('/login',methods=['GET','POST'])\ndef login():\n form = LoginForm()\n #进入登陆页面\n if not form.validate_on_submit():\n return render_template('auth/login.html',form = form)\n #登陆\n user = User.query_user([form.type.data,form.id.data])\n if user is not None and user.verify_passwd(form.passwd.data):\n login_user(user,form.remember.data)\n return redirect(url_for(\"main.index\"))\n else:\n flash(\"登陆失败\",'error')\n return render_template('auth/login.html',form = form)\n\[email protected]('/logout',methods=['GET','POST'])\n@login_required\ndef logout():\n logout_user()\n flash(\"已退出登陆!\")\n return redirect(url_for(\"auth.login\"))\n" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 27, "blob_id": "0d2719ff61e5187659dba76a0e64b99ee11d5cf9", "content_id": "8016da5e34d397094e66bca93b6e086fcf4ea550", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/app/aclass/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\naclass = Blueprint('aclass',__name__)\nfrom . 
import views\nfrom ..main import errors" }, { "alpha_fraction": 0.7350427508354187, "alphanum_fraction": 0.7414529919624329, "avg_line_length": 45.900001525878906, "blob_id": "fad26cc1304d42fbd850b91dac04ffcac3e2eab0", "content_id": "8e90c0ca80b8c58903c96b28a0274544fabee8de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "no_license", "max_line_length": 95, "num_lines": 10, "path": "/app/auth/forms.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import IntegerField,StringField,PasswordField,SubmitField,SelectField,BooleanField\nfrom wtforms.validators import Required,Length\n\nclass LoginForm(FlaskForm):\n id = StringField(\"ID\",validators=[Required()])\n passwd = PasswordField(\"Password\",validators=[Required()])\n type = SelectField(\"角色\",choices=[(0,\"学生\"),(1,'老师'),(2,'管理员')],coerce=int)\n remember = BooleanField(\"记住登陆\")\n submit = SubmitField(\"Login in\")" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 27, "blob_id": "aefb3c05422d8aa69531cadfb83e454d76e10f1e", "content_id": "2dfb92dea2391b61a5a33488b4f2aea97458e829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/app/source/__init__.py", "repo_name": "dalaomai/stuInfoManag", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nsource = Blueprint('source',__name__)\nfrom . import views\nfrom ..main import errors" } ]
26
zahrabaghkhani/Nearest-Neighbor-Distribution-Function
https://github.com/zahrabaghkhani/Nearest-Neighbor-Distribution-Function
f5d2d49cf4a444c30ca8f90d32afb47c16c416f0
a28b9be0006ee9e0a0f804020651c56ae1005c53
072fd60c279320affb55555fe43c4ee870bd8525
refs/heads/main
2023-03-30T10:49:32.698669
2021-03-23T09:13:15
2021-03-23T09:13:15
334,134,108
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5341291427612305, "alphanum_fraction": 0.5684114098548889, "avg_line_length": 30.326732635498047, "blob_id": "9cfc3b8a3d01e84f3bc50748c4e4821c45530fdd", "content_id": "62fc69c8cd5f6a7b9372233bd348aa26fc846bfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3267, "license_type": "no_license", "max_line_length": 108, "num_lines": 101, "path": "/nnd_final.py", "repo_name": "zahrabaghkhani/Nearest-Neighbor-Distribution-Function", "src_encoding": "UTF-8", "text": "\r\n\"\"\"\r\n\r\nCreated on Sat Dec 5 10:11:58 2020\r\n\r\n@author: zahra\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#parameters\r\nm_min=10\r\ndata_name1 = \"z0m5e11.csv\"\r\n\r\n\r\n\r\n\r\ndata_name_split =data_name1.split('_')\r\nname = data_name_split[0]\r\nhalo_finder = data_name_split[1]\r\nredshift = data_name_split[2]\r\nL_box =int(data_name_split[3][2:-1])\r\nif (halo_finder=='FOF'):\r\n m='mass'\r\nif (halo_finder=='Rockstar'):\r\n m='Mvir'\r\n\r\n#load data\r\n#data_name = data_name1+'.csv';\r\ndata_name = './Simulations_data/'+data_name1+'.csv';\r\ndata = pd.read_csv(data_name)\r\ndata['log_mass'] = np.log10(data[m])\r\ndata = data[data['log_mass']>m_min]\r\ndata = data[[m,'x','y','z']].values\r\n\r\nnum_den = len(data)/(L_box**3)\r\nr_star = (3/(4*np.pi*num_den))**(1/3)\r\n\r\nradius = 10*r_star\r\n\r\nprint('loading data is finished!')\r\nnnd = np.full((len(data),),-1).reshape(-1,1)\r\nmass_nnd = np.full((len(data),),-1).reshape(-1,1)\r\ndata = np.hstack((data,nnd,mass_nnd))\r\ndata_r = np.random.uniform(0,L_box,(len(data),3))\r\ndata_r = np.hstack((data_r,nnd,mass_nnd))\r\nprint('Preparing array is finished!')\r\nfor i in range(len(data)):\r\n condition_x = (data[:,1]<data[i,1]+radius)&(data[:,1]>data[i,1]-radius)\r\n condition_y= (data[:,2]<data[i,2]+radius)&(data[:,2]>data[i,2]-radius)\r\n condition_z= (data[:,3]<data[i,3]+radius)&(data[:,3]>data[i,3]-radius)\r\n \r\n condition_xr = (data[:,1]<data_r[i,0]+radius)&(data[:,1]>data_r[i,0]-radius)\r\n condition_yr= (data[:,2]<data_r[i,1]+radius)&(data[:,2]>data_r[i,1]-radius)\r\n condition_zr= (data[:,3]<data_r[i,2]+radius)&(data[:,3]>data_r[i,2]-radius)\r\n \r\n \r\n a= ((data[condition_x&condition_y&condition_z][:,1]- data[i,1])**2 +\r\n (data[condition_x&condition_y&condition_z][:,2]- data[i,2])**2+ \r\n (data[condition_x&condition_y&condition_z][:,3]- data[i,3])**2)**(0.5)\r\n \r\n try:\r\n data[i,4] = np.amin(a[a!=0])\r\n data[i,5] = data[condition_x&condition_y&condition_z][:,0][np.argwhere(a==np.amin(a[a!=0]))[0,0]]\r\n \r\n except ValueError: \r\n pass\r\n \r\n \r\n a_r = ((data[condition_xr&condition_yr&condition_zr][:,1]- data_r[i,0])**2 +\r\n (data[condition_xr&condition_yr&condition_zr][:,2]- data_r[i,1])**2+ \r\n (data[condition_xr&condition_yr&condition_zr][:,3]- data_r[i,2])**2)**(0.5)\r\n \r\n \r\n \r\n \r\n try:\r\n data_r[i,3] = np.amin(a_r)\r\n data_r[i,4] = data[condition_xr&condition_yr&condition_zr][:,0][np.argwhere(a_r==np.amin(a_r))[0,0]]\r\n \r\n except ValueError: \r\n pass\r\n if(np.mod(i,10**(np.floor(np.log10(len(data)))-1))==0):\r\n print(len(data),i,(i/len(data))*100,'% is finished!')\r\n \r\n \r\n \r\n \r\n #saving data\r\naddress = './'+'sim['+data_name1+']_m_min['+str(m_min)+']/';\r\n\r\nname_nnd = address+'NND'+'_'+name+'_'+halo_finder+'_'+redshift+'_sample'+str([m_min])+'.txt'\r\nname_nnd_r = address+'NND_R'+'_'+name+'_'+halo_finder+'_'+redshift+'_sample'+str([m_min])+'.txt'\r\ncolumn_names_nnd = 
['mass','x','y','z','nnd','mass[nn]']\r\ncolumn_names_nnd_r= ['x[random]','y[random]','z[random]','nnd','mass[nn]']\r\nnp.savetxt(name_nnd,data,header=','.join(column_names_nnd),comments='')\r\nnp.savetxt(name_nnd_r,data_r,header=','.join(column_names_nnd_r),comments='')\r\n\r\n\r\n\r\n'''end'''\r\n" } ]
1
kodingwithkelly/OptimalFlowPlaylist
https://github.com/kodingwithkelly/OptimalFlowPlaylist
c88e0382ff2c674afc8acfb24ca02e354180eade
09dd2cc5e7d4e294492eb8fbd795dc2492184575
ea120d5962615e9854afbc05f4618ec207874ed4
refs/heads/main
2023-04-13T18:37:14.836916
2021-05-05T06:39:09
2021-05-05T06:39:09
334,337,342
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.565168559551239, "alphanum_fraction": 0.5685393214225769, "avg_line_length": 29.70689582824707, "blob_id": "9b32fb4ec33206fd550d050e0bee0655a756e6f0", "content_id": "cf304adc8eb565ed381f764b9d54ba74cd49dab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1780, "license_type": "no_license", "max_line_length": 206, "num_lines": 58, "path": "/optimal_python_script.py", "repo_name": "kodingwithkelly/OptimalFlowPlaylist", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nimport requests\nfrom secrets import access_token, user_id\n\nclass CreatePlaylist:\n\n    def __init__(self):\n        self.user_id = user_id\n        self.access_token = access_token\n        self.csv = '/Users/kellylam/optimal_playlist.csv'\n\n\n    def create_playlist(self):\n        request_body = json.dumps({\n            'name': 'Optimal Playlist',\n            'description': 'Python script that converts CSV file of reordered songs of \"Top Songs 2020\" into a Spotify playlist. Songs have been reordered by energy, tempo, valence, loudness, danceability',\n            'public': True\n        })\n        query = 'https://api.spotify.com/v1/users/{}/playlists'.format(self.user_id)\n        response = requests.post(\n            query,\n            data = request_body,\n            headers = {\n                'Content-Type':'application/json',\n                'Authorization':'Bearer {}'.format(self.access_token)\n            }\n        )\n        response_json = response.json()\n\n        # playlist id\n        return response_json['id']\n\n\n    def add_to_playlist(self):\n        df = pd.read_csv(self.csv)\n        \n        # Create new playlist\n        playlist_id = self.create_playlist()\n\n        # Populate playlist\n        request_data = json.dumps(df.uri.tolist())\n        query = 'https://api.spotify.com/v1/playlists/{}/tracks'.format(playlist_id)\n        response = requests.post(\n            query,\n            data = request_data,\n            headers = {\n                'Content-Type':'application/json',\n                'Authorization':'Bearer {}'.format(self.access_token)\n            }\n        )\n        response_json = response.json()\n        return response_json\n\n\nif __name__ == '__main__':\n    cp = CreatePlaylist()\n    cp.add_to_playlist()" }, { "alpha_fraction": 0.7625492215156555, "alphanum_fraction": 0.7839567065238953, "avg_line_length": 53.9054069519043, "blob_id": "ee924782a58664686092bf8f68b3b00a28cda36f", "content_id": "b1bd02ddc8bea7e21509b964cd4c67e1d882d46c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4064, "license_type": "no_license", "max_line_length": 389, "num_lines": 74, "path": "/README.md", "repo_name": "kodingwithkelly/OptimalFlowPlaylist", "src_encoding": "UTF-8", "text": "# Spotify Optimal Musical Flow Reordering: Project Overview \n* Extracted 100 tracks from the \"Top Songs 2020\" playlist, along with each song's audio features, artist IDs, and each artist's genre, and exported them to .csv for feature analysis. \n* Wrangled streaming history dataset and audio feature dataset using Python.\n* Explored over 7,000 rows of streaming history to see what days and what time of day I stream the most and to see how Spotify orders their \"Top Songs 2020\" playlist. \n* Analyzed audio features of \"Top Songs 2020\" to see the distribution and correlation of each, to subsequently apply to our criteria for reordering songs.\n\n## Conclusion\nStreamlit app: https://streamlit-optimal-playlist.herokuapp.com/\n\n\nI found that I tend to like happier songs through analyzing valence and mode. By creating correlation graphs, I discovered that energy is highly correlated with other features.
 Furthermore, my own analysis shows an affinity for streaming music on Wednesdays at noon.\n\nMost importantly, this analysis has made my life easier by reordering songs to my preference and saved me time from manually locating each song to relocate. I believe that this analysis can be used in conjunction with Spotify's own \"Wrapped\". This solution to reordering songs can also be adapted for users to order by their own preference, not just by what has been analyzed in this EDA. \n\n## Code and Resources Used \n**Packages:** base64, requests, spotipy, json, pandas, numpy, seaborn, matplotlib.pyplot, sklearn (preprocessing)\n\n**Data Mining Video:** https://www.youtube.com/watch?v=xdq6Gz33khQ&t=4345s\n\n**Playlist Automation:** https://github.com/TheComeUpCode/SpotifyGeneratePlaylist/blob/master/create_playlist.py\n\n## Data Mining\nMined audio features of 100 of my top songs of 2020. \n\nFeatures:\n* energy\n* liveness\n* tempo\n* speechiness\n* acousticness\n* instrumentalness\n* time signature\n* danceability \n* key\n* duration (milliseconds)\n* loudness\n* valence\n* mode\n* type\n* uri\n\nAs well as Artist ID from the playlist and Genre from Artist ID.\n\n## Data Cleaning\nAfter data mining and discovering the data I had, I made the following changes:\n* Top Songs 2020\n  * Removed the genre columns following the first\n  * Removed time_signature, duration, and type\n  * Replaced NaN values of genre with the mode\n  * Mapped key numbers to real letter keys\n  * Added track name and artist to .csv file with audio features\n  * Separated track name and artist into different cells in Excel\n  * Removed \"{,}\" from the cells after separating in Excel\n* Streaming History\n  * Created new column for Seconds Played called sPlayed in Excel\n  * Removed msPlayed \n  * Filtered endTime column so that we only have dates in 2020 and songs played 30 seconds or more\n  * Created a weekdays column and hour time column that shows what hour AM/PM that I streamed\n  * Remapped hour data after seeing it did not correctly represent the hours I streamed\n \n## EDA\nBelow are a few graphs from my EDA.\n![alt text](https://github.com/kodingwithkelly/OptimalFlowPlaylist/blob/main/Read%20me%20pngs/Correlation%20of%20Features.png \"Correlation of Features\")\n\n![alt text](https://github.com/kodingwithkelly/OptimalFlowPlaylist/blob/main/Read%20me%20pngs/Correlation%20Between%20Energy%20%26%20Valence.png \"Correlation Between Energy and Valence\")\n\n![alt text](https://github.com/kodingwithkelly/OptimalFlowPlaylist/blob/main/Read%20me%20pngs/Radar%20Chart.png \"Radar Chart\")\n\n![alt text](https://github.com/kodingwithkelly/OptimalFlowPlaylist/blob/main/Read%20me%20pngs/Barchart%20of%20Streaming%20Hour.png \"Barchart of Streaming Hour\")\n\n\n## Future Work\n* Revisit to add another map that details when specific songs were streamed and create functions to map instead of having a cell for each map\n* May 2021, I have revisited to have another go at creating a dashboard. I believe that the first try had an unusual size, lacked a story, was cluttered, and did not have interactive functionality. My updated dashboard fixes some of my issues, though I believe it can still be improved. I will possibly revisit as practice. \n" } ]
2
ashish8318/chatapp
https://github.com/ashish8318/chatapp
fe07f59dcec6cbe842be6f006af46dc812490133
631a07c14e5a71f57ab6f231d0d77d3fdf493908
db2a072f42d473597611c1ec983efc853b54e63b
refs/heads/master
2023-01-16T07:01:23.870982
2020-11-30T09:42:36
2020-11-30T09:42:36
315,703,064
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6392189860343933, "alphanum_fraction": 0.646010160446167, "avg_line_length": 29.710525512695312, "blob_id": "fc1530ff4c0ed07da5d7ef453f5bb30f6ac3675d", "content_id": "2deb70eaafc4a56c4e4336fd3b5ddc39a54f1d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 62, "num_lines": 38, "path": "/chat/models.py", "repo_name": "ashish8318/chatapp", "src_encoding": "UTF-8", "text": "from flask_sqlalchemy import SQLAlchemy\nfrom flask_login import UserMixin\n\ndb = SQLAlchemy()\n\nclass User(UserMixin,db.Model):\n    \"\"\"User model\"\"\"\n    __tablename__ = 'user'\n    id=db.Column(db.Integer,primary_key=True)\n    username=db.Column(db.String(25),nullable=False)\n    email=db.Column(db.String(30),unique=True, nullable=False)\n    password=db.Column(db.String(), nullable=False)\n    room=db.Column(db.String(25), nullable=True)\n    image_name=db.Column(db.String(), nullable=True)\n\n    def __init__(self, username, email, password):\n        self.username = username\n        self.email = email\n        self.password = password\n\n    def __repr__(self):\n        return f\"<{self.username}>\"\n\nclass Friend(db.Model):\n    \"\"\"User Friend\"\"\"\n    __tablename__ = 'friend'\n    id=db.Column(db.Integer,primary_key=True)\n    send=db.Column(db.Integer,nullable=False)\n    receive=db.Column(db.Integer,nullable=False)\n    room_name=db.Column(db.String(25),nullable=False)\n\n    def __init__(self, send,receive,room_name):\n        self.send=send \n        self.receive=receive \n        self.room_name=room_name\n\n    def __repr__(self):\n        return f\"<{self.room_name}>\" " }, { "alpha_fraction": 0.7059865593910217, "alphanum_fraction": 0.7148423790931702, "avg_line_length": 49.375, "blob_id": "4148995135c76e6a0d7b1f0f009b4fe1433316b4", "content_id": "436a96617bf52cffd758e80f9912be22061b60e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2823, "license_type": "no_license", "max_line_length": 197, "num_lines": 56, "path": "/chat/forms.py", "repo_name": "ashish8318/chatapp", "src_encoding": "UTF-8", "text": "from flask_wtf import Form \nfrom wtforms import TextField,StringField, SubmitField ,PasswordField\nfrom flask_wtf.file import FileField\nfrom wtforms.validators import InputRequired, Length,ValidationError,Email\nfrom passlib.hash import pbkdf2_sha256\nfrom models import User\nimport os\n\ndef invalid_credentials(form,field):\n    \"\"\"Username and password checker\"\"\"\n    email_enter=form.email.data\n    password_enter=field.data\n    user_object=User.query.filter_by(email=email_enter).first()\n    if user_object is None:\n        raise ValidationError(\"Email does not match our database\")\n    elif not pbkdf2_sha256.verify(password_enter,user_object.password ):\n        raise ValidationError(\"Please enter the correct password\")\n    \n\nclass RegisterForm(Form): \n    name = StringField(\"Name\",validators=[InputRequired(\"Please enter your name.\"),Length(min=4,max=10,message=\"User name must be between 4 and 10 characters\")]) \n    email = StringField(\"Email\",validators=[InputRequired(\"Please enter your email address.\"),Email(\"Please fill in a correct email\")]) \n    password=PasswordField(\"Password\", validators=[InputRequired(\"Please enter your password\"),Length(min=4,max=8,message=\"Password must be between 4 and 8 characters\")])\n    Register = SubmitField(\"Register\") \n\n    def validate_email(self,email):\n        if User.query.filter_by(email=email.data).first():\n            raise ValidationError(\"User already exists, please enter another
 user\")\n\nclass LoginForm(Form):\n    email=StringField(\"Email\",validators=[InputRequired(\"Please enter your email address\"),Email(\"Please fill in a correct email address\")])\n    password=PasswordField(\"Password\",validators=[InputRequired(\"Please enter your password\"),Length(min=4,max=8,message=\"Password must be between 4 and 8 characters\"), invalid_credentials])\n    Login = SubmitField(\"Login\")\n\nclass CreateRoom(Form):\n    room=StringField(\"Room\",validators=[InputRequired(\"Please enter room name\"),Length(min=3,message=\"Room name must be at least 3 characters\")]) \n    Submit = SubmitField(\"Submit\") \n\n    # def check_Unique_Room(self,room):\n    #     if User.query.filter_by(room=room.data).first():\n    #         raise ValidationError(\"Room name already exists please enter another room\")\n\nclass UploadFile(Form):\n    file = FileField('File',validators=[InputRequired(\"Please upload file\")])\n    Upload = SubmitField('Upload') \n\n    def validate_file(self,file):\n        uploaded_file = file.data \n        if uploaded_file.filename != '':\n            filename = uploaded_file.filename\n            file_ext = os.path.splitext(filename)[1]\n            print(file_ext)\n            if file_ext not in ['.jpg', '.png','.JPG','.PNG']:\n                raise ValidationError(\"Please choose a jpg or png format image\")\n        else:\n            raise ValidationError(\"Please do not upload a file without a name\") " }, { "alpha_fraction": 0.47749197483062744, "alphanum_fraction": 0.4823151230812073, "avg_line_length": 35.80473327636719, "blob_id": "e77bd2801313b07259fe4193d245f5941b4a79d3", "content_id": "2d02a2c132262bb941980248e741e26fa2ed44a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 6220, "license_type": "no_license", "max_line_length": 188, "num_lines": 169, "path": "/chat/templates/dashbord.html", "repo_name": "ashish8318/chatapp", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block title %}Login{% endblock %}\n{% block body %}\n<div uk-sticky=\"sel-target: .uk-navbar-container; cls-active: uk-navbar-sticky\"> \n    <nav class=\"uk-navbar-container\" uk-navbar>\n\n    <div class=\"uk-navbar-left uk-margin-left\">\n        <a class=\"uk-navbar-item uk-logo\" href=\"#\"><span><i class=\"fab fa-rocketchat\" style=\"color: Tomato; margin-right:10px;\"></i></span> Chattapp </a>\n    </div>\n\n    <div class=\"uk-navbar-right uk-margin-right\">\n\n        <ul class=\"uk-navbar-nav\">\n            <li>\n                <a href=\"{{url_for('chat')}}\">Chat</a>\n            </li>\n            <li ><a href=\"#\">Welcome : {% if current_user.is_authenticated %} {{ current_user.username }}{% endif %}</a></li>\n            <li>\n                <a href=\"{{url_for('logout')}}\">Logout</a>\n            </li>\n        </ul>\n\n    </div>\n\n</nav>\n</div>\n\n{% block messageshow %}\n\n    {% with messages = get_flashed_messages(with_categories=true) %}\n    {% if messages %}\n    {% for message in messages %}\n    <div class=\"uk-alert-{{message[0]}}\" uk-alert>\n        <a class=\"uk-alert-close\" uk-close></a>\n        <p class=\"uk-text-center\" >{{message[1]}}.</p>\n    </div>\n    {% endfor %}\n    {% endif %}\n    {% endwith %}\n{% endblock %}\n\n<div class=\"uk-child-width-1-1@m uk-margin-large-top uk-margin-medium-left\" uk-grid>\n    <div>\n        <div uk-grid>\n            <div class=\"uk-width-auto@m\">\n                <ul class=\"uk-tab-left\" uk-tab=\"connect: #component-tab-left; animation: uk-animation-fade\">\n                    <li><a href=\"#\">Create Room</a></li>\n                    <li><a href=\"#\">Upload Image</a></li>\n                    <li><a href=\"#\">Send Room</a></li>\n                    <li><a href=\"#\"> My Friend</a></li>\n                </ul>\n            </div>\n            <div class=\"uk-width-expand@m\">\n                <ul id=\"component-tab-left\" class=\"uk-switcher\">\n                    <li>\n\n<form 
class=\"uk-form-stacked\" action=\"{{url_for('dashbord')}}\" method=\"POST\">\n {{ form1.hidden_tag() }}\n <div class=\"uk-margin\">\n <label class=\"uk-form-label\" for=\"form-stacked-text\">Room Name</label>\n <div class=\"uk-form-controls\">\n {{ form1.room(class_=\"uk-input uk-form-width-medium\" ,id_=\"form-stacked-text\" , placeholder=\"Enter Room name...\",autofocus=true) }} \n {% if form1.room.errors %}\n {% for error in form1.room.errors %}\n <div class=\"uk-alert-danger\" uk-alert>\n <a class=\"uk-alert-close\" uk-close></a>\n <p>{{error}}</p>\n </div>\n {% endfor %}\n {% endif %}\n </div>\n </div>\n\n <div class=\"uk-margin\">\n <div class=\"uk-form-controls\">\n {{form1.Submit(class_=\"uk-button uk-button-primary\")}}\n </div>\n </div>\n</form>\n\n\n</li>\n <li>\n <form method=\"POST\" action=\"{{url_for('dashbord')}}\" enctype=\"multipart/form-data\">\n {{ form2.hidden_tag() }}\n <div class=\"uk-margin\" uk-margin>\n <div uk-form-custom=\"target: true\">\n {{ form2.file() }}\n <input class=\"uk-input uk-form-width-medium\" type=\"text\" placeholder=\"Select file\" disabled>\n </div>\n {% if form2.file.errors %}\n {% for error in form2.file.errors %}\n <div class=\"uk-alert-danger\" uk-alert>\n <a class=\"uk-alert-close\" uk-close></a>\n <p>{{error}}</p>\n </div>\n {% endfor %}\n {% endif %}\n {{form2.Upload(class_=\"uk-button uk-button-primary\")}}\n </div>\n\n</form>\n</li>\n\n<li>\n\n <div class=\"uk-overflow-auto\">\n <table class=\"uk-table uk-table-hover uk-table-middle uk-table-divider\">\n <thead>\n <tr>\n <th class=\"uk-table-shrink\"><b>Image</b></th>\n <th class=\"uk-table-shrink\"><b>Name</b></th>\n <th class=\"uk-table-shrink\"><b>Send Request Room</b></th>\n </tr>\n </thead>\n <tbody>\n {% for i in all_user %}\n <tr>\n {% if i.image_name %}\n <td><img class=\"uk-preserve-width uk-border-circle\" src=\"static/image/{{i.image_name}}\" width=\"50\" alt=\"\"></td>\n {% else %}\n <td><img class=\"uk-preserve-width uk-border-circle\" src=\"static/image/user.jpg\" width=\"50\" alt=\"\"></td>\n {% endif %} \n <td class=\"uk-table-link\">{{i.username}}</td>\n <td class=\"uk-table-link\"><div>\n <a class=\"uk-button uk-button-primary uk-button-small\" href=\"send/{{current_user.id}}/{{i.id}}/{{current_user.room}}\">Send <i class=\"fas fa-paper-plane\"></i></a></div>\n </td> \n </tr>\n {% endfor %}\n \n </tbody>\n </table>\n </div>\n\n</li>\n<li><div class=\"uk-overflow-auto\">\n <table class=\"uk-table uk-table-hover uk-table-middle uk-table-divider\">\n <thead>\n <tr>\n <th class=\"uk-table-shrink\"><b>Image</b></th>\n <th class=\"uk-table-shrink\"><b>Name</b></th>\n <th class=\"uk-table-shrink\"><b>Unfriend</b></th>\n </tr>\n </thead>\n <tbody>\n {% for i in all_friend %}\n <tr>\n {% if userobject[loop.index-1].image_name %}\n <td><img class=\"uk-preserve-width uk-border-circle\" src=\"static/image/{{userobject[loop.index-1].image_name}}\" width=\"50\" alt=\"\"></td>\n {% else %}\n <td><img class=\"uk-preserve-width uk-border-circle\" src=\"static/image/user.jpg\" width=\"50\" alt=\"\"></td>\n {% endif %} \n <td class=\"uk-table-link\">{{userobject[loop.index-1].username}}</td>\n <td class=\"uk-table-link\"><div style=\"width=100px;\">\n <a class=\"uk-button uk-button-danger uk-button-small\" href=\"unfriend/{{i.send}}/{{i.receive}}\">Delete <i class=\"fas fa-trash-alt\"></i></a></div>\n </td> \n </tr>\n {% endfor %}\n </tbody>\n </table>\n </div>\n </li>\n </ul>\n </div>\n </div>\n </div>\n \n</div>\n{% endblock %}\n" }, { "alpha_fraction": 0.6331599354743958, 
"alphanum_fraction": 0.6396619081497192, "avg_line_length": 32.00429153442383, "blob_id": "b32465045ba0baf0300f804a4e6409d015ae45f3", "content_id": "227f44878b87d4f26e6ccfdd2a448b8dfbd229ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7690, "license_type": "no_license", "max_line_length": 126, "num_lines": 233, "path": "/chat/application.py", "repo_name": "ashish8318/chatapp", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template,flash,redirect,url_for,request\nfrom forms import RegisterForm,LoginForm,CreateRoom,UploadFile\nfrom flask_sqlalchemy import SQLAlchemy\nfrom passlib.hash import pbkdf2_sha256\nfrom flask_login import LoginManager,login_user,current_user,logout_user\nfrom werkzeug.utils import secure_filename\nfrom models import User,Friend\nimport os\nfrom sqlalchemy import create_engine\nengine=create_engine(\"database url\")\n# this code is write for delete freiend\nfrom sqlalchemy.orm import sessionmaker,scoped_session\nsession_factory=sessionmaker(bind=engine)\nsession1=scoped_session(session_factory)\ns1=session1()\n# end code\n# from flask_migrate import Migrate\n\n# instance of flask app\napp=Flask(__name__)\n# Secret_key\napp.config['SECRET_KEY'] = 'secret key'\n# Database location postgress localhost\napp.config['SQLALCHEMY_DATABASE_URI'] = \"database url\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False\n# ininlize db(connection with database)\ndb = SQLAlchemy(app)\n\n\n\n# migrate = Migrate(app, db)\n# User Table Model\n# configure flask_login\n# Email send\nfrom flask_mail import Mail, Message\nfrom itsdangerous import URLSafeTimedSerializer, SignatureExpired\ns = URLSafeTimedSerializer(\"Thisisasecret!\")\n\napp.config.update(dict(\n DEBUG = True,\n MAIL_SERVER = 'smtp.gmail.com',\n MAIL_PORT = 587,\n MAIL_USE_TLS = True,\n MAIL_USE_SSL = False,\n MAIL_USERNAME = 'user email',\n MAIL_PASSWORD = 'email password',\n))\n\nmail = Mail(app)\n\n# code for flask socketio\nfrom flask_socketio import SocketIO,join_room, leave_room,emit\nsocketio = SocketIO(app)\n\nlogin=LoginManager(app)\n# initlize login app\nlogin.init_app(app)\n\[email protected]_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n\[email protected](\"/register\", methods=[\"GET\",\"POST\"])\ndef register():\n form = RegisterForm() \n if form.validate_on_submit(): \n username=form.name.data \n email=form.email.data\n password=form.password.data\n sendmessage=[username,email,password]\n token = s.dumps(sendmessage, salt='email-confirm')\n msg = Message('Confirm Email chatapp', sender='email id with send ', recipients=[email]) \n link = url_for('confirm_Email', token=token, _external=True)\n msg.body = 'Your link is {} Please click confirmation '.format(link)\n mail.send(msg)\n flash(\"Please check email\")\n return redirect(url_for('register')) \n return render_template(\"register.html\",form=form)\n\[email protected]('/confirm_Email/<token>')\ndef confirm_Email(token):\n try:\n getmessage = s.loads(token, salt='email-confirm', max_age=8600)\n print(getmessage)\n except SignatureExpired:\n flash(\"Session Expire of tokrn please try again\",\"danger\")\n return redirect(url_for('register'))\n print(getmessage[2],getmessage[0],getmessage[1]) \n hash_password=pbkdf2_sha256.hash(getmessage[2])\n user=User(username=getmessage[0],email=getmessage[1],password=hash_password)\n db.session.add(user)\n db.session.commit()\n db.session.close() \n flash(\"Your account is Successfully created please login\") \n return 
redirect(url_for('login'))\n\[email protected]('/login',methods=[\"GET\",\"POST\"])\ndef login():\n    form=LoginForm()\n    if form.validate_on_submit():\n        user_obj=User.query.filter_by(email=form.email.data).first()\n        login_user(user_obj)\n        return redirect(url_for('chat'))\n    return render_template(\"login.html\",form=form)\n\[email protected]('/chat')\ndef chat():\n    if not current_user.is_authenticated:\n        flash(\"Please log in\",\"danger\")\n        return redirect(url_for('login'))\n    all_friend=Friend.query.filter((Friend.send==current_user.get_id())| (Friend.receive==current_user.get_id())).all() \n    return render_template(\"Chatt.html\",all_friend=all_friend) \n\n\[email protected]('/dashbord',methods=[\"GET\",\"POST\"])\ndef dashbord():\n    if current_user.is_authenticated: \n\n        form1=CreateRoom()\n        form2=UploadFile()\n        all_user=User.query.filter(User.id!=current_user.get_id()).all()\n        print(current_user.get_id())\n        all_friend=Friend.query.filter((Friend.send==current_user.get_id())| (Friend.receive==current_user.get_id())).all()\n        l=[]\n        for i in all_friend:\n            if i.send == current_user.id :\n                l.append(User.query.get(i.receive))\n            else:\n                l.append(User.query.get(i.send)) \n\n        print(all_friend,l)\n        if form1.validate_on_submit():\n            user_obj=User.query.get(current_user.get_id())\n            if user_obj.room is None :\n                user_obj.room=form1.room.data\n                db.session.merge(user_obj)\n                db.session.commit()\n                db.session.close()\n                flash(\"Your room was successfully created\")\n            else:\n                flash(\"You have already created a room\",\"danger\") \n            # redirect after POST so a refresh does not resubmit the form\n            return redirect(url_for('dashbord'))\n        elif form2.validate_on_submit():\n            print(\"success\")\n            uploaded_file = form2.file.data\n            filename = secure_filename(uploaded_file.filename)\n            uploaded_file.save(os.path.join('static/image',filename))\n            print(filename)\n            user_obj=User.query.get(current_user.get_id())\n            print(current_user.get_id())\n            print(user_obj.image_name)\n            name=user_obj.image_name\n            if name is None :\n                user_obj.image_name=filename\n                db.session.merge(user_obj)\n                db.session.commit()\n                db.session.close()\n                flash(\"Your file was successfully uploaded\")\n            else:\n                flash(\"You have already uploaded an image\",\"danger\")\n            return redirect(url_for('dashbord'))\n\n        return render_template(\"dashbord.html\",form1=form1,form2=form2,all_user=all_user,all_friend=all_friend,userobject=l) \n    else:\n        return redirect(url_for('login'))\n\n\[email protected]('/send/<int:id1>/<int:id2>/<string:room>')\ndef send(id1,id2,room):\n    print(room)\n    if room == \"None\" :\n        flash(\"Please create your own room first, then send\",\"danger\")\n        return redirect(url_for(\"dashbord\")) \n    else :\n        fcheck=Friend.query.filter((Friend.send==id1) & (Friend.receive==id2)).first()\n        if fcheck :\n            flash(\"You have already sent a request\",\"danger\")\n        else:\n            F=Friend(send=id1,receive=id2,room_name=room)\n            db.session.add(F)\n            db.session.commit()\n            db.session.close()\n            flash(\"Your request was successfully sent\")\n    return redirect(url_for(\"dashbord\")) \n\[email protected]('/unfriend/<int:id1>/<int:id2>')\ndef unfriend(id1,id2):\n    print(id1,id2)\n    user_obj=s1.query(Friend).filter((Friend.send==id1) & (Friend.receive==id2)).first()\n    s1.delete(user_obj)\n    s1.commit()\n    s1.close()\n    flash(\"Your friend was successfully unfriended\",\"danger\")\n    return redirect(url_for('dashbord'))\n\[email protected]('/logout')\ndef logout():\n    logout_user()\n    flash(\"You are logged out\",'danger')\n    return redirect(url_for('login'))\n\n\n\n# Flask socket\[email protected]('message')\ndef handle_message(message):\n    if message[\"room\"]==\"NO\":\n        socketio.send(message)\n    else:\n        
socketio.send(message,room=message[\"room\"]) \n\[email protected]('join')\ndef on_join(data):\n username = data['username']\n room = data['room']\n join_room(room)\n socketio.send({'msg':\"has join the room.\",'username':username},room=room)\n\[email protected]('leave')\ndef on_leave(data):\n username = data['username']\n room = data['room']\n leave_room(room)\n socketio.emit('leave',{'msg':\"has leave the room.\",'username':username} ) \n\n\n# run flask code....\nif __name__==\"__main__\":\n socketio.run(app, debug=True)\n" }, { "alpha_fraction": 0.7592997550964355, "alphanum_fraction": 0.7600291967391968, "avg_line_length": 62.72093200683594, "blob_id": "5d8fe081128888346f8b87aa3c2b9c47061109ae", "content_id": "2583f16529e38f0b9987c4ab7aa32edb75688049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2742, "license_type": "no_license", "max_line_length": 580, "num_lines": 43, "path": "/README.md", "repo_name": "ashish8318/chatapp", "src_encoding": "UTF-8", "text": "# Chatapp\n# Introduction\n\nThis chatapp created with **flask socketio** and **javascripts socket**. This is simple chatapp in which user login then he redirect on chat page or he show message you are connected. In this chat app also provide dashbord in which user create own room, upload own profile photo, send own room to any people who create register in chatapp. user also delete own friend or show all friend. user go on the chat page or select room so in this case message send in a selected room. not show all user that are connected. But user not choose room then in this case message send all user.\n\n# Demo\n![](chat/chat.png)\n# File/ Folder in chatpp\n * **application:** This is main file . This file contain createing app instance, database connection, or import all package. This file contain code that handle like registration,login,dashbord, socket connect, join room, leave room or logout functionality.\n\n * **models:** This file contains class base table structure. This methodlogy called **ORM** in flask. this class covert in table with SQLAlchemy. I also add usermixin in class to appy flask login functinality.\n\n * **Forms:** I create form with flask_wtf, with flask_wtf create the form with python code then creating instance and render. flask templete engine converted this in simple html form. (This contain userLogin , registration, uploadFile form). This also contain validation of field like InputRequired,Length,Email validation, or email already exists.\n\n * **Static folder:** This folder contains static image in image folder\n \n * **Templates folder:** This folder contains client page like base.htm login.html, register.html, dashbord.html, chat.html, logout.html. In which chat.html conatin all socket connection , join room, leave room all javascripts code in scripts tag. I use jquery write this code. I use **UI KIT** framwork for html page design. \n# Features of chatapp\n * Email validation with sending email link varification.\n * use ui kit framwork best and faster web page design just like bootstarp.\n\n# Deploy\n I use heroku free host website paltform for testing purpose. 
I used PostgreSQL, an object-relational database system.\n I used these commands:\n ```\n myenv\\scripts\\activate.bat\n pip install gevent-websocket\n pip install gunicorn\n pip freeze > requirement.txt\n git init\n git add .\n git commit -m '-------'\n heroku login\n heroku git:remote -a 'name of heroku app'\n git push heroku master\n ```\n # Run Chatapp\n My chatapp is hosted on a Heroku server. You can visit it by clicking this link:\n [Chat-group](https://mychat-group.herokuapp.com)\n\n # Reference \n * [flask quickstart ](https://flask.palletsprojects.com/en/1.1.x/quickstart/)\n * [flask socketio](https://flask-socketio.readthedocs.io/en/latest/)\n\n\n" } ]
5
murfreesboro/dftints
https://github.com/murfreesboro/dftints
738abd69944326a9047cff7649aea283312aa325
6e3d5ebb376f0494b5561e8de318d3e9ba42723a
fa873a27cfc0840d772face7a6fd939d85aa3541
refs/heads/master
2020-05-20T03:34:58.383849
2014-09-09T12:40:12
2014-09-09T12:40:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6655580997467041, "alphanum_fraction": 0.6690916419029236, "avg_line_length": 28.69135856628418, "blob_id": "8a3dd4d7931e5ff1516aafb0b40d52f76a73d420", "content_id": "10ddd14a87fc8724e085a729d72934de64330516", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4811, "license_type": "permissive", "max_line_length": 166, "num_lines": 162, "path": "/generateBasis.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used to generate the DFT basis set value etc.\nup to the fourth derivatives\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\nimport infor\nimport shell\nimport basis\nimport codeprint\nimport shellsymbol\nimport derivorder\nimport derivparser\n\ndef generateCode(order):\n\t\"\"\"\n\tprint out the code \n\t\"\"\"\n\n\t# get the file name\n\tif order == 1:\n\t\tfunName = \"dftbasisderiv1\"\n\telif order == 2:\n\t\tfunName = \"dftbasisderiv2\"\n\telif order == 3:\n\t\tfunName = \"dftbasisderiv3\"\n\telif order == 4:\n\t\tfunName = \"dftbasisderiv4\"\n\telse:\n\t\tprint \"Improper order in the generateCode of generateBasis.py\"\n\t\tsys.exit()\n\tinf = funName + \".cpp\"\n\n\n\t# now we open the file\t\n\tf = open(inf, \"w\")\n\tcodeprint.initilizeIndent()\n\n\t# the comment part for the file\n\tf.write(\"/**\\n\")\n\tline = \" * This function is used to generate \"+str(order)+\" derivatives for basis set \" \n\tcodeprint.printLine(line,f)\n\tline = \" * The basis set derivatives are evaluated for the given shell which \" \n\tcodeprint.printLine(line,f)\n\tline = \" * is characterized by the L(no composite shell!). Generally, by given the \"\n\tcodeprint.printLine(line,f)\n\tline = \" * derivative order (for exmaple, X, Y Z or XX, YY or XYY etc.)\"\n\tcodeprint.printLine(line,f)\n\tline = \" * for an arbitrary shell we could combine the radial part and \"\n\tcodeprint.printLine(line,f)\n\tline = \" * the angular part together so to form the result.\" \n\tcodeprint.printLine(line,f)\n\tline = \" * The result is arranged as: (nBas, ng, nDerivOrder)\"\n\tcodeprint.printLine(line,f)\n\tline = \" * nBas is the number of Cartesian type basis set for shell with L\"\n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param ng number of grid points \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param L angular momentum of the shell \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param nTolCarBas number of Cartesian basis set in the ang array \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param ang angular part of the basis set values(nTolCarBas,ng) \"\n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param rad radial part of the basis set values \"\n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\return basis derivatives of basis set values for the given order\"\n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\author Fenglai Liu and Jing Kong \" \n\tcodeprint.printLine(line,f)\n\tf.write(\" */\\n\")\n\n\t# including head files\n\tline = \"#include\\\"libgen.h\\\"\" \n\tcodeprint.printLine(line,f)\n\tline = \"#include\\\"batchbasis.h\\\"\" \n\tcodeprint.printLine(line,f)\n\tline = \"using namespace batchbasis;\" \n\tcodeprint.printLine(line,f)\n\tf.write(\"\\n\")\n\n\t# print out the function name\n\tline = \"void \" + \"BatchBasis::\" + funName + '''(const UInt& ng, const UInt& L, const UInt& nTolCarBas, const Double* ang, const Double* rad, Double* basis) const '''\n\tcodeprint.printLine(line,f)\n\n\t# here we enter in real 
code\n\tline = \"{\"\n\tcodeprint.printLine(line,f)\n\tcodeprint.increaseIndentation()\n\tf.write(\"\\n\")\n\n # set up the nBas\n\tline = \"// now we set up the nBas for the computation\"\n\tcodeprint.printLine(line,f)\n line = \"UInt nBas = (L+1)*(L+2)/2;\"\n\tcodeprint.printLine(line,f)\n\tf.write(\"\\n\")\n\n\t# now we create the derivatives order\n\torderList = derivorder.derivOrderGeneration(order)\n\tfor derivOrder in orderList:\n\n\t\t# comment \n\t\tline = \"// now we do derivatives for the given basis set to \" + derivOrder\n\t\tcodeprint.printLine(line,f)\n\t\tindexDerivOrder = orderList.index(derivOrder)\n\t\tif indexDerivOrder > 0 :\n\t\t\tline = \"basis = basis + \" + \"ng*nBas; \"\n\t\t\tcodeprint.printLine(line,f)\n\t\t\tf.write(\"\\n\")\n\n\t\t# within the loop, actually we choose doing code from S to I\n\t\tmaxL = infor.getMaxL()\n\t\tfor L in range(maxL+1):\n\n\t\t\t# print out the block\n\t\t\tif L == 0:\n\t\t\t\tline = \"if(L == \" + str(L) + \") {\" \n\t\t\telse:\n\t\t\t\tline = \"} else if(L == \" + str(L) + \") {\" \n\t\t\tcodeprint.printLine(line,f)\n\t\t\tcodeprint.increaseIndentation()\n\t\t\tf.write(\"\\n\")\n\n\t\t\t# now it's the real work module\n\t\t\tline = \"for(UInt ip = 0; ip<ng; ip++) {\" \n\t\t\tcodeprint.printLine(line,f)\n\t\t\tcodeprint.increaseIndentation()\n\t\t\tline = \"Double* bas = &basis[ip*nBas];\" \n\t\t\tcodeprint.printLine(line,f)\n\t\t\tline = \"const Double* angArray = &ang[ip*nTolCarBas];\" \n\t\t\tcodeprint.printLine(line,f)\n\t\t\ts = shell.shell(L)\n\t\t\tbasList = s.getBasis()\n\t\t\tfor bas in basList:\n\t\t\t\tformula = {\"0\":bas}\n\t\t\t\tresult = { }\n\t\t\t\tderivparser.getDerivExpression(formula, derivOrder, 0, result)\n\t\t\t\tind = basList.index(bas)\n\t\t\t\tderivparser.printExpression(result,derivOrder,ind,f)\n\t\t\t\n\t\t\t# block end for ip\n\t\t\tcodeprint.decreaseIndentation() \n\t\t\tline = \"}\" \n\t\t\tcodeprint.printLine(line,f)\n\t\t\tcodeprint.decreaseIndentation() \n\t\t\tf.write(\"\\n\")\n\n\t\t# block end with the L\n\t\tline = \"}\" \n\t\tcodeprint.printLine(line,f)\n\t\tf.write(\"\\n\\n\")\n\n\t# end of function block\n\tcodeprint.decreaseIndentation()\n\tline = \"}\" \n\tcodeprint.printLine(line,f)\n\tf.write(\"\\n\\n\")\n\n\t# end of whole file\n\tf.close()\n\n" }, { "alpha_fraction": 0.6226499676704407, "alphanum_fraction": 0.6347358822822571, "avg_line_length": 17.756301879882812, "blob_id": "62aab0b39c1a37cd369c2ad57852c7a528296dfb", "content_id": "d0e74100cf8a4c8fedb56bc594d9f68e3cb63570", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2234, "license_type": "permissive", "max_line_length": 74, "num_lines": 119, "path": "/shell.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used describe the class related to the \"Shell\". 
\nShell actually is a group of basis set functions in the quantum chemistry,\nall of these basis set functions share the same L, namely:\nL = l+m+n\nFor example, Shell of L=1 has three basis set functions, namely\nPx 1,0,0\nPy 0,1,0\nPz 0,0,1\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\nimport basis\nimport shellsymbol\nimport infor\n\nclass shell:\n\n\tdef __init__(self,L0):\n\t\t\"\"\"\n\t\tconstructor for the shell class\n\t\tL0 is the shell's angular momentum type\n\t\tIn the initialization, we also generate all of the basis set functions\n\t\t\"\"\"\n\t\tself.L = L0\n\n\t\t# check the L, it should not be less than zero\n\t\tif L0 < 0:\n\t\t\tprint \"L can not be less than zero in shell class\\n\"\n\t\t\tsys.exit()\n\n\n\tdef __eq__(self,t):\n\t\tif self.L == t.L:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef __ne__(self,t):\n\t\tif self.L != t.L:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef getL(self):\n\t\t\"\"\"\n\t\treturn the L\n\t\t\"\"\"\n\t\treturn self.L\n\n\n\tdef getBasis(self):\n\t\t\"\"\"\n\t\treturn the full basis set list\n\t\t\"\"\"\n\n\t\t# get the basis set order\n\t\torder = self.generateBasisSetOrders()\n\n\t\t# generate the basis set functions for this shell\n\t\t# each basis set function is characterized by three numbers\n\t\tl = len(order)/3\n\t\tbasisSets = [ ]\n\t\ti = 0\n\t\twhile i < l:\n\t\t\tbasisSet = basis.basis(order[3*i],order[3*i+1],order[3*i+2])\n\t\t\tbasisSets.append(basisSet)\n\t\t\ti = i + 1\n\t\treturn basisSets\n\t\t\t\n\n\tdef generateBasisSetOrders(self):\n\t\t\"\"\"\n\t\tgenerating the basis set's ordering\n\t\t\"\"\"\n\t\torderList = []\n\t\tL = self.L\n\t\ti = 0\n\t\tbasisSetOrder = infor.getBasisSetOrder()\n\t\tif basisSetOrder == \"libint\":\n\t\t\twhile i <= L:\n\t\t\t\tnx = L - i\n\t\t\t\tj = 0\n\t\t\t\twhile j<=i: \n\t\t\t\t\tny = i-j\n\t\t\t\t\tnz = j\n\t\t\t\t\torderList.append(nx)\n\t\t\t\t\torderList.append(ny)\n\t\t\t\t\torderList.append(nz)\n\t\t\t\t\tj = j + 1\n\t\t\t\ti = i + 1\n\t\telse:\n\t\t\tprint \"Unrecognized basis set ordering to generate basis sets\\n\"\n\t\t\tsys.exit()\n\n\t\treturn orderList\n\n\n\tdef hasBasisSet(self,bas):\n\t\t\"\"\"\n\t\ttesting whether we have the basis set in the\n\t\tgiven shell\n\t\t\"\"\"\n\t\tbL = bas.getL()\n\t\tif bL == self.L:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef getName(self):\n\t\t\"\"\"\n\t\tgive the name for this shell\n\t\t\"\"\"\n\t\tname = shellsymbol.getShellSymbol(self.L)\n\t\treturn name\n\n\n" }, { "alpha_fraction": 0.6852272748947144, "alphanum_fraction": 0.6965909004211426, "avg_line_length": 21.950000762939453, "blob_id": "cb2e6bd8da79a55c0c0687fb579785b7e863fb89", "content_id": "8bb9ceac16545dae174296942579a410940ccf5b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 880, "license_type": "permissive", "max_line_length": 80, "num_lines": 40, "path": "/main.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nmain module for dft basis sets\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\nimport infor\nimport generateAngBasis\nimport generateBasis\nimport derivorder\n\n# setting the basis set order \nmaxLChoice = 6\nif len(sys.argv) == 2:\n\tmaxLChoice = int(sys.argv[1])\nelif len(sys.argv) > 2:\n\tprint \"
Please check it!\\n\"\n\tsys.exit()\ninfor.setBasisSetOrder()\ninfor.setMaxL(maxLChoice)\n\n# print out the angular part of code\ngenerateAngBasis.generateCode()\n\n# print out the basis set code\nfor i in range(4):\n\ti = i + 1\n\tgenerateBasis.generateCode(i)\n\n# finally, we try to print out the derivatives information\n# used in the program\ncount = 1\nfor i in range(4):\n\ti = i + 1\n\tdlist = derivorder.derivOrderGeneration(i)\n\tfor var in dlist:\n\t\tv = \"DERIV_\" + var\n\t\tline = \"UInt \" + v + \" = \" + str(count) + \";\"\n\t\tprint line\n\t\tcount = count + 1\n\n\n" }, { "alpha_fraction": 0.6059139966964722, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 22.225000381469727, "blob_id": "0d22ee67ab9213d681038f2ea2990f951141bd27", "content_id": "7a2c813c505aa06b57d3cf2e933d70a3df222807", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1860, "license_type": "permissive", "max_line_length": 78, "num_lines": 80, "path": "/derivorder.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used to generate the derivatives sequence for the program\nThe derivatives sequence is like this, if we want to do 3rd order derivatives;\nfor example; do derivatives with respect to X, Y and Z then the derivative\nvar is written into \"XYZ\". For each order derivatives, we will set up\nthe sequence and then parse it later\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\n\n\ndef derivOrderGeneration(order):\n\t\"\"\"\n\tgenerating the derivatives sequence:\n\tXX, YY etc. for derivatives order 2\n\tXXX, XYY, ZZZ etc. for derivatives order 3\n\t\"\"\"\n\taxis = ( \"X\", \"Y\", \"Z\" )\n\tresult = [ ]\n\tif order == 1:\n\t\tresult = axis\n\telif order == 2:\n\t\tfor i in axis:\n\t\t\tfor j in axis:\n\t\t\t\tif axis.index(j) > axis.index(i):\n\t\t\t\t\tcontinue\n\t\t\t\tvar = j + i\n\t\t\t\tresult.append(var)\n\telif order == 3:\n\t\tfor i in axis:\n\t\t\tfor j in axis:\n\t\t\t\tfor k in axis:\n\t\t\t\t\tif axis.index(j) > axis.index(i):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif axis.index(k) > axis.index(j):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tvar = k + j + i\n\t\t\t\t\tresult.append(var)\n\telif order == 4:\n\t\tfor i in axis:\n\t\t\tfor j in axis:\n\t\t\t\tfor k in axis:\n\t\t\t\t\tfor l in axis:\n\t\t\t\t\t\tif axis.index(j) > axis.index(i):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif axis.index(k) > axis.index(j):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif axis.index(l) > axis.index(k):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tvar = l + k + j + i\n\t\t\t\t\t\tresult.append(var)\n\telse:\n\t\tprint \"Improper order in the derivOrderGeneration\\n\"\n\t\tsys.exit()\n\t\n\t# return\n\treturn result\n\n\ndef parseDeriv(var):\n\t\"\"\"\n\tfor each given var, which is in format of XX, YY, XYZ etc.\n\twe need to parse it to figure out that how many X, how many\n\tY and how many Z it has\n\t\"\"\"\n\tnx = 0\n\tny = 0\n\tnz = 0\n\tfor i in range(len(var)):\n\t\tif var[i] == \"X\":\n\t\t\tnx = nx + 1\n\t\telif var[i] == \"Y\":\n\t\t\tny = ny + 1\n\t\telif var[i] == \"Z\":\n\t\t\tnz = nz + 1\n\t\telse:\n\t\t\tprint \"Illegal character got in parseDeriv. 
Could be only X, Y or Z\"\n\t\t\tsys.exit()\n\treturn (nx, ny, nz)\n\n\n" }, { "alpha_fraction": 0.6982758641242981, "alphanum_fraction": 0.7093595862388611, "avg_line_length": 18.309524536132812, "blob_id": "6b380b8db2e7befc0e0440c8e2c3f3187395cdad", "content_id": "0c370f5755657a2293d06dfa1a8fc63b86ed37c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 812, "license_type": "permissive", "max_line_length": 64, "num_lines": 42, "path": "/codeprint.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used to provide functions to print the real codes\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\n\n# the indentLength indicates current indent length in the code\n# in default, each time we increase 3\nindentLength = 0\n\ndef increaseIndentation():\n\t\"\"\"\n\tincrease the indent for 3\n\t\"\"\"\n\tglobal indentLength\n\tindentLength = indentLength + 3\n\ndef decreaseIndentation():\n\t\"\"\"\n\tdecrease the indent for 3\n\t\"\"\"\n\tglobal indentLength\n\tindentLength = indentLength - 3\n\tif indentLength < 0:\n\t\tprint \"Illegal indentLength in printcode.py\\n\"\n\t\tsys.exit()\n\ndef printLine(line,f):\n\t\"\"\"\n\tprint out the given line of code\n\t\"\"\"\n\tglobal indentLength\n\tif indentLength != 0:\n\t\tfor i in range(indentLength):\n\t\t\tf.write(\" \")\n\tf.write(line)\n\tf.write(\"\\n\")\n\ndef initilizeIndent():\n\tglobal indentLength\n\tindentLength = 0\n\n" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.53125, "avg_line_length": 7, "blob_id": "f7eafa42928d90ac2cd54271739a3437dda60b71", "content_id": "4eb579bfa26f9cbe80fe2c54754c023847f61837", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 32, "license_type": "permissive", "max_line_length": 12, "num_lines": 4, "path": "/rm.sh", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "#! 
/bin/bash\n\nrm *.pyc\nrm *.cpp\n" }, { "alpha_fraction": 0.554770290851593, "alphanum_fraction": 0.5583038926124573, "avg_line_length": 24.590909957885742, "blob_id": "f46ed044830cbcedc9dd9ef7cb1b4ceb5d2fafa3", "content_id": "6676777791da1f5a91cc67bb6eec0ee2ef60d566", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "permissive", "max_line_length": 78, "num_lines": 22, "path": "/shellsymbol.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module stores the shell symbols\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\n\n# the shell name list is taken from libint package\nSHELL_NAME_LIST = [\n'S', 'P', 'D', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',\n'O', 'Q', 'R', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\ndef getShellSymbol(i):\n\n\tglobal SHELL_NAME_LIST\n\tl = len(SHELL_NAME_LIST)\n\tif i>=l:\n\t\tprint \"Why you need to claim such high order shells, L>20?\"\n print \"however, we still do it, but be careful with your code\"\n return \"L\" + str(i)\n\telse:\n\t\treturn SHELL_NAME_LIST[i]\n\n\n\n" }, { "alpha_fraction": 0.5812381505966187, "alphanum_fraction": 0.6052987575531006, "avg_line_length": 20.494186401367188, "blob_id": "bba64d0033c7f45ad7057653430f246fbe3f15cc", "content_id": "1ec593fdf5f425b03abff6e5fd5e4ee4decff5e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3699, "license_type": "permissive", "max_line_length": 89, "num_lines": 172, "path": "/basis.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used describe the class related to the \"basis set functions\". \nOriginally, each basis set function is a combination of Gaussian primitive\nfunctions:\npsi = sum_{mu}d_{mu}chi_{mu}\npsi is the basis set function, and chi_{mu} is the primitive functions.\nAll of chi are on the same center as psi, and d_{mu} is some fixed \ncoefficients. All of Gaussian primitive functions share the same angular\nmomentum with the basis set.\n\nFor each Gaussian primitive function, it has the form that:\nchi = x^{l}y^{m}z^{n}e^{-alpha*r^{2}}\nx^{l}y^{m}z^{n} is its angular momentum part, which is characterized by\nthree number of l, m, and n. The e^{-alpha*r^{2}} is its radial part,\nso l,m,n combined with alpha and its prefactor of d_{mu}, then we know\nall of information to get psi.\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\nimport shellsymbol\n\nclass basis:\n\n\tdef __init__(self,l0,m0,n0):\n\t\t\"\"\"\n\t\tBasis class is characterized by three numbers\n\t\tThey are corresponding to the angular momentum numbers\n\t\t\"\"\"\n\t\tself.l = l0\n\t\tself.m = m0\n\t\tself.n = n0\n\n\t\t# test the angular momentum number\n\t\tif l0<0 or m0<0 or n0<0:\n\t\t\tprint \"Illegal angular momentum number in basis.py. 
It should not be less than zero\\n\"\n\t\t\tprint l0,m0,n0\n\t\t\tsys.exit()\n\n\n\tdef __eq__(self,t):\n\t\t\"\"\"\n\t\ttesting whether two basis sets are equal with each other\n\t\t\"\"\"\n\t\tl0,m0,n0 = self.getlmn()\n\t\tl1,m1,n1 = t.getlmn()\n\t\tif l0 == l1 and m0 == m1 and n0 == n1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef __ne__(self,t):\n\t\t\"\"\"\n\t\ttesting whether two basis sets are equal with each other\n\t\t\"\"\"\n\t\tl0,m0,n0 = self.getlmn()\n\t\tl1,m1,n1 = t.getlmn()\n\t\tif l0 == l1 and m0 == m1 and n0 == n1:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\tdef getName(self):\n\t\t\"\"\"\n\t\tdepending on the l,m,n; we get the name for this basis set\n\t\t\"\"\"\n\t\tL = self.l + self.m + self.n\n\t\tname = shellsymbol.getShellSymbol(L)\n\t\tif self.l > 0:\n\t\t\tif self.l == 1:\n\t\t\t\tname = name + \"x\"\n\t\t\telse:\n\t\t\t\tname = name + str(self.l) + \"x\"\n\t\tif self.m > 0:\n\t\t\tif self.m == 1:\n\t\t\t\tname = name + \"y\"\n\t\t\telse:\n\t\t\t\tname = name + str(self.m) + \"y\"\n\t\tif self.n > 0:\n\t\t\tif self.n == 1:\n\t\t\t\tname = name + \"z\"\n\t\t\telse:\n\t\t\t\tname = name + str(self.n) + \"z\"\n\t\treturn name\n\n\n\tdef getlmn(self):\n\t\t\"\"\"\n\t\tl,m,n is given\n\t\t\"\"\"\n\t\treturn (self.l, self.m, self.n)\n\n\n\tdef getComponent(self,axis):\n\t\t\"\"\"\n\t\tfor the given axis (X, Y or Z) we return the component\n\t\t\"\"\"\n\t\tif axis == \"X\":\n\t\t\t\treturn self.l\n\t\telif axis == \"Y\":\n\t\t\t\treturn self.m\n\t\telif axis == \"Z\":\n\t\t\t\treturn self.n\n\t\telse:\n\t\t\tprint \"Wrong axis passed in the getComponent\"\n\t\t\tsys.exit()\n\n\tdef getL(self):\n\t\t\"\"\"\n\t\treturn the total angular momentum number of L\n\t\t\"\"\"\n\t\tL = self.l + self.m + self.n\n\t\treturn L\n\n\n\tdef loweringAng(self,axis):\n\t\t\"\"\"\n\t\tfor the given axis (X, Y or Z) we determine\n\t\twhich component to lowering\n\t\t\"\"\"\n\t\tif axis == \"X\":\n\t\t\tl1 = self.l - 1\n\t\t\tm1 = self.m\n\t\t\tn1 = self.n\n\t\t\tif l1 < 0:\n\t\t\t\treturn (None,None,None)\n\t\t\telse:\n\t\t\t\treturn (l1,m1,n1)\n\t\telif axis == \"Y\":\n\t\t\tl1 = self.l \n\t\t\tm1 = self.m - 1\n\t\t\tn1 = self.n\n\t\t\tif m1 < 0:\n\t\t\t\treturn (None,None,None)\n\t\t\telse:\n\t\t\t\treturn (l1,m1,n1)\n\t\telif axis == \"Z\":\n\t\t\tl1 = self.l \n\t\t\tm1 = self.m\n\t\t\tn1 = self.n - 1\n\t\t\tif n1 < 0:\n\t\t\t\treturn (None,None,None)\n\t\t\telse:\n\t\t\t\treturn (l1,m1,n1)\n\t\telse:\n\t\t\tprint \"Wrong axis passed in the loweringAng\"\n\t\t\tsys.exit()\n\n\n\tdef raisingAng(self,axis):\n\t\t\"\"\"\n\t\tfor the given axis (X, Y or Z) we determine\n\t\twhich component to raising up\n\t\t\"\"\"\n\t\tif axis == \"X\":\n\t\t\tl1 = self.l + 1\n\t\t\tm1 = self.m\n\t\t\tn1 = self.n\n\t\telif axis == \"Y\":\n\t\t\tl1 = self.l \n\t\t\tm1 = self.m + 1\n\t\t\tn1 = self.n\n\t\telif axis == \"Z\":\n\t\t\tl1 = self.l \n\t\t\tm1 = self.m\n\t\t\tn1 = self.n + 1\n\t\telse:\n\t\t\tprint \"Wrong axis passed in the raisingAng\"\n\t\t\tsys.exit()\n\t\treturn (l1,m1,n1)\n\n\n" }, { "alpha_fraction": 0.7529411911964417, "alphanum_fraction": 0.7529411911964417, "avg_line_length": 20.25, "blob_id": "a0dd9977a66d4098d8c2cf57e55de8a1a61f009c", "content_id": "5a1afe348a88d4b247c73fb837b8327aa14365d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 85, "license_type": "permissive", "max_line_length": 67, "num_lines": 4, "path": "/README.md", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", 
"text": "dftints\n=======\n\npython code to generate basis set derivatives value for DFT package\n" }, { "alpha_fraction": 0.6026574969291687, "alphanum_fraction": 0.612301766872406, "avg_line_length": 26.122093200683594, "blob_id": "8a0fa04ae961f399540af2507f6b58cb4f657122", "content_id": "b2b0cd79354a59255cf31ab1e19479ce688a471b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4666, "license_type": "permissive", "max_line_length": 74, "num_lines": 172, "path": "/derivparser.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used to generate the parser for the given basis set with\nrespect to the given derivatives\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\nimport shell\nimport basis\nimport codeprint\n\ndef getDerivExpression(formula, deriv, order, final_result):\n\t\"\"\"\n\tfor the given formula, as well as the derivative var(something like\n\tXXXX, XXYZ etc. generated in derivorder.py). We can get the derivatives \n\texpression for the current formula. We note that this process continues\n\tin recursively way until all of the derivatives are processed. If the \n\tfinal order is arrived, we will push the result into the final_result\n\t\"\"\"\n\tresult = { }\n\taxis = deriv[order]\n\tnunderscore = 1\n\tfor k, bas in formula.iteritems():\n\n\t\t# to get rid of the \"-\" sign first\n\t\tk = k.replace(\"-\",\"\")\n\n\t\t# get the first term in the derivative expression\n\t\t# the first term is \"(l,m,n)*chi(l,m,n - delta)\"\n\t\t(l,m,n) = bas.loweringAng(axis)\n\n # add a comment: if the loweringAng produce new \n # l,m,n no matter which one is smaller than 0;\n # then l,m,n are all none\n # we only need to check that whether l is none or not\n # so that to know the new basis set exist or not\n\t\tif l is not None:\n\t\t\tnewBasis = basis.basis(l,m,n)\n\t\t\tcom = bas.getComponent(axis)\n\t\t\tnewkey1 = k + \"_\" + str(com)\n\t\t\tif result.has_key(newkey1):\n\t\t\t\tfor i in range(nunderscore):\n\t\t\t\t\tnewkey1 = newkey1 + \"-\"\n\t\t\t\tnunderscore = nunderscore + 1\n\t\t\t\tresult[newkey1] = newBasis\n\t\t\telse:\n\t\t\t\tresult[newkey1] = newBasis\n\n\t\t# get the second term\n\t\t# the second term is 2alpha*chi(l,m,n + delta)\n\t\t(l,m,n) = bas.raisingAng(axis)\n\t\tnewBasis = basis.basis(l,m,n)\n\t\tnewkey2 = k + \"_\" + \"2alpha\"\n\t\tif result.has_key(newkey2):\n\t\t\tfor i in range(nunderscore):\n\t\t\t\tnewkey2 = newkey2 + \"_\"\n\t\t\tnunderscore = nunderscore + 1\n\t\t\tresult[newkey2] = newBasis\n\t\telse:\n\t\t\tresult[newkey2] = newBasis\n\n\t# now let's judge whether we need to proceed it\n\torder = order + 1\n\tdesire_order = len(deriv)\n\tif order == desire_order:\n\t\tfor k, bas in result.iteritems():\n\t\t\tfinal_result[k] = bas\n\telse:\n\t\tgetDerivExpression(result, deriv, order, final_result)\n\n\ndef printExpression(expression, derivOrder, basIndex, f):\n\t\"\"\"\n\tnow we print out the derivative expression here for the \n\tgiven derivative order. 
\n\t\"\"\"\n\t# set up the LHS of the expression\n\tline = \"bas[\" + str(basIndex) + \"] = \"\n\n\t# get the length of the derivative order\n\tl = len(derivOrder)\n\n\t# we use count to know whether this is the first term\n\tcount = 0\n\n\t# now let's search each order - for every order,\n\t# we have a rad term\n\tfor order in range(l+1):\n\n\t\t# set up the list for basis and coefficients\n\t\t# they are corresponding to the same rad term\n\t\tbasList = [ ] \n\t\tcoeList = [ ]\n\n\t\tfor k, bas in expression.iteritems():\n\n\t\t\t# to get rid of the \"-\" sign first\n\t\t\tk = k.replace(\"-\",\"\")\n\t\t\tklist = k.split(\"_\")\n\n\t\t\t# determine how many 2alpha we have in the k\n\t\t\t# we only pick up these who math the order\n\t\t\tn2alpha = 0\n\t\t\tfor i in klist:\n\t\t\t\tif i == \"2alpha\":\n\t\t\t\t\tn2alpha = n2alpha + 1\n\t\t\tif n2alpha != order:\n\t\t\t\tcontinue\n\n\t\t\t# determine the coefficient in the k\n\t\t\tcoe = 1\n\t\t\tfor i in klist:\n\t\t\t\tif i.isdigit() and i != \"0\":\n\t\t\t\t\tcoe = coe*int(i)\n\n\t\t\t# push back the basis and coe\n\t\t\tif bas in basList:\n\t\t\t\tindex = basList.index(bas)\n\t\t\t\tcoeList[index] = coeList[index] + coe\n\t\t\telse:\n\t\t\t\tbasList.append(bas)\n\t\t\t\tcoeList.append(coe)\n\n\t\tif len(basList) > 0:\n\t\t\t\n\t\t\t# give the offset for the radial array\n\t\t\t# we add the minus sign to this part\n\t\t\tif order == 0:\n\t\t\t\trad = \"rad[ip]\"\n\t\t\telif order == 1:\n\t\t\t\trad = \"rad[ip+ng]\"\n\t\t\telse:\n\t\t\t\trad = \"rad[ip+\" + str(order) + \"*ng]\"\n\t\t\tif order % 2 == 1:\n\t\t\t\trad = \"-\" + rad \n\t\t\telif count > 0: # these term should have \"+\" sign\n\t\t\t\trad = \"+\" + rad\n\t\t\n\t\t\t# set the basis set, by combining it with coefficients \n\t\t\t# we will get the term corresponding to the rad term\n\t\t\tang = \"*\"\n\t\t\tif len(basList) > 1:\n\t\t\t\tang = ang + \"(\"\n\t\t\tfor bas in basList:\n\t\t\t\tL = bas.getL()\n\t\t\t\tgOffSet = L*(L+1)*(L+2)/6 # counting all of lower shell index since S\n\t\t\t\ts = shell.shell(L)\n\t\t\t\tbList = s.getBasis()\n\t\t\t\tbind = bList.index(bas)\n\t\t\t\tindex = bind + gOffSet\n\t\t\t\tcind = basList.index(bas)\n\t\t\t\tif coeList[cind] != 1:\n\t\t\t\t\tc = str(coeList[cind]) + \"*\"\n\t\t\t\telse:\n\t\t\t\t\tc = \"\"\n\t\t\t\tang = ang + c\n\t\t\t\tang = ang + \"angArray[\" + str(index) + \"]\" \n\t\t\t\t#ang = ang + c + bas.getName()\n\t\t\t\tif cind == len(basList) - 1:\n\t\t\t\t\tif ang.find(\"(\") > 0:\n\t\t\t\t\t\tang = ang + \")\"\n\t\t\t\telse:\n\t\t\t\t\tang = ang + \"+\"\n\n\t\t\t# now add this order \n\t\t\tline = line + rad + ang\n\n\t\t\t# finally add counting\n\t\t\tcount = count + 1\n\n\tline = line + \";\"\n\tcodeprint.printLine(line,f)\n\n" }, { "alpha_fraction": 0.6354514956474304, "alphanum_fraction": 0.6387959718704224, "avg_line_length": 20.261905670166016, "blob_id": "0443e1771fff0ef72da31309f7a7669cd546a4f8", "content_id": "0cf8af8235ee60c091ba3c274871bcb386d8a18d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 897, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/infor.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nget the maximum L for generating the angular part of dft basis \n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\n\n# global data \nbasisSetOrder = \" \"\nmaxL = 6 # this is default, we just generate all of codes up to I orbital\n\ndef setBasisSetOrder():\n\t\"\"\"\n\tset 
the basis set order\n\tin the future we can define other basis set orders\n\tif you want, just modify the shell.py\n\t\"\"\"\n\tglobal basisSetOrder\n\tbasisSetOrder = \"libint\"\n\ndef setMaxL(choice):\n\t\"\"\"\n\tset the maxL\n\tin the future we can define other basis set orders\n\tif you want, just modify the shell.py\n\t\"\"\"\n\tglobal maxL\n\tif choice >= 0:\n\t\tmaxL = choice\n\telse:\n\t\tprint \"Illegal choice provided in setMaxL, must be an integer >=0\"\n\t\tsys.exit()\n\ndef getBasisSetOrder():\n\tglobal basisSetOrder\n\treturn basisSetOrder\n\ndef getMaxL():\n\tglobal maxL\n\treturn maxL\n\n\n\n\n" }, { "alpha_fraction": 0.6268656849861145, "alphanum_fraction": 0.6344278454780579, "avg_line_length": 25.866310119628906, "blob_id": "0185e4db622a9c91360b77e5745be3c856fc555c", "content_id": "95a65d10f5530edee439481105bacacc305b3383", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5025, "license_type": "permissive", "max_line_length": 122, "num_lines": 187, "path": "/generateAngBasis.py", "repo_name": "murfreesboro/dftints", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module is used to generate the angular part of codes for DFT basis set \nmodule\n\"\"\"\n__author__ = \"Fenglai Liu\"\nimport sys\nimport os\nimport infor\nimport shell\nimport basis\nimport codeprint\nimport shellsymbol\n\ndef generateCode():\n\t\"\"\"\n\tprint out the code for angular part\n\t\"\"\"\n\tf = open(\"angdftbasis.cpp\", \"w\")\n\tmaxL = infor.getMaxL() + 4 # we consider the fourth derivatives of basis set\n\n\t# the comment part for the file\n\tf.write(\"/**\\n\")\n\tline = \" * This function is used to generate the angular part for the \" \n\tcodeprint.printLine(line,f)\n\tline = \" * Cartesian type of basis set functions on a given atom. 
The \" \n\tcodeprint.printLine(line,f)\n\tline = \" * basis set functions are evaluated for the given shell which \" \n\tcodeprint.printLine(line,f)\n\tline = \" * is characterized by the lmax value.\" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param ng number of grid points \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param pts grid point coordinates(3*ng) \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param c basis set center coordinates(3) \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\param lmax maximum L value of all shells on this atom \" \n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\return ang angular part of the basis set values(nCarBas,ng) \"\n\tcodeprint.printLine(line,f)\n\tline = \" * \\\\author Fenglai Liu and Jing Kong \" \n\tcodeprint.printLine(line,f)\n\tf.write(\" */\\n\")\n\n\t# including head files\n\tline = \"#include\\\"libgen.h\\\"\" \n\tcodeprint.printLine(line,f)\n\tline = \"#include\\\"batchbasis.h\\\"\" \n\tcodeprint.printLine(line,f)\n\tline = \"using namespace batchbasis;\" \n\tcodeprint.printLine(line,f)\n\tf.write(\"\\n\\n\")\n\n\t# print out the function name\n\tline = \"void BatchBasis::angDFTBasis(const UInt& ng, const UInt& lmax, const Double* pts, const Double* c, Double* ang)\" \n\tcodeprint.printLine(line,f)\n\n\t# here we enter in real code\n\tline = \"{\"\n\tcodeprint.printLine(line,f)\n\tcodeprint.increaseIndentation()\n\tf.write(\"\\n\")\n\n\t# now begin the loop over grids\n\t# for each grid point, we calculate all the\n\t# possible angular basis sets\n\tline = \"for(UInt i=0; i<ng; i++) {\" \n\tcodeprint.printLine(line,f)\n\tcodeprint.increaseIndentation()\n\tf.write(\"\\n\")\n\tline = \"Double GCX = pts[i*3 ] - c[0]; // X\" # x\n\tcodeprint.printLine(line,f)\n\tline = \"Double GCY = pts[i*3+1] - c[1]; // Y\" # y\n\tcodeprint.printLine(line,f)\n\tline = \"Double GCZ = pts[i*3+2] - c[2]; // Z\" # z\n\tcodeprint.printLine(line,f)\n\n # set the total basis set number\n\tf.write(\"\\n\")\n line = \"// this is to evaluate total number of basis sets, L from 0 to lmax\"\n\tcodeprint.printLine(line,f)\n\tline = \"UInt nTolBas = (lmax+1)*(lmax+2)*(lmax+3)/6; \" \n\tcodeprint.printLine(line,f)\n\n\t# loop over the angular momentums\n\tf.write(\"\\n\")\n\tline = \"for(UInt L=0; L<= lmax; L++) {\"\n\tcodeprint.printLine(line,f)\n\tcodeprint.increaseIndentation()\n\n\t# loop over all possible angular momentums\n\tfor L in range(maxL+1):\n\t\t symbol = shellsymbol.getShellSymbol(L)\n\t\t if L == 0:\n\t \t\tline = \"if(L == \" + str(L) + \") {\" \n \t\t else:\t\t\n line = \"} else if(L == \" + str(L) + \") {\" \n\t\t codeprint.printLine(line,f)\n\t\t codeprint.increaseIndentation()\n\t\t s = shell.shell(L)\n\t\t printCodeForShell(s,f)\n\t\t codeprint.decreaseIndentation()\n\n\tline = \"}\" # matching the if \n\tcodeprint.printLine(line,f)\n\n\t# end block of l = lmin to lmax\n\tcodeprint.decreaseIndentation() \n\tline = \"}\" # matching the for loop on L\n\tcodeprint.printLine(line,f)\n\n\tcodeprint.decreaseIndentation()\n\tline = \"}\" # matching the loop over grids\n\tcodeprint.printLine(line,f)\n\n\t# end of function block\n\tcodeprint.decreaseIndentation()\n\tline = \"}\" # matching the main body function\n\tcodeprint.printLine(line,f)\n\tf.write(\"\\n\\n\")\n\n\t# end of whole file\n\tf.close()\n\n\ndef printCodeForShell(s,f):\n\t\"\"\"\n\tprint out the code of shell section\n\t\"\"\"\n # consider S shell\n\tL = s.getL() \n if L == 0:\n code = \"ang[0+i*nTolBas]\" + \" = ONE;\" \n \tcodeprint.printLine(code,f)\n 
return\n\n\t# real work\n\tbasisList = s.getBasis()\n\toffset = L*(L+1)*(L+2)/6 # calculate the position of this shell \n\tpos = 0\n\tfor bas in basisList:\n\t\tl,m,n = bas.getlmn()\n\t\tposition = pos + offset\n\t\tcode = \"ang[\" + str(position) + \"+i*nTolBas\" + \"]\" + \" = \" # LHS\n\n\t\t# get RHS\n\t\tif l > 0:\n\t\t\tcodex = getXYZMultiplication(\"GCX\",l)\n\t\telse:\n\t\t\tcodex = \"\"\n\t\tif m > 0:\n\t\t\tcodey = getXYZMultiplication(\"GCY\",m)\n\t\telse:\n\t\t\tcodey = \"\"\n\t\tif n > 0:\n\t\t\tcodez = getXYZMultiplication(\"GCZ\",n)\n\t\telse:\n\t\t\tcodez = \"\"\n\n\t\t# real expression\n\t\tif m > 0 or n > 0:\n\t\t\tif l > 0:\n\t\t\t\tcodex += \"*\"\n\t\tif n > 0:\n\t\t\tif m > 0:\n\t\t\t\tcodey += \"*\"\n\t\tcode += codex + codey + codez + \";\"\n\t\tcodeprint.printLine(code,f)\n\t\tpos = pos + 1\n\n\ndef getXYZMultiplication(v,order):\n\t\"\"\"\n\there for each GCX, GCY or GCZ we multiply it up to order\n\tand return the string\n\t\"\"\"\n\tresult = \"\"\n\tif order == 1:\n\t\tresult = v\n\telif order > 1:\n\t\tresult = v\n\t\tfor i in range(order-1):\n\t\t\tresult = result + \"*\" + v\n\telse:\n\t\tprint \"Inproper order in getXYZMultiplication\"\n\t\tsys.exit()\n\treturn result\n\n" } ]
12
johnatasr/CoolPark
https://github.com/johnatasr/CoolPark
222c29dd81d7c6339e111df483cd6b40cde16c75
e22c751516e14ac45ca4726764813e2aeee72801
ba4515bf8a6f0045b7e8e52d7b83d07574653189
refs/heads/master
2023-03-26T12:58:17.591147
2021-03-23T00:08:56
2021-03-23T00:08:56
349,555,151
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5629228949546814, "alphanum_fraction": 0.5967523455619812, "avg_line_length": 23.633333206176758, "blob_id": "c6ce2989d9c851e5b9a20acf7b59ba5e87e277b0", "content_id": "35e3461656286033ba3c1f1b5715e8c05810fb1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 56, "num_lines": 30, "path": "/automobilies/tests/tests_models.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from automobilies.models import Automobilie\nfrom django.test import TestCase\n\n\nclass AutomobilieModelTest(TestCase):\n \"\"\"\n Tests of Automobilie in parking.models.py\n \"\"\"\n\n def setUp(self):\n Automobilie.objects.create(\n plate=\"ABC-1234\",\n )\n\n def test_create_auto(self):\n Automobilie.objects.create(\n plate=\"CCC-1234\",\n )\n park = Automobilie.objects.all()\n self.assertEquals(park.count(), 2)\n\n def test_update_auto(self):\n Automobilie.objects.create(\n plate=\"CCC-1234\",\n )\n auto = Automobilie.objects.get(plate=\"CCC-1234\")\n auto.plate = \"AAA-1111\"\n auto.save()\n\n self.assertEquals(auto.plate, \"AAA-1111\")\n" }, { "alpha_fraction": 0.551101565361023, "alphanum_fraction": 0.5970011949539185, "avg_line_length": 34.5217399597168, "blob_id": "51f6fc562dae7bf86c57d7e457c37203935471cd", "content_id": "4515f72ae21bc172b337185701f5308d7e959701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3268, "license_type": "no_license", "max_line_length": 106, "num_lines": 92, "path": "/parking/tests/tests_entities.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from automobilies.entities import Automobilie\nfrom django.test import TestCase\nfrom datetime import datetime\nfrom parking.entities import ParkingOcurrency\n\n\nclass ParkingOcurrencyEnttityTestCase(TestCase):\n \"\"\"\n Tests of ParkingOcurrency in parking.entities.py\n \"\"\"\n\n def setUp(self):\n self.po_one = ParkingOcurrency(\n paid=False, left=False, auto=Automobilie(plate=\"ABC-1234\")\n )\n\n self.po_two = ParkingOcurrency(\n paid=True, left=True, auto=Automobilie(plate=\"CBA-4321\")\n )\n\n def test_isistance_object(self):\n self.assertIsInstance(self.po_one, object)\n self.assertIsInstance(self.po_two, object)\n\n def test_atributes_values_po(self):\n po1 = {\n \"id\": 1,\n \"paid\": False,\n \"left\": False,\n \"entry\": datetime(2021, 6, 10),\n \"exit\": datetime(2021, 6, 11),\n \"time\": \"24 hours\",\n \"auto\": Automobilie(plate=\"CBA-4321\"),\n }\n\n po2 = {\n \"id\": 2,\n \"paid\": False,\n \"left\": False,\n \"entry\": datetime(2021, 6, 10),\n \"exit\": datetime(2021, 6, 11),\n \"time\": \"24 hours\",\n \"auto\": Automobilie(plate=\"CBA-4321\"),\n }\n\n self.po_one.set_id(1)\n self.po_one.set_entry(datetime(2021, 6, 10))\n self.po_one.set_exit(datetime(2021, 6, 11))\n self.po_one.set_time(\"24 hours\")\n\n self.po_two.set_id(2)\n self.po_two.set_entry(datetime(2021, 6, 10))\n self.po_two.set_exit(datetime(2021, 6, 12))\n self.po_two.set_time(\"48 hours\")\n\n self.assertEquals(self.po_one.id, 1)\n self.assertEquals(self.po_one.id, po1[\"id\"])\n self.assertEquals(self.po_one.paid, False)\n self.assertEquals(self.po_one.paid, po1[\"paid\"])\n self.assertEquals(self.po_one.left, False)\n self.assertEquals(self.po_one.left, po1[\"left\"])\n self.assertEquals(self.po_one.entry, datetime(2021, 6, 10))\n self.assertEquals(self.po_one.entry, po1[\"entry\"])\n self.assertEquals(self.po_one.exit, 
datetime(2021, 6, 11))\n        self.assertEquals(self.po_one.exit, po1[\"exit\"])\n        self.assertEquals(self.po_one.time, \"24 hours\")\n        self.assertEquals(self.po_one.time, po1[\"time\"])\n\n    def test_atributes_type_po(self):\n        self.po_one.set_id(1)\n        self.po_one.set_entry(datetime(2021, 6, 10))\n        self.po_one.set_exit(datetime(2021, 6, 11))\n        self.po_one.set_time(\"24 hours\")\n\n        self.assertIsInstance(self.po_one.id, int)\n        self.assertIsInstance(self.po_one.paid, bool)\n        self.assertIsInstance(self.po_one.left, bool)\n        self.assertIsInstance(self.po_one.entry, object)\n        self.assertIsInstance(self.po_one.exit, object)\n        self.assertIsInstance(self.po_one.time, str)\n        self.assertIsInstance(self.po_one.auto, object)\n\n    def test_repr_class_po(self):\n\n        repr: str = \"Entity: ParkingOcurrency<id:1, time:24 hours, paid:False, left:False, auto:ABC-1234>\"\n\n        self.po_one.set_id(1)\n        self.po_one.set_entry(datetime(2021, 6, 10))\n        self.po_one.set_exit(datetime(2021, 6, 11))\n        self.po_one.set_time(\"24 hours\")\n\n        self.assertEquals(self.po_one.__str__(), repr)\n" }, { "alpha_fraction": 0.6900978088378906, "alphanum_fraction": 0.7114914655685425, "avg_line_length": 20.473684310913086, "blob_id": "8b112496f6bf6e5874b30a9b0a4f90d232616613", "content_id": "df26d590096ca594b116fde5ef212701cc7b5708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1663, "license_type": "no_license", "max_line_length": 167, "num_lines": 76, "path": "/readme.md", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "# CoolPark\n# Introduction\n\nCoolPark is a parking-records API that can be easily integrated with any device via REST\n\n\n## Requirements\n\n* Docker\n* Python 3.7 >\n\n## Technologies\n\n* Django\n* Django RestFramework \n* Poetry\n* Black\n* Docker\n* Gitlab CI\n\n\n## Getting Started\n\nSteps to set up the project with docker:\n\n1. `cd` into the project folder\n2. `docker-compose up --build`\n\nIf you prefer not to use docker:\n1. `Start a virtual env`\n2. `pip install -r requirements.txt`\n3. `python manage.py runserver`\n\nBy default the project will be available at localhost:8000 \n\n## How to use it? \n\n#### Check-in\n\nTo check in via the API, only the vehicle plate is needed in the request payload. The responsible endpoint is: \n\n```\ncurl --request POST \\\n  --url http://localhost:8000/parking \\\n  --header 'Content-Type: application/json' \\\n  --data '{\n\t\"plate\": \"ABC-1234\"\n}\n\t'\n```\n\n#### Check-out\n\nTo check out, you need to pass the parking ID in the URL and to have made the payment at the payment endpoint. The responsible endpoint is: \n\n```\ncurl --request PUT \\\n  --url http://localhost:8000/parking/<ID>/out\n```\n\n#### Do-Payment\n\nThis is the payment endpoint: the parking ID is passed in the URL, and once the payment has been processed the check-out can be done. Below is the payment endpoint:\n\n```\ncurl --request PUT \\\n  --url http://localhost:8000/parking/<ID>/pay\n```\n\n#### Parking-History\n\nThrough this endpoint it is possible to get the record history of a given vehicle by its plate. 
You only need to pass the plate in the URL, as in the example below: \n\n```\ncurl --request GET \\\n  --url http://localhost:8000/parking/ABC-1234\n```\n\n\n\n\n" }, { "alpha_fraction": 0.6045627593994141, "alphanum_fraction": 0.6730037927627563, "avg_line_length": 21.869565963745117, "blob_id": "6da8074a3a03951b2a4a05ac4387bebdb43b01ab", "content_id": "146d4d6c38b488a7ef1c784065d557753c61c790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 526, "license_type": "no_license", "max_line_length": 89, "num_lines": 23, "path": "/pyproject.toml", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"coolpark\"\nversion = \"0.1.0\"\ndescription = \"An application developed to make parking management easy through a Django API\"\nauthors = [\"Johnatas Rabelo\"]\nlicense = \"MIT\"\n\n[tool.poetry.dependencies]\npython = \"^3.7\"\nDjango = \"^3.1.7\"\ndjangorestframework = \"^3.12.2\"\npydantic = \"^1.8.1\"\npython-dateutil = \"2.8.1\"\nblack = \"^20.8b1\"\npsycopg2 = \"^2.8.6\"\ngunicorn = \"^20.0.4\"\ndjango-environ = \"^0.4.5\"\n\n[tool.poetry.dev-dependencies]\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n" }, { "alpha_fraction": 0.5436619520187378, "alphanum_fraction": 0.5436619520187378, "avg_line_length": 25.625, "blob_id": "f13cd32924f1f5d816e64460b882efd2540a9440", "content_id": "06b9da3efa380301a6437516f5d5d9cd3fb2fc21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 50, "num_lines": 40, "path": "/parking/serializers.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from .interfaces import ISerializer\n\n\nclass DefaultSerializer(ISerializer):\n    def __init__(self, parking: object, msg: str):\n        self.parking = parking\n        self.msg = msg\n\n    def mount_payload(self):\n        payload: dict = {\n            \"id\": self.parking.id,\n            \"msg\": self.msg,\n            \"plate\": self.parking.auto.plate,\n        }\n        return payload\n\n    def create_message(self):\n        message = self.mount_payload()\n        return message\n\n\nclass HistoricSerializer(ISerializer):\n    def __init__(self, historic: object):\n        self.historic = historic\n\n    def mount_payload(self):\n        list_historic: list = []\n        for parking in self.historic.iterator():\n            histo = {\n                \"id\": parking[\"id\"],\n                \"time\": parking[\"time\"],\n                \"paid\": parking[\"paid\"],\n                \"left\": parking[\"left\"],\n            }\n            list_historic.append(histo)\n        return list_historic\n\n    def create_message(self):\n        message: dict = self.mount_payload()\n        return message\n" }, { "alpha_fraction": 0.5603053569793701, "alphanum_fraction": 0.5664122104644775, "avg_line_length": 32.305084228515625, "blob_id": "84d5bfb94fc8a128bd48efaec421582e192336ff", "content_id": "63ffee116daafe009d5e8d6d73f70b8954c6d202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1965, "license_type": "no_license", "max_line_length": 86, "num_lines": 59, "path": "/parking/validators.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from configs.exceptions import InvalidPayloadException\nfrom .interfaces import IValidator\nfrom .helpers import ParkingHelpers\n\n\nclass ParkValidator(IValidator):\n\n    DEFAULT_PLATE_MASK: str = \"[A-Z]{3}-[0-9]{4}\\Z\"\n\n    def __init__(self):\n        self.helper = ParkingHelpers()\n\n    @staticmethod\n    def validate(value: bool) -> bool:\n        return value\n\n    def 
is_empty_payload(self, payload) -> (bool, Exception):\n if isinstance(payload, (dict, object, list, bytes)):\n return True\n else:\n raise InvalidPayloadException(\n source=\"validator\",\n code=\"empty_payload\",\n message=\"Payload in request is empty\",\n )\n\n def validate_only_plate(self, plate: str) -> (bool, Exception):\n return self.helper.regex_validate(plate, self.DEFAULT_PLATE_MASK)\n\n def validate_payload(self, payload) -> list:\n self.is_empty_payload(payload)\n\n if \"plate\" not in payload:\n raise InvalidPayloadException(\n source=\"validator\",\n code=\"field_not_exists\",\n message=\"Field required: plate\",\n )\n if len(payload[\"plate\"]) < 8:\n raise InvalidPayloadException(\n source=\"validator\",\n code=\"field_size_not_allowed\",\n message=\"Field plate don't should be less than 8\",\n )\n if len(payload[\"plate\"]) > 8:\n raise InvalidPayloadException(\n source=\"validator\",\n code=\"field_size_not_allowed\",\n message=\"Field plate don't should be major than 8\",\n )\n\n if not self.validate_only_plate(payload[\"plate\"]):\n raise InvalidPayloadException(\n source=\"validator\",\n code=\"field_not_allowed\",\n message=\"Wrong format plate. Field should be in this format AAA-9999\",\n )\n\n return self.validate(True)\n" }, { "alpha_fraction": 0.6604477763175964, "alphanum_fraction": 0.6809701323509216, "avg_line_length": 33.58064651489258, "blob_id": "781f4113564ec44f4607927a061f15f140f18248", "content_id": "4b9b12a6ca721a3f8fca9e1581036bcf5778482c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 75, "num_lines": 31, "path": "/parking/tests/tests_factories.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom parking.factories import ParkFactory\nimport unittest\n\n\nclass ParkingFactoryTestCase(TestCase):\n \"\"\"\n Tests of ParkFactory in parking.factories.py\n \"\"\"\n\n def setUp(self):\n self.factory = ParkFactory\n\n def test_create_check_in_iterator(self):\n msg = self.factory.create_check_in_interator({\"plate\": \"ABC-1234\"})\n self.assertIsInstance(msg, dict)\n\n def test_create_check_out_iterator(self):\n self.factory.create_check_in_interator({\"plate\": \"ABC-1234\"})\n msg = self.factory.create_check_out_interator(1)\n self.assertIsInstance(msg, dict)\n\n def test_create_do_payment_iterator(self):\n self.factory.create_check_in_interator({\"plate\": \"ABC-1234\"})\n msg = self.factory.create_do_payment_interator(1)\n self.assertIsInstance(msg, dict)\n\n def test_create_historic_iterator(self):\n self.factory.create_check_in_interator({\"plate\": \"ABC-1234\"})\n msg = self.factory.create_historic_interator(\"ABC-1234\")\n self.assertIsInstance(msg, list)\n" }, { "alpha_fraction": 0.36380863189697266, "alphanum_fraction": 0.3742302358150482, "avg_line_length": 28.73239517211914, "blob_id": "25eebdbde054214e0fe2cdea101f549f65d52ff9", "content_id": "9389c4e0d03afb5a7109f34310156df8c61860d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2111, "license_type": "no_license", "max_line_length": 86, "num_lines": 71, "path": "/parking/migrations/0001_initial.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.7 on 2021-03-21 18:17\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n 
dependencies = [\n (\"automobilies\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"ParkingOcurrency\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"entry\", models.DateTimeField(auto_created=True)),\n (\n \"time\",\n models.CharField(\n blank=True,\n max_length=244,\n null=True,\n verbose_name=\"Time elapsed\",\n ),\n ),\n (\"exit\", models.DateTimeField()),\n (\"paid\", models.BooleanField(default=False)),\n (\"left\", models.BooleanField(default=False)),\n (\n \"auto\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.DO_NOTHING,\n to=\"automobilies.automobilie\",\n ),\n ),\n ],\n ),\n migrations.CreateModel(\n name=\"Park\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"parking_ocurrencies\",\n models.ManyToManyField(blank=True, to=\"parking.ParkingOcurrency\"),\n ),\n ],\n options={\n \"abstract\": False,\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5487805008888245, "alphanum_fraction": 0.5975610017776489, "avg_line_length": 21.77777862548828, "blob_id": "65c87f5a89530814d0b406703ecfd465567cd90a", "content_id": "ffddca8f199e68aeb8143909de18af928acfffc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/automobilies/migrations/0002_auto_20210322_1803.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.7 on 2021-03-22 21:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"automobilies\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"automobilie\",\n name=\"plate\",\n field=models.CharField(max_length=8, verbose_name=\"Auto Plate\"),\n ),\n ]\n" }, { "alpha_fraction": 0.5549618601799011, "alphanum_fraction": 0.5572519302368164, "avg_line_length": 24.6862735748291, "blob_id": "9981a547f4e0e4eba05838fbe8591f1ee246138a", "content_id": "3b3eb5903ffdb796cc5ecc8f55e6c235e09be35f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 78, "num_lines": 51, "path": "/parking/helpers.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from typing import Type\nfrom django.utils import timezone\nfrom dateutil import relativedelta\nfrom datetime import datetime\nimport re\n\n\nclass ParkingHelpers:\n @staticmethod\n def transform_date_checkout(\n start_date: Type[datetime], end_date: Type[datetime]\n ) -> str:\n \"\"\" \"\n Transform the range of dates in a string representation\n \"\"\"\n result = relativedelta.relativedelta(end_date, start_date)\n msg: str = \"\"\n\n if result.hours >= 1:\n hours = \"hours\"\n if result.hours == 1:\n hours = \"hour\"\n msg += f\"{result.hours} {hours}, \"\n\n if result.minutes > 1:\n minutes = \"minutes\"\n else:\n minutes = \"minute\"\n\n msg += f\"{result.minutes} {minutes}\"\n\n return msg\n\n @staticmethod\n def regex_validate(plate: str, default_mask: str) -> bool:\n \"\"\" \"\n Valid the plate using Regex\n \"\"\"\n regex = re.compile(default_mask)\n valid = regex.match(plate)\n if valid:\n return True\n else:\n raise Exception(\"Plate don't match with default plate validation\")\n\n @staticmethod\n def get_today() 
-> datetime:\n \"\"\" \"\n Get the time of current system\n \"\"\"\n return timezone.now()\n" }, { "alpha_fraction": 0.5610432624816895, "alphanum_fraction": 0.5610432624816895, "avg_line_length": 32.37036895751953, "blob_id": "b9c4b933de2d0427b24ee1ebc83ea9309ce5de09", "content_id": "a27e2ac0c1be21b5deb9de3fb9f0d4e23e0cdafa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5406, "license_type": "no_license", "max_line_length": 89, "num_lines": 162, "path": "/parking/iterators.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from configs.exceptions import InteratorException\nfrom .interfaces import IIterator\nfrom typing import Any\n\n\"\"\"\n In Iterators occour all iteration beetween repositories, serializers and validators\n adding the business rules in process\n\"\"\"\n\n\nclass CheckInIterator(IIterator):\n \"\"\" \"\n Interactor responsible for check-in, called by API\n \"\"\"\n\n def __init__(self, validator=None, repo=None, serializer=None):\n self.validator: object = validator\n self.repo: object = repo()\n self.serializer: object = serializer\n\n def set_params(self, park_payload: dict):\n self.payload = park_payload\n return self\n\n def execute(self):\n try:\n valided_payload = self.validator().validate_payload(self.payload)\n\n if valided_payload:\n plate: str = self.payload[\"plate\"]\n created_parking_ocurrency = self.repo.create_parking_ocurrency_by_plate(\n plate=plate\n )\n\n serialized_return = self.serializer(\n parking=created_parking_ocurrency, msg=\"Check-in created\"\n ).create_message()\n return serialized_return\n except InteratorException as error:\n raise InteratorException(error)\n\n\nclass CheckOutIterator(IIterator):\n \"\"\" \"\n Interactor responsible for check-out, called by API\n \"\"\"\n\n def __init__(self, validator=None, repo=None, serializer=None):\n self.validator: object = validator\n self.repo: object = repo()\n self.serializer: object = serializer\n\n def set_params(self, parking_id: Any):\n self.parking_id = parking_id\n return self\n\n def execute(self):\n try:\n if isinstance(self.parking_id, str):\n self.parking_id = int(self.parking_id)\n\n parking: object = self.repo.get_parking_ocurrency_by_id(id=self.parking_id)\n\n if parking.exists():\n parking: object = parking.first()\n if parking.paid:\n if parking.left:\n serialize = {\"msg\": \"Check-out already done\", \"id\": parking.id}\n return serialize\n parking_entity = self.repo.update_parking_ocurrency_checkout(\n parking\n )\n serialize = self.serializer(\n parking=parking_entity, msg=\"Check-out done\"\n ).create_message()\n return serialize\n else:\n serialize = {\n \"msg\": \"Cannot Check-out, payment not done\",\n \"id\": parking.id,\n }\n return serialize\n else:\n serialize = {\"msg\": f\"Parking not found with ID : {self.parking_id}\"}\n return serialize\n\n except InteratorException as error:\n raise InteratorException(error)\n\n\nclass DoPaymentIterator(IIterator):\n \"\"\"\n Interactor responsible for the payment API process\n \"\"\"\n\n def __init__(self, validator=None, repo=None, serializer=None):\n self.validator: object = validator\n self.repo: object = repo()\n self.serializer: object = serializer\n\n def set_params(self, parking_id: Any):\n self.parking_id = parking_id\n return self\n\n def execute(self):\n try:\n if isinstance(self.parking_id, str):\n self.parking_id = int(self.parking_id)\n\n parking = self.repo.get_parking_ocurrency_by_id(id=self.parking_id)\n\n if parking.exists():\n 
parking: object = parking.first()\n\n if parking.paid:\n serialize = {\"msg\": \"Payment already done\", \"id\": parking.id}\n return serialize\n\n parking_entity = self.repo.update_parking_ocurrency_pay(parking)\n serialize = self.serializer(\n parking=parking_entity, msg=\"Payment done\"\n ).create_message()\n return serialize\n else:\n serialize = {\"msg\": f\"Parking not found with ID : {self.parking_id}\"}\n return serialize\n\n except InteratorException as error:\n raise InteratorException(error)\n\n\nclass HistoricIterator(IIterator):\n \"\"\"\n Interactor responsible for the history of parking by the registered license plate\n \"\"\"\n\n def __init__(self, validator=None, repo=None, serializer=None):\n self.validator: object = validator\n self.repo: object = repo\n self.serializer: object = serializer\n\n def set_params(self, plate: str):\n self.plate = plate\n return self\n\n def execute(self):\n try:\n valided_plate = self.validator().validate_only_plate(self.plate)\n\n if valided_plate:\n historic = self.repo().get_historic_by_plate(self.plate)\n\n if historic.exists():\n historic_cached = historic\n serialize = self.serializer(historic_cached).create_message()\n return serialize\n else:\n serialize = {\"msg\": f\"Historic not found with plate : {self.plate}\"}\n return serialize\n\n except InteratorException as error:\n raise InteratorException(error)\n" }, { "alpha_fraction": 0.48991355299949646, "alphanum_fraction": 0.7002881765365601, "avg_line_length": 16.350000381469727, "blob_id": "991d6979f9f8302c9d692234a4482ce92b40bf7c", "content_id": "5840ac48a9269789b223187cf5a46ac3ddf9afaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 347, "license_type": "no_license", "max_line_length": 27, "num_lines": 20, "path": "/requirements.txt", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "appdirs==1.4.4\nasgiref==3.3.1\nblack==20.8b1\nclick==7.1.2\nDjango==3.1.7\ndjango-environ==0.4.5\ndjangorestframework==3.12.2\ngunicorn==20.0.4\nmypy-extensions==0.4.3\npathspec==0.8.1\npsycopg2==2.8.6\npydantic==1.8.1\npython-dateutil==2.8.1\npytz==2021.1\nregex==2021.3.17\nsix==1.15.0\nsqlparse==0.4.1\ntoml==0.10.2\ntyped-ast==1.4.2\ntyping-extensions==3.7.4.3\n" }, { "alpha_fraction": 0.6441047787666321, "alphanum_fraction": 0.663755476474762, "avg_line_length": 30.18181800842285, "blob_id": "77eb0a9139f0f9a56c7de422546062b0f45fac6b", "content_id": "2b098e6b4b70da672f746a4980516bb2a8a6154a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1374, "license_type": "no_license", "max_line_length": 67, "num_lines": 44, "path": "/parking/tests/tests_helpers.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom parking.helpers import ParkingHelpers\nfrom typing import Any, Type\nfrom datetime import datetime\nfrom django.utils import timezone\nimport unittest\n\n\nclass ParkingHelpersTestCase(TestCase):\n \"\"\"\n Tests of ParkingHelpers in parking.helpers.py\n \"\"\"\n\n def setUp(self):\n self.helpers = ParkingHelpers\n\n self.start_time: Type[datetime] = datetime(2021, 3, 22)\n self.end_time: Type[datetime] = datetime(2021, 3, 23)\n\n self.mask = \"[A-Z]{3}-[0-9]{4}\\Z\"\n\n def test_transform_date_checkout_type(self):\n details: str = self.helpers.transform_date_checkout(\n self.start_time, self.end_time\n )\n self.assertIsInstance(details, str)\n\n def test_transform_date_checkout_value(self):\n details: str 
= self.helpers.transform_date_checkout(\n self.start_time, self.end_time\n )\n self.assertEquals(details, \"0 minute\")\n\n def test_regex_validate_type(self):\n result = self.helpers.regex_validate(\"ABC-1234\", self.mask)\n self.assertIsInstance(result, bool)\n\n def test_regex_validate_value(self):\n result = self.helpers.regex_validate(\"ABC-1234\", self.mask)\n self.assertEquals(result, True)\n\n def test_get_today_type(self):\n result = self.helpers.get_today()\n self.assertIsInstance(result, datetime)\n\n\n" }, { "alpha_fraction": 0.629363477230072, "alphanum_fraction": 0.629363477230072, "avg_line_length": 30.419355392456055, "blob_id": "e407d6453d645ee4c6b6c61d256c1f8409e97824", "content_id": "779e6b6278af4771fc166ae2bfd8106917010696", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "no_license", "max_line_length": 78, "num_lines": 31, "path": "/automobilies/repositories.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from configs.exceptions import ConflictException\nfrom .entities import Automobilie\nfrom .models import Automobilie as AutomobilieModel\n\nfrom typing import Any\n\n\nclass AutomobiliesRepo:\n def create_auto_model(self, auto_entity):\n \"\"\"\n Save an Automobilie model built from the previously created entity\n \"\"\"\n automobilie = AutomobilieModel.objects.create(plate=auto_entity.plate)\n automobilie.save()\n return automobilie\n\n def create_auto(self, plate: str):\n \"\"\"\n Create an Automobilie entity, saving the model object in the database\n \"\"\"\n try:\n entity = Automobilie(plate=plate)\n automobilie = self.create_auto_model(entity)\n\n return automobilie\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_create\",\n message=f\"Could not create the automobile, error: {err}\",\n )\n" }, { "alpha_fraction": 0.4847605228424072, "alphanum_fraction": 0.5297532677650452, "avg_line_length": 23.60714340209961, "blob_id": "f445dfc40a1c82ea261995cecaef1102ad2ff255", "content_id": "5647d01c0ab43ee2d2325105fb71d625c8fcc63e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/parking/migrations/0004_auto_20210322_1803.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.7 on 2021-03-22 21:03\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"parking\", \"0003_auto_20210321_1530\"),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name=\"Park\",\n new_name=\"Parking\",\n ),\n migrations.AlterModelOptions(\n name=\"parking\",\n options={\"verbose_name\": \"Park\"},\n ),\n migrations.AlterModelOptions(\n name=\"parkingocurrency\",\n options={\n \"verbose_name\": \"Parking Ocurrery\",\n \"verbose_name_plural\": \"Parking Ocurrencies\",\n },\n ),\n ]\n" }, { "alpha_fraction": 0.56509929895401, "alphanum_fraction": 0.56509929895401, "avg_line_length": 32.8563232421875, "blob_id": "a37469ef54d4bf75079c488215d46a8c22311d59", "content_id": "a11b5e49c6ff6cd11555335498f61796233b08ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5891, "license_type": "no_license", "max_line_length": 91, "num_lines": 174, "path": "/parking/repository.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from configs.exceptions import 
ConflictException\nfrom automobilies.repositories import AutomobiliesRepo\nfrom .helpers import ParkingHelpers\nfrom .entities import (\n ParkingOcurrency as ParkingOcurrencyEntity,\n)\nfrom .models import ParkingOcurrency, Parking\n\nfrom typing import Any, Type\n\n\nclass ParkRepo:\n \"\"\"\n This layer is responsible for interacting with models and entities\n \"\"\"\n\n helper: Type[ParkingHelpers] = ParkingHelpers()\n\n def get_or_create_parking_base_model(self) -> Type[Parking]:\n \"\"\"\n Create a Parking model\n \"\"\"\n try:\n return Parking().load()\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_create\",\n message=f\"Error in load Park objects: {err}\",\n )\n\n def create_parking_ocurrency_model(\n self, parking_object: object\n ) -> Type[ParkingOcurrency]:\n \"\"\"\n Create a Parking Ocurrenvy model\n \"\"\"\n try:\n parking_created_model = ParkingOcurrency(\n paid=parking_object.paid,\n left=parking_object.left,\n auto=parking_object.auto,\n )\n parking_created_model.save()\n return parking_created_model\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_create\",\n message=f\"Error in create a ocurreny_model: {err}\",\n )\n\n def create_parking_ocurrency_entity(\n self, parking_ocurrency: Any, type_object=False\n ) -> Type[ParkingOcurrencyEntity]:\n \"\"\"\n Create a Parking Ocurrenvy object Entity\n \"\"\"\n try:\n return ParkingOcurrencyEntity(\n paid=parking_ocurrency.paid\n if type_object\n else parking_ocurrency[\"paid\"],\n left=parking_ocurrency.left\n if type_object\n else parking_ocurrency[\"left\"],\n auto=parking_ocurrency.auto\n if type_object\n else parking_ocurrency[\"auto\"],\n )\n\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_create\",\n message=f\"Error in create a ocurreny_entity: {err}\",\n )\n\n def create_parking_ocurrency_by_plate(\n self, plate: str\n ) -> Type[ParkingOcurrencyEntity]:\n \"\"\"\n Create a parking ocurrency by plate, this is the main method called by Check-in Api\n to create a new parking instance\n \"\"\"\n try:\n park: Type[Parking] = self.get_or_create_parking_base_model()\n automobilie = AutomobiliesRepo().create_auto(plate)\n parking_ocurrency_entity: Type[\n ParkingOcurrencyEntity\n ] = self.create_parking_ocurrency_entity(\n {\"paid\": False, \"left\": False, \"auto\": automobilie}, type_object=False\n )\n\n parking_ocurrency = self.create_parking_ocurrency_model(\n parking_ocurrency_entity\n )\n park.parking_ocurrencies.add(parking_ocurrency)\n park.save()\n\n parking_ocurrency_entity.set_id(parking_ocurrency.id)\n\n return parking_ocurrency_entity\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_create\",\n message=f\"Error in create a parking by plate : {err}\",\n )\n\n def get_parking_ocurrency_by_id(self, id: int) -> Type[ParkingOcurrency]:\n \"\"\" \"\n Search a parking by Parking Ocurrency model id\n \"\"\"\n return ParkingOcurrency.objects.filter(id=id)\n\n def get_historic_by_plate(self, plate: str) -> Type[ParkingOcurrency]:\n \"\"\" \"\n Search a parking by Automobilie model plate\n \"\"\"\n return ParkingOcurrency.objects.filter(auto__plate=plate).values(\n \"id\", \"time\", \"paid\", \"left\", \"auto\"\n )\n\n def update_parking_ocurrency_checkout(\n self, parking: object\n ) -> Type[ParkingOcurrencyEntity]:\n \"\"\" \"\n Update the parking ocurrency called by Check-out iterator\n 
\"\"\"\n try:\n exit_datetime = self.helper.get_today()\n formated_exit_date = self.helper.transform_date_checkout(\n start_date=parking.entry, end_date=exit_datetime\n )\n\n parking.left = True\n parking.exit = exit_datetime\n parking.time = formated_exit_date\n parking.save()\n\n parking_entity = self.create_parking_ocurrency_entity(\n parking, type_object=True\n )\n parking_entity.set_id(parking.id)\n\n return parking_entity\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_update\",\n message=f\"Error in update checkout a ocurreny : {err}\",\n )\n\n def update_parking_ocurrency_pay(\n self, parking: object\n ) -> Type[ParkingOcurrencyEntity]:\n \"\"\" \"\n Update the parking ocurrency called by Do-Payment iterator\n \"\"\"\n try:\n parking.paid = True\n parking.save()\n parking_entity = self.create_parking_ocurrency_entity(\n parking, type_object=True\n )\n parking_entity.set_id(parking.id)\n return parking_entity\n except ConflictException as err:\n raise ConflictException(\n source=\"repository\",\n code=\"conflit_in_update\",\n message=f\"Error in update pay a ocurreny : {err}\",\n )\n" }, { "alpha_fraction": 0.6860986351966858, "alphanum_fraction": 0.6905829310417175, "avg_line_length": 23.77777862548828, "blob_id": "069287c34632f7447ab0d1f581e3302dc4fb3539", "content_id": "85e037b5f03fab9a1794098699969434d99444ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 81, "num_lines": 9, "path": "/automobilies/models.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\n# Create your models here.\nclass Automobilie(models.Model):\n plate = models.CharField(\"Auto Plate\", max_length=8, null=False, blank=False)\n\n def __str__(self):\n return self.plate\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 18.799999237060547, "blob_id": "1c0fdcb3e962efbe93c798baae88fe4c8439500e", "content_id": "8249377a73b61c4297611efcd2069173af1a98ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/automobilies/apps.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass AutomobiliesConfig(AppConfig):\n name = \"automobilies\"\n" }, { "alpha_fraction": 0.7585033774375916, "alphanum_fraction": 0.7585033774375916, "avg_line_length": 23.5, "blob_id": "e735eb2358bf9d93436d7d9068ecf301a4ec78c3", "content_id": "98fdc49b891fa672edb78554913ae354f723441e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/parking/urls.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import ParkViewSet\n\nrouter = DefaultRouter(trailing_slash=False)\nrouter.register(\"\", ParkViewSet, basename=\"parking\")\n\napp_name = \"parking\"\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n]\n" }, { "alpha_fraction": 0.5279383659362793, "alphanum_fraction": 0.5279383659362793, "avg_line_length": 20.625, "blob_id": "e5dc5b2d135e2f68e9136e3699a3b73e348d457f", "content_id": 
"cb812615b5975885f3a7566ddcd678f422309e9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 72, "num_lines": 24, "path": "/automobilies/entities.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "class Automobilie:\n def __init__(self, plate: str):\n self._id = None\n self._plate = plate\n\n def __repr__(self):\n return f\"Entity: Automobilie<id:{self.id}, plate:{self.plate}>\"\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n\n @property\n def id(self):\n return self._id\n\n def set_id(self, id: int):\n self._id = id\n\n @property\n def plate(self):\n return self._plate\n" }, { "alpha_fraction": 0.6140018701553345, "alphanum_fraction": 0.6329233646392822, "avg_line_length": 29.200000762939453, "blob_id": "c533e990a61db0272845dd7f6be203e9b61f0664", "content_id": "9f7d5b207cb48972e6db49886fa4e7fddafaeaff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 68, "num_lines": 35, "path": "/parking/tests/tests_models.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from parking.models import Parking, ParkingOcurrency\nfrom automobilies.models import Automobilie\nfrom django.test import TestCase\n\n\nclass POTest(TestCase):\n \"\"\"\n Tests of ParkingOcurrency in parking.models.py\n \"\"\"\n\n def test_create_po(self):\n auto = Automobilie.objects.create(plate=f\"ABC-1234\")\n parking = ParkingOcurrency.objects.create(\n paid=False, left=False, time=\"24 Hours\", auto=auto\n )\n parking.save()\n\n po = ParkingOcurrency.objects.filter(auto__plate=\"ABC-1234\")\n self.assertEquals(po.exists(), True)\n\n def test_update_po(self):\n auto = Automobilie.objects.create(plate=f\"ABC-1234\")\n parking = ParkingOcurrency.objects.create(\n paid=False, left=False, time=\"24 Hours\", auto=auto\n )\n parking.save()\n\n po = ParkingOcurrency.objects.filter(auto__plate=\"ABC-1234\")\n po = po.first()\n po.paid = True\n po.left = True\n po.save()\n\n self.assertEquals(po.paid, True)\n self.assertEquals(po.left, True)\n" }, { "alpha_fraction": 0.6593625545501709, "alphanum_fraction": 0.6832669377326965, "avg_line_length": 26.83333396911621, "blob_id": "803ef81c9868bf1d72d3a4a9caed00417586792e", "content_id": "34a1ae3f6a48607d84d0ebae937d0738d11b77b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 92, "num_lines": 36, "path": "/Dockerfile", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "FROM python:3.8-slim\n\nENV PYTHONUNBUFFERED 1 \\\n PIP_DEFAULT_TIMEOUT=100 \\\n PIP_DISABLE_PIP_VERSION_CHECK=1 \\\n PIP_NO_CACHE_DIR=1 \\\n POETRY_VERSION=1.1.5\n\nRUN groupadd user && useradd --create-home --home-dir /home/user -g user user\nWORKDIR /var/www/app\n\n# Install system dependencies\nRUN apt-get update && apt-get install gcc build-essential python3-psycopg2 libpq-dev -y && \\\n python3 -m pip install --no-cache-dir pip-tools\n\nRUN pip install psycopg2-binary\nRUN pip install poetry\nCOPY pyproject.toml poetry.lock ./\nRUN poetry install\nCOPY ./requirements.txt /var/www/app\nRUN pip install -r requirements.txt\n\n# Clean the house\nRUN apt-get purge libpq-dev -y && apt-get autoremove -y && \\\n rm /var/lib/apt/lists/* rm -rf 
/var/cache/apt/*\n\nCOPY . /var/www/app\n\nUSER user\n\nCMD [\"sh\",\"-c\", \\\n \"sleep 4s && \\\n python manage.py makemigrations && \\\n python manage.py migrate && \\\n python manage.py test && \\\n gunicorn configs.wsgi --log-file - -b 0.0.0.0:8000 --reload\"]\n\n\n" }, { "alpha_fraction": 0.6337653994560242, "alphanum_fraction": 0.6527085304260254, "avg_line_length": 40.79166793823242, "blob_id": "808e168b1c6f39cb0c81a4ea27c3a21af2212906", "content_id": "038f6d037db66d0d44d009ec32342957c1a4e94c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3009, "license_type": "no_license", "max_line_length": 78, "num_lines": 72, "path": "/parking/tests/tests_views_api.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from rest_framework import status\nfrom rest_framework.test import APITestCase\n\n\nclass ParkViewSetTests(APITestCase):\n \"\"\"\n Tests of ParkViewSet in parking.views.py\n \"\"\"\n\n def setUp(self):\n self.data = {\"plate\": \"ABC-1234\"}\n\n def test_check_in_with_data(self):\n response = self.client.post(\"/parking\", data=self.data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_check_in_wrong_data(self):\n data = {\"plate\": \"abc-1234\"}\n response = self.client.post(\"/parking\", data=data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_check_in_no_data(self):\n response = self.client.post(\"/parking\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_check_out_without_pay(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n response = self.client.put(\"/parking/1/out\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_check_out_with_pay(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n self.client.put(\"/parking/1/pay\")\n response = self.client.put(\"/parking/1/out\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_check_in_without_id(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n self.client.put(\"/parking/1/pay\")\n response = self.client.put(\"/parking/out\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_pay_with_id(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n response = self.client.put(\"/parking/1/pay\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_pay_without_id(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n response = self.client.put(\"/parking/pay\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_pay_already_paid(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n self.client.put(\"/parking/1/pay\")\n response = self.client.put(\"/parking/1/pay\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_parking_history_with_plate(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n self.client.put(\"/parking/1/pay\")\n self.client.put(\"/parking/1/out\")\n response = self.client.get(\"/parking/ABC-1234\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n\n def test_parking_history_without_plate(self):\n self.client.post(\"/parking\", data=self.data, format=\"json\")\n self.client.put(\"/parking/1/pay\")\n self.client.put(\"/parking/1/out\")\n response = 
self.client.get(\"/parking/\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n" }, { "alpha_fraction": 0.515539288520813, "alphanum_fraction": 0.5722120404243469, "avg_line_length": 22.782608032226562, "blob_id": "f40657b44c7aa504b32a5aed6358f283254714be", "content_id": "af5417f95a8b315209ed6e60bf0bd2979e7c5c9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 50, "num_lines": 23, "path": "/parking/migrations/0003_auto_20210321_1530.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.7 on 2021-03-21 18:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"parking\", \"0002_auto_20210321_1523\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"parkingocurrency\",\n name=\"entry\",\n field=models.DateTimeField(),\n ),\n migrations.AlterField(\n model_name=\"parkingocurrency\",\n name=\"exit\",\n field=models.DateTimeField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6930692791938782, "alphanum_fraction": 0.7128713130950928, "avg_line_length": 32.66666793823242, "blob_id": "39d459535487a6241623682983f0ff8ff651d627", "content_id": "f33c7c757b8e3b9a0db690b156c1c5e84e8f53d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 808, "license_type": "no_license", "max_line_length": 61, "num_lines": 24, "path": "/automobilies/tests/tests_repo.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from automobilies.models import Automobilie\nfrom automobilies.entities import Automobilie as AutoEntity\nfrom automobilies.repositories import AutomobiliesRepo\nfrom django.test import TestCase\n\n\nclass AutoRepoTestCase(TestCase):\n \"\"\"\n Tests of AutomobiliesRepo in automobilies.repository.py\n \"\"\"\n\n def setUp(self):\n self.repo = AutomobiliesRepo()\n\n def test_create_auto_model(self):\n auto_entity = AutoEntity(plate=\"ABC-1234\")\n auto = self.repo.create_auto_model(auto_entity)\n self.assertIsInstance(auto, object)\n self.assertEquals(auto.plate, \"ABC-1234\")\n\n def test_create_auto(self):\n auto_entity = self.repo.create_auto(plate=\"CBA-1234\")\n self.assertIsInstance(auto_entity, object)\n self.assertEquals(auto_entity.plate, \"CBA-1234\")\n" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7441860437393188, "avg_line_length": 16.200000762939453, "blob_id": "b4397a7de3cceb06803c04477ae3209dfb82c73c", "content_id": "e3e0b04ce6c479bfcb05c34c1c6ec76896f90f8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/parking/apps.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ParkConfig(AppConfig):\n name = \"parking\"\n" }, { "alpha_fraction": 0.3819628655910492, "alphanum_fraction": 0.4031830132007599, "avg_line_length": 23.322580337524414, "blob_id": "e09528f46f7839b6d8c6365c37f83b94b0fe2120", "content_id": "a94d2729b5b0db998c464e2dab6ee9d5330d37ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "no_license", "max_line_length": 83, "num_lines": 31, "path": "/automobilies/migrations/0001_initial.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", 
"text": "# Generated by Django 3.1.7 on 2021-03-21 18:17\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = []\n\n operations = [\n migrations.CreateModel(\n name=\"Automobilie\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"plate\",\n models.CharField(max_length=8, verbose_name=\"Placa Automóvel\"),\n ),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6633797287940979, "alphanum_fraction": 0.6756720542907715, "avg_line_length": 34.936893463134766, "blob_id": "03aa0d2d1c14e42ad501cb1d800199b94270888f", "content_id": "4ff20feca6c678fe2b0d9b03f00fc4a789905c5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7403, "license_type": "no_license", "max_line_length": 87, "num_lines": 206, "path": "/parking/tests/tests_interators.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom parking.iterators import (\n CheckInIterator,\n CheckOutIterator,\n DoPaymentIterator,\n HistoricIterator,\n)\nfrom parking.serializers import DefaultSerializer, HistoricSerializer\nfrom parking.validators import ParkValidator\nfrom parking.repository import ParkRepo\n\n\nclass CheckInIteratorTestCase(TestCase):\n \"\"\"\n Tests of CheckInIterator in parking.iterator.py\n \"\"\"\n\n def setUp(self):\n self.iterator = CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n )\n\n def test_init(self):\n self.assertIsInstance(self.iterator, object)\n self.assertIsInstance(self.iterator.validator, object)\n self.assertIsInstance(self.iterator.repo, object)\n self.assertIsInstance(self.iterator.serializer, object)\n\n def test_set_params(self):\n self.iterator.set_params(park_payload={\"plate\": \"ABC-1234\"})\n self.assertIsInstance(self.iterator.payload, dict)\n self.assertEquals(self.iterator.payload[\"plate\"], \"ABC-1234\")\n\n def test_execute(self):\n result = self.iterator.set_params(park_payload={\"plate\": \"ABC-1234\"}).execute()\n self.assertIsInstance(result, dict)\n self.assertEquals(result[\"msg\"], \"Check-in created\")\n self.assertEquals(result[\"plate\"], \"ABC-1234\")\n\n\nclass CheckOutIteratorTestCase(TestCase):\n \"\"\"\n Tests of CheckOutIterator in parking.iterator.py\n \"\"\"\n\n def setUp(self):\n self.iterator = CheckOutIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n )\n\n def test_init(self):\n self.assertIsInstance(self.iterator, object)\n self.assertIsInstance(self.iterator.validator, object)\n self.assertIsInstance(self.iterator.repo, object)\n self.assertIsInstance(self.iterator.serializer, object)\n\n def test_set_params(self):\n self.iterator.set_params(1)\n self.assertIsInstance(self.iterator.parking_id, int)\n self.assertEquals(self.iterator.parking_id, 1)\n\n self.iterator.set_params(\"1\")\n self.assertIsInstance(self.iterator.parking_id, str)\n self.assertEquals(self.iterator.parking_id, \"1\")\n\n def test_execute(self):\n check_in = CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": \"ABC-1234\"}).execute()\n\n result = self.iterator.set_params(parking_id=6).execute()\n self.assertIsInstance(result, dict)\n\n def test_execute_payment_not_done(self):\n CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": 
\"ABC-1234\"}).execute()\n\n result = self.iterator.set_params(1).execute()\n self.assertIsInstance(result[\"msg\"], str)\n\n def test_execute_already_done(self):\n CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": \"ABC-1234\"}).execute()\n\n DoPaymentIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(1).execute()\n\n CheckOutIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(1).execute()\n\n result = self.iterator.set_params(parking_id=1).execute()\n self.assertIsInstance(result[\"msg\"], str)\n\n def test_execute_wrong_id(self):\n result = self.iterator.set_params(parking_id=48).execute()\n self.assertIsInstance(result[\"msg\"], str)\n\n\nclass DoPaymentIteratorTestCase(TestCase):\n \"\"\"\n Tests of DoPaymentIterator in parking.iterator.py\n \"\"\"\n\n def setUp(self):\n self.iterator = DoPaymentIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n )\n\n def test_init(self):\n self.assertIsInstance(self.iterator, object)\n self.assertIsInstance(self.iterator.validator, object)\n self.assertIsInstance(self.iterator.repo, object)\n self.assertIsInstance(self.iterator.serializer, object)\n\n def test_set_params(self):\n self.iterator.set_params(1)\n self.assertIsInstance(self.iterator.parking_id, int)\n self.assertEquals(self.iterator.parking_id, 1)\n\n self.iterator.set_params(\"1\")\n self.assertIsInstance(self.iterator.parking_id, str)\n self.assertEquals(self.iterator.parking_id, \"1\")\n\n def test_execute(self):\n CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": \"ABC-1234\"}).execute()\n\n result = self.iterator.set_params(parking_id=1).execute()\n self.assertIsInstance(result, dict)\n\n def test_execute_payment_already(self):\n CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": \"ABC-1234\"}).execute()\n\n DoPaymentIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(1).execute()\n\n result = self.iterator.set_params(1).execute()\n self.assertIsInstance(result[\"msg\"], str)\n\n def test_execute_wrong_id(self):\n result = self.iterator.set_params(48).execute()\n self.assertEquals(result[\"msg\"], \"Parking not found with ID : 48\")\n\n\nclass HistoricIteratorTestCase(TestCase):\n \"\"\"\n Tests of HistoricIterator in parking.iterator.py\n \"\"\"\n\n def setUp(self):\n self.iterator = HistoricIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=HistoricSerializer\n )\n\n def test_init(self):\n self.assertIsInstance(self.iterator, object)\n self.assertIsInstance(self.iterator.validator, object)\n self.assertIsInstance(self.iterator.repo, object)\n self.assertIsInstance(self.iterator.serializer, object)\n\n def test_set_params(self):\n self.iterator.set_params(plate=\"ABC-1234\")\n self.assertIsInstance(self.iterator.plate, str)\n self.assertEquals(self.iterator.plate, \"ABC-1234\")\n\n def test_execute(self):\n CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": \"ABC-1234\"}).execute()\n\n DoPaymentIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(1).execute()\n\n CheckOutIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(1).execute()\n\n CheckInIterator(\n 
validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params({\"plate\": \"ABC-1234\"}).execute()\n\n DoPaymentIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(2).execute()\n\n CheckOutIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n ).set_params(2).execute()\n\n result = self.iterator.set_params(plate=\"ABC-1234\").execute()\n self.assertIsInstance(result, list)\n self.assertEquals(len(result), 2)\n\n\n def test_execute_not_fould(self):\n result = self.iterator.set_params(plate=\"ASQ-1234\").execute()\n self.assertEquals(result[\"msg\"], \"Historic not found with plate : ASQ-1234\")\n" }, { "alpha_fraction": 0.6651705503463745, "alphanum_fraction": 0.6795332431793213, "avg_line_length": 30.828571319580078, "blob_id": "de00ab049baeea4539f85cc97f293d8e692a2e25", "content_id": "f91746fc00f3b923b5372b8bf152d48317f02e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 83, "num_lines": 35, "path": "/parking/tests/tests_validators.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from parking.validators import ParkValidator\nfrom configs.exceptions import InvalidPayloadException\nfrom django.test import TestCase\nimport unittest\n\n\nclass ParkValidatorTestCase(TestCase):\n \"\"\"\n Tests of ParkValidator in parking.validators.py\n \"\"\"\n\n def setUp(self):\n self.validator = ParkValidator()\n\n def test_type_regex(self):\n self.assertEquals(self.validator.DEFAULT_PLATE_MASK, \"[A-Z]{3}-[0-9]{4}\\Z\")\n\n def test_validate(self):\n self.assertEquals(self.validator.validate(True), True)\n self.assertEquals(self.validator.validate(False), False)\n\n def test_is_empty_payload(self):\n payload = {\"plate\": \"ABC-1234\"}\n result = self.validator.is_empty_payload(payload)\n self.assertEquals(result, True)\n\n def test_validate_only_plate(self):\n plate = \"ABC-1234\"\n result = self.validator.validate_only_plate(plate)\n self.assertEquals(result, True)\n\n def test_validate_payload(self):\n payload = {\"plate\": \"ABC-1234\"}\n result = self.validator.validate_payload(payload)\n self.assertEquals(result, True)\n" }, { "alpha_fraction": 0.6021881699562073, "alphanum_fraction": 0.6205689311027527, "avg_line_length": 35.85483932495117, "blob_id": "f097567b935b7bf24515b7aa9476557ad3f6263a", "content_id": "5cf4e5ab1837edf1ae5308fbb66892a2df79dfc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2285, "license_type": "no_license", "max_line_length": 87, "num_lines": 62, "path": "/parking/views.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST\nfrom rest_framework.response import Response\nfrom parking.factories import ParkFactory\n\n\n# Register your viewsets here.\nclass ParkViewSet(viewsets.GenericViewSet):\n \"\"\"\n API made using Django Rest Framework\n \"\"\"\n\n factory = ParkFactory()\n http_method_names = [\"get\", \"post\", \"put\"]\n\n @action(methods=[\"POST\"], detail=False, url_path=\"parking\")\n def check_in(self, request):\n \"\"\"\n Check-in enpoint\n \"\"\"\n try:\n check_in_result = self.factory.create_check_in_interator(data=request.data)\n return Response(check_in_result, 
status=HTTP_200_OK)\n except Exception as error:\n return Response({\"msg\": error.args[0]}, status=HTTP_400_BAD_REQUEST)\n\n @action(methods=[\"PUT\"], detail=False, url_path=\"parking/(?P<id>[0-9]+)/out\")\n def check_out(self, request, id):\n \"\"\"\n Enpoint to check-out\n \"\"\"\n try:\n check_out_result = self.factory.create_check_out_interator(id=id)\n return Response(check_out_result, status=HTTP_200_OK)\n except Exception as error:\n return Response({\"msg\": error.args[0]}, status=HTTP_400_BAD_REQUEST)\n\n @action(methods=[\"PUT\"], detail=False, url_path=\"parking/(?P<id>[0-9]+)/pay\")\n def do_payment(self, request, id):\n \"\"\"\n Enpoint for payments\n \"\"\"\n try:\n payment_result = self.factory.create_do_payment_interator(id=id)\n return Response(payment_result, status=HTTP_200_OK)\n except Exception as error:\n return Response({\"msg\": error.args[0]}, status=HTTP_400_BAD_REQUEST)\n\n @action(\n methods=[\"GET\"], detail=False, url_path=\"parking/(?P<plate>[A-Z]{3}-[0-9]{4})\"\n )\n def parking_history(self, request, plate: str):\n \"\"\"\n Enpoint of parking history\n \"\"\"\n try:\n history_result = self.factory.create_historic_interator(plate=plate)\n return Response(history_result, status=HTTP_200_OK)\n except Exception as error:\n return Response({\"msg\": error.args[0]}, status=HTTP_400_BAD_REQUEST)\n" }, { "alpha_fraction": 0.5716260075569153, "alphanum_fraction": 0.5716260075569153, "avg_line_length": 20.284313201904297, "blob_id": "1763e4c3758e0f33e30ce6db52749899a69fd07d", "content_id": "be0fd9fdd60728919abb673ee9c256e6b3fd87e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2171, "license_type": "no_license", "max_line_length": 134, "num_lines": 102, "path": "/parking/entities.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from typing import List, Type\nfrom datetime import datetime\nfrom automobilies.models import Automobilie\n\n\nclass ParkingOcurrency:\n \"\"\"\n Representation in Object of Model ParkingOcurrency\n \"\"\"\n\n def __init__(self, paid: bool, left: bool, auto: Type[Automobilie]):\n\n self._id = None\n self._time = None\n self._entry = None\n self._exit = None\n self._paid = paid\n self._left = left\n self._auto = auto\n\n def __repr__(self):\n return f\"Entity: ParkingOcurrency<id:{self.id}, time:{self.time}, paid:{self.paid}, left:{self.left}, auto:{self.auto.plate}>\"\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((\"id\", self.id, \"time\", self.time))\n\n @property\n def id(self):\n return self._id\n\n def set_id(self, id: int):\n self._id = id\n\n @property\n def time(self):\n return self._time\n\n def set_time(self, time_string: str):\n self._time = time_string\n\n @property\n def entry(self):\n return self._entry\n\n def set_entry(self, entry: Type[datetime]):\n self._entry = entry\n\n @property\n def exit(self):\n return self._exit\n\n def set_exit(self, exit: Type[datetime]):\n self._exit = exit\n\n @property\n def paid(self):\n return self._paid\n\n @property\n def left(self):\n return self._left\n\n @property\n def auto(self):\n return self._auto\n\n @property\n def auto_plate(self):\n return self._auto.plate\n\n\nclass Parking:\n \"\"\"\n Representation in Object of Model Parking\n \"\"\"\n\n def __init__(self, park_ocurrencies: List[ParkingOcurrency]):\n self._id = None\n self._park_ocurrencies = park_ocurrencies\n\n def 
__eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n\n @property\n def id(self):\n return self._id\n\n def set_id(self, id: int):\n self._id = id\n\n @property\n def park_ocurrencies(self):\n return self.park_ocurrencies\n" }, { "alpha_fraction": 0.637302577495575, "alphanum_fraction": 0.6403402090072632, "avg_line_length": 25.54838752746582, "blob_id": "b6a0cf0e7b106bc4db75c5eded4ab8fe288d72ae", "content_id": "c22bb3391aa40d2d683bb223a2b1a007f5bfaf21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1646, "license_type": "no_license", "max_line_length": 82, "num_lines": 62, "path": "/parking/models.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.core.cache import cache\nfrom django.utils import timezone\n\nfrom automobilies.models import Automobilie\n\n\n# Create your models here.\nclass ParkingOcurrency(models.Model):\n time = models.CharField(\"Time elapsed\", max_length=244, null=True, blank=True)\n entry = models.DateTimeField()\n exit = models.DateTimeField(null=True)\n paid = models.BooleanField(default=False)\n left = models.BooleanField(default=False)\n auto = models.ForeignKey(Automobilie, on_delete=models.DO_NOTHING)\n\n class Meta:\n verbose_name = \"Parking Ocurrery\"\n verbose_name_plural = \"Parking Ocurrencies\"\n\n def save(self, *args, **kwargs):\n self.entry = timezone.now()\n return super(ParkingOcurrency, self).save(*args, **kwargs)\n\n\nclass SingletonModel(models.Model):\n class Meta:\n abstract = True\n\n def set_cache(self):\n cache.set(self.__class__.__name__, self)\n\n def delete(self, *args, **kwargs):\n pass\n\n def save(self, *args, **kwargs):\n self.pk = 1\n super(SingletonModel, self).save(*args, **kwargs)\n\n self.set_cache()\n\n @classmethod\n def load(cls):\n if cache.get(cls.__name__) is None:\n obj, created = cls.objects.get_or_create(pk=1)\n if not created:\n obj.set_cache()\n return cache.get(cls.__name__)\n\n\nclass Parking(SingletonModel):\n \"\"\" \"\n Model base, representation of Parking\n \"\"\"\n\n parking_ocurrencies = models.ManyToManyField(ParkingOcurrency, blank=True)\n\n class Meta:\n verbose_name = \"Park\"\n\n def __str__(self):\n return str(self.id)\n" }, { "alpha_fraction": 0.6134556531906128, "alphanum_fraction": 0.6403669714927673, "avg_line_length": 31.700000762939453, "blob_id": "005106030dc83328e6d0114c80180328a63b93cc", "content_id": "5cdfac7bd493dd6d00bdd5c8a8e46bbf23cda372", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1635, "license_type": "no_license", "max_line_length": 64, "num_lines": 50, "path": "/automobilies/tests/tests_entities.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from automobilies.entities import Automobilie\nfrom django.test import TestCase\n\n\nclass AutomobilieEntityTestCase(TestCase):\n \"\"\"\n Tests of Automobilie in automobilies.entities.py\n \"\"\"\n\n def setUp(self):\n self.auto_one = Automobilie(plate=\"ABC-1234\")\n\n self.auto_two = Automobilie(plate=\"CBA-4321\")\n\n def test_isistance_object(self):\n self.assertIsInstance(self.auto_one, object)\n self.assertIsInstance(self.auto_two, object)\n\n def test_atributes_values_po(self):\n auto1 = {\"id\": 1, \"plate\": \"ABC-1234\"}\n\n auto2 = {\"id\": 2, \"plate\": \"CBA-4321\"}\n\n self.auto_one.set_id(1)\n self.auto_two.set_id(2)\n\n self.assertEquals(self.auto_one.id, 
1)\n self.assertEquals(self.auto_one.id, auto1[\"id\"])\n self.assertEquals(self.auto_one.plate, \"ABC-1234\")\n self.assertEquals(self.auto_one.plate, auto1[\"plate\"])\n\n self.assertEquals(self.auto_two.id, 2)\n self.assertEquals(self.auto_two.id, auto2[\"id\"])\n self.assertEquals(self.auto_two.plate, \"CBA-4321\")\n self.assertEquals(self.auto_two.plate, auto2[\"plate\"])\n\n def test_atributes_type_po(self):\n self.auto_one.set_id(1)\n self.auto_two.set_id(2)\n\n self.assertIsInstance(self.auto_one.id, int)\n self.assertIsInstance(self.auto_one.plate, str)\n\n self.assertIsInstance(self.auto_two.id, int)\n self.assertIsInstance(self.auto_two.plate, str)\n\n def test_repr_class_po(self):\n self.auto_one.set_id(1)\n repr: str = \"Entity: Automobilie<id:1, plate:ABC-1234>\"\n self.assertEquals(self.auto_one.__str__(), repr)\n" }, { "alpha_fraction": 0.6614896059036255, "alphanum_fraction": 0.6727850437164307, "avg_line_length": 36.27631759643555, "blob_id": "0e32a67458147944979a1b298cfdda8b795abbed", "content_id": "d63e6c2caf3b262a8ed683b3b5b43c30c938118b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2833, "license_type": "no_license", "max_line_length": 88, "num_lines": 76, "path": "/parking/tests/tests_serializers.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from parking.entities import ParkingOcurrency\nfrom parking.models import ParkingOcurrency as ParkingOcurrencyModel\nfrom parking.serializers import DefaultSerializer, HistoricSerializer\nfrom automobilies.models import Automobilie\nfrom django.test import TestCase\n\n\nclass DefaultSerializerTestCase(TestCase):\n \"\"\"\n Tests of DefaultSerializer in parking.serializer.py\n \"\"\"\n\n def setUp(self):\n self.po = ParkingOcurrency(paid=False, left=False, auto=Automobilie(\"CBA-4321\"))\n self.po.set_id(1)\n\n self.serializer = DefaultSerializer\n\n def test_init(self):\n serializer = self.serializer(parking=self.po, msg=\"Init test\")\n self.assertIsInstance(serializer.parking, object)\n self.assertIsInstance(serializer.msg, str)\n\n def test_mount_payload(self):\n serializer = self.serializer(parking=self.po, msg=\"Mount test\")\n dict_message = serializer.mount_payload()\n self.assertIsInstance(dict_message, dict)\n self.assertEquals(dict_message[\"id\"], 1)\n self.assertEquals(dict_message[\"msg\"], \"Mount test\")\n\n def test_create_message(self):\n serializer = self.serializer(parking=self.po, msg=\"Create message test\")\n message = serializer.create_message()\n self.assertIsInstance(message, dict)\n\n\nclass HistoricSerializerTestCase(TestCase):\n \"\"\"\n Tests of HistoricSerializerTestCase in parking.serializer.py\n \"\"\"\n\n def setUp(self):\n self.serializer = HistoricSerializer\n\n auto = Automobilie.objects.create(plate=\"CBA-1234\")\n auto.save()\n\n for i in range(1, 4):\n park = ParkingOcurrencyModel.objects.create(\n paid=True, left=True, time=f\"3{i} minutes\", auto=auto\n )\n park.save()\n\n def test_init(self):\n results = ParkingOcurrencyModel.objects.filter(auto__plate=\"CBA-1234\")\n serializer = self.serializer(historic=results)\n self.assertIsInstance(serializer.historic, object)\n self.assertEquals(serializer.historic.count(), 3)\n\n def test_mount_payload(self):\n results = ParkingOcurrencyModel.objects.filter(auto__plate=\"CBA-1234\").values(\n \"id\", \"time\", \"paid\", \"left\"\n )\n serializer = self.serializer(historic=results)\n list_historic = serializer.mount_payload()\n 
self.assertIsInstance(list_historic, list)\n self.assertEquals(list_historic[0][\"time\"], \"31 minutes\")\n self.assertEquals(list_historic[1][\"time\"], \"32 minutes\")\n\n def test_create_message(self):\n results = ParkingOcurrencyModel.objects.filter(auto__plate=\"CBA-1234\").values(\n \"id\", \"time\", \"paid\", \"left\"\n )\n serializer = self.serializer(historic=results)\n list_historic = serializer.mount_payload()\n self.assertIsInstance(list_historic, list)\n" }, { "alpha_fraction": 0.6168437004089355, "alphanum_fraction": 0.6168437004089355, "avg_line_length": 26.45833396911621, "blob_id": "ef513600598b27db5f883f961a24ff2a66f1e903", "content_id": "d94ca9b6631d4581fa2b94786a67e368951db22a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 85, "num_lines": 48, "path": "/parking/factories.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from .repository import ParkRepo\nfrom .iterators import (\n CheckInIterator,\n CheckOutIterator,\n DoPaymentIterator,\n HistoricIterator,\n)\nfrom .validators import ParkValidator\nfrom .serializers import DefaultSerializer, HistoricSerializer\nfrom typing import Any\n\n\nclass ParkFactory:\n @staticmethod\n def create_check_in_interator(data: dict):\n return (\n CheckInIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=DefaultSerializer\n )\n .set_params(park_payload=data)\n .execute()\n )\n\n @staticmethod\n def create_check_out_interator(id: Any):\n return (\n CheckOutIterator(repo=ParkRepo, serializer=DefaultSerializer)\n .set_params(parking_id=id)\n .execute()\n )\n\n @staticmethod\n def create_do_payment_interator(id: Any):\n return (\n DoPaymentIterator(repo=ParkRepo, serializer=DefaultSerializer)\n .set_params(parking_id=id)\n .execute()\n )\n\n @staticmethod\n def create_historic_interator(plate: str):\n return (\n HistoricIterator(\n validator=ParkValidator, repo=ParkRepo, serializer=HistoricSerializer\n )\n .set_params(plate=plate)\n .execute()\n )\n" }, { "alpha_fraction": 0.665057897567749, "alphanum_fraction": 0.6779279112815857, "avg_line_length": 39.3636360168457, "blob_id": "82d585c1fcab9a7383fc1ac8480775a2b74662b3", "content_id": "bf20920736d2a6e8c7f48b86bebc9a908391842b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3108, "license_type": "no_license", "max_line_length": 83, "num_lines": 77, "path": "/parking/tests/tests_repo.py", "repo_name": "johnatasr/CoolPark", "src_encoding": "UTF-8", "text": "from parking.models import ParkingOcurrency as ParkingOcurrencyModel\nfrom automobilies.models import Automobilie\nfrom parking.entities import ParkingOcurrency\nfrom parking.repository import ParkRepo\nfrom django.test import TestCase\n\n\nclass ParkingRepoTestCase(TestCase):\n \"\"\"\n Tests of ParkRepo in parking.repository.py\n \"\"\"\n\n def setUp(self):\n self.auto = Automobilie.objects.create(plate=\"ABC-1234\")\n self.auto.save()\n\n self.po_model = ParkingOcurrencyModel.objects.create(\n paid=False, left=False, auto=self.auto\n )\n self.po_model.save()\n\n self.po = ParkingOcurrency(paid=False, left=False, auto=self.auto)\n self.repo = ParkRepo()\n\n def test_get_or_create_park_model(self):\n park: object = self.repo.get_or_create_parking_base_model()\n self.assertIsInstance(park, object)\n\n def test_create_parking_ocurrency_model(self):\n po = {\"paid\": True, \"left\": False, \"auto\": 
Automobilie(plate=\"ABC-1234\")}\n\n po_entity = self.repo.create_parking_ocurrency_entity(parking_ocurrency=po)\n self.assertIsInstance(po_entity, object)\n self.assertEquals(po_entity.paid, po[\"paid\"])\n self.assertEquals(po_entity.left, po[\"left\"])\n self.assertEquals(po_entity.auto_plate, po[\"auto\"].plate)\n\n def test_create_parking_ocurrency_by_plate(self):\n po_entity = self.repo.create_parking_ocurrency_by_plate(\"ABC-1234\")\n self.assertIsInstance(po_entity, object)\n self.assertEquals(po_entity.paid, False)\n self.assertEquals(po_entity.left, False)\n self.assertEquals(po_entity.auto_plate, \"ABC-1234\")\n\n def test_get_parking_ocurrency_by_id(self):\n po_searched = self.repo.get_parking_ocurrency_by_id(1)\n self.assertIsInstance(po_searched, object)\n\n def test_get_historic_by_plate(self):\n self.repo.create_parking_ocurrency_by_plate(\"CBA-4444\")\n self.repo.create_parking_ocurrency_by_plate(\"CBA-4444\")\n historic = self.repo.get_historic_by_plate(\"CBA-4444\")\n self.assertEquals(historic.count(), 2)\n self.assertEquals(historic[0][\"paid\"], False)\n self.assertEquals(historic[1][\"paid\"], False)\n\n def test_update_parking_ocurrency_checkout(self):\n po_model = ParkingOcurrencyModel.objects.create(\n paid=True, left=False, auto=self.auto\n )\n po_entity = self.repo.update_parking_ocurrency_checkout(parking=po_model)\n\n self.assertIsInstance(po_entity, object)\n self.assertEquals(po_entity.paid, True)\n self.assertEquals(po_entity.left, True)\n self.assertEquals(po_entity.auto_plate, \"ABC-1234\")\n\n def test_update_parking_ocurrency_pay(self):\n po_model = ParkingOcurrencyModel.objects.create(\n paid=False, left=False, auto=self.auto\n )\n\n po_entity = self.repo.update_parking_ocurrency_pay(parking=po_model)\n self.assertIsInstance(po_entity, object)\n self.assertEquals(po_entity.paid, True)\n self.assertEquals(po_entity.left, False)\n self.assertEquals(po_entity.auto_plate, \"ABC-1234\")\n" } ]
36
sweetie98/DUMMY-PERSONAL-ASSISTANT-WITH-PYTHON
https://github.com/sweetie98/DUMMY-PERSONAL-ASSISTANT-WITH-PYTHON
26bdd46b28015fb47b9ea36ef44c3e4892b18ca3
fb977f085f6df325e671ffbbb2e0edbcbd84b93f
144566e77b0207664d977451d262352dc6136bdb
refs/heads/master
2022-11-26T20:11:21.947463
2020-08-10T08:42:23
2020-08-10T08:42:23
286,418,435
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8087855577468872, "alphanum_fraction": 0.8165374398231506, "avg_line_length": 54.28571319580078, "blob_id": "ee392e005c5c3801da80ea8f3d13ea2cf396c6ff", "content_id": "cadd889591cc7ba5038b9a66ba52c0168fcb455f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 387, "license_type": "no_license", "max_line_length": 174, "num_lines": 7, "path": "/README.md", "repo_name": "sweetie98/DUMMY-PERSONAL-ASSISTANT-WITH-PYTHON", "src_encoding": "UTF-8", "text": "# DUMMY-PERSONAL-ASSISTANT-WITH-PYTHON\nA dummy personal assistant program for personal use. This program uses speech recognition to recognize your voice and responds based on answers of WolframAlpha and Wikipedia.\n\nModules used are-\n1) WolframAlpha and Wikipedia libraries to get answers for our question.\n2) speech recognition to get user audio input.\n3) Tkinter to display the answer.\n" }, { "alpha_fraction": 0.44489383697509766, "alphanum_fraction": 0.46258845925331116, "avg_line_length": 34, "blob_id": "b360643868406a7958a6817ecde6d27d861f9643", "content_id": "f751305ab250be437f77e9ccfaadbafebf2c1bce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1978, "license_type": "no_license", "max_line_length": 115, "num_lines": 55, "path": "/dummy_assistant.py", "repo_name": "sweetie98/DUMMY-PERSONAL-ASSISTANT-WITH-PYTHON", "src_encoding": "UTF-8", "text": "import wolframalpha\r\nimport wikipedia\r\nfrom tkinter import *\r\n#import tkinter_messagebox\r\nimport speech_recognition as sr\r\n\r\nwhile True:\r\n r=sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n r.adjust_for_ambient_noise(source, duration = 1)\r\n r.energy_threshold=140\r\n r.pause_threshold=0.5\r\n print(\"speak now...\")\r\n audio=r.listen(source)\r\n try:\r\n text=r.recognize_google(audio)\r\n print(text)\r\n if text=='stop':\r\n break\r\n else:\r\n window =Tk()\r\n #window.geometry(\"700x600\")\r\n try:\r\n app_id=\"HQVT8K-XXYAW3HETJ\"\r\n client=wolframalpha.Client(app_id)\r\n res = client.query(text)\r\n answer= next(res.results).text\r\n T=Text(window,bg='pink')\r\n T.pack(expand=True)\r\n T.insert(END,answer)\r\n window.after(8000,lambda:window.destroy())\r\n window.mainloop()\r\n except:\r\n answer=wikipedia.summary(text)\r\n T=Text(window,bg='pink')\r\n T.pack(expand=True)\r\n T.insert(END,answer)\r\n window.after(20000,lambda:window.destroy())\r\n window.mainloop()\r\n \r\n except:\r\n answer=\"oops!Didn't get you.Try again\"\r\n print(answer)\r\n \"\"\"\r\n try:\r\n app_id=\"HQVT8K-XXYAW3HETJ\"\r\n client=wolframalpha.Client(app_id)\r\n answer= next(res.results).text\r\n label1= label(window,justify='LEFT',compound='CENTER',padx=10,text=answer,font='times 15 bold')\r\n label1.pack()\r\n window.after(5000,lambda:window.destroy())\r\n window.mainloop()\r\n except:\r\n \"\"\"" } ]
2
kkastr/clean_empty_space
https://github.com/kkastr/clean_empty_space
eed95086d03c84a116a2000e1c0438e42e49d7e2
60c417182af6893adfbae5f44b1cf3e88a472eea
08e6b9fe94335a7ce024078545bd48f0c9f79e6a
refs/heads/master
2021-01-20T23:26:35.018646
2015-06-04T18:26:43
2015-06-04T18:26:43
36,887,438
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 24, "blob_id": "379ff36b5730cfaa7f059fb3c2887bf1efce5b28", "content_id": "0c9b60ffd67678da74ddf35d8f5f1e204cbd9006", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/README.md", "repo_name": "kkastr/clean_empty_space", "src_encoding": "UTF-8", "text": "# clean_empty_space\nCleans empty space from files\n" }, { "alpha_fraction": 0.7175140976905823, "alphanum_fraction": 0.7288135886192322, "avg_line_length": 15.090909004211426, "blob_id": "c9ebee566066a66797f9ef1daf624861a8da977a", "content_id": "015d7afa04e4a40e2681f70cbf8fd012a535f92a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/clean_empty_space.py", "repo_name": "kkastr/clean_empty_space", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom numpy import *\nimport fileinput\n\n\nfor line in fileinput.input(sys.argv[1:]):\n\tcleanline = line.strip()\n\tif cleanline:\n\t\tprint(cleanline)\n" } ]
2
ticotheps/twilio-programmable-voice
https://github.com/ticotheps/twilio-programmable-voice
5ebf2fb245b6d838c39f2eaf7ede742bebf974a2
5794e30f81121f914110aa08469192437a3ea0a4
395fa3d5495ea7c726643450409b26f009113d56
refs/heads/master
2022-06-18T01:31:08.938012
2020-05-07T07:04:48
2020-05-07T07:04:48
261,914,252
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.7246376872062683, "avg_line_length": 23.352941513061523, "blob_id": "8beeca8927d26945bca7d8beb8cf1da179cca462", "content_id": "12a3a19bec8bfca1d9abc03713d68c61cfc532ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/send_text.py", "repo_name": "ticotheps/twilio-programmable-voice", "src_encoding": "UTF-8", "text": "from twilio.rest import Client\nfrom decouple import config\n\nACCOUNT_SID = config(\"ACCOUNT_SID\")\nAUTH_TOKEN = config(\"AUTH_TOKEN\")\n\nMY_PHONE_NUMBER = config(\"MY_PHONE_NUMBER\")\nMY_TWILIO_NUMBER = config(\"MY_TWILIO_NUMBER\")\n\nclient = Client(ACCOUNT_SID, AUTH_TOKEN)\n\nmessage = client.messages.create(\n to=MY_PHONE_NUMBER, \n from_=MY_TWILIO_NUMBER,\n body=\"Hello from Tico's MacBook Pro!\")\n\nprint(message.sid)\n" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 27, "blob_id": "0d306fe837c83610dce9659f87c464236bda3048", "content_id": "60ecf59f2cc22da49774e7a16f9b064e374d23ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/README.md", "repo_name": "ticotheps/twilio-programmable-voice", "src_encoding": "UTF-8", "text": "# twilio-programmable-voice\n" }, { "alpha_fraction": 0.4518272280693054, "alphanum_fraction": 0.6777408719062805, "avg_line_length": 14.8421049118042, "blob_id": "bf1bd6f41e3dbec151154f9d4426c679f4e17d6e", "content_id": "20496fd274fba6426bef564d13965a68cf5600d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 301, "license_type": "no_license", "max_line_length": 21, "num_lines": 19, "path": "/requirements.txt", "repo_name": "ticotheps/twilio-programmable-voice", "src_encoding": "UTF-8", "text": "certifi==2020.4.5.1\nchardet==3.0.4\nclick==7.1.2\nFlask==1.1.2\nidna==2.9\nitsdangerous==1.1.0\nJinja2==2.11.2\nMarkupSafe==1.1.1\npip-save==0.2.0\npip-tools==5.1.2\nPyJWT==1.7.1\npython-decouple==3.3\npython-dotenv==0.13.0\npytz==2020.1\nrequests==2.23.0\nsix==1.9.0\ntwilio==6.39.0\nurllib3==1.25.9\nWerkzeug==1.0.1\n" }, { "alpha_fraction": 0.7109004855155945, "alphanum_fraction": 0.7109004855155945, "avg_line_length": 22.5, "blob_id": "ba14f851b381faf6ae8fabf15fa9c9d5ba7f6b3d", "content_id": "239bd5123ab46c0cf0491ae3d1925d6f86d329df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/make_call.py", "repo_name": "ticotheps/twilio-programmable-voice", "src_encoding": "UTF-8", "text": "from twilio.rest import Client\nfrom decouple import config\n\nACCOUNT_SID = config(\"ACCOUNT_SID\")\nAUTH_TOKEN = config(\"AUTH_TOKEN\")\n\nMY_PHONE_NUMBER = config(\"MY_PHONE_NUMBER\")\nMY_TWILIO_NUMBER = config(\"MY_TWILIO_NUMBER\")\n\nclient = Client(ACCOUNT_SID, AUTH_TOKEN)\n\ncall = client.calls.create(\n twiml=\"<Response><Say>What's up, Bro?</Say></Response>\",\n to=MY_PHONE_NUMBER,\n from_=MY_TWILIO_NUMBER\n)\n\nprint(call.sid)" } ]
4
daratovstyga/web_http
https://github.com/daratovstyga/web_http
cbe6d4a91f1da4d5784d20b44ef88305715f7fea
4dc7ab1005523c2a770e1c57bcf8873fc4b25049
e3ca14b37779a3d65f5fd8f8abdf11e37a37c715
refs/heads/master
2023-03-30T21:24:37.010110
2021-04-05T20:41:43
2021-04-05T20:41:43
354,853,854
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6308139562606812, "alphanum_fraction": 0.6424418687820435, "avg_line_length": 26.520000457763672, "blob_id": "c5330bd299246945f16e652a4507e23713b3e3c6", "content_id": "e7439f13cef4e51cfe3ad33efeabccf5377cb18e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 94, "num_lines": 25, "path": "/search.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import sys\nfrom io import BytesIO\nimport requests\nfrom PIL import Image\nimport get_ap\nfrom get_cord import get_cord\n\nll = get_cord()\norg = get_ap.get_ap(ll)[0]\n\nmap_params = {\n \"ll\": ll,\n \"l\": \"map\",\n \"pt\": ll + ',round' + '~' + ','.join(map(str, org[3])) + ',comma'\n}\n\nmap_api_server = \"http://static-maps.yandex.ru/1.x/\"\nresponse = requests.get(map_api_server, params=map_params)\nprint(f'Адрес: {org[2]}\\nНазвание: {org[1]}\\nВремя работы: {org[4]}\\nРасстояние: {org[0]} м.')\nif not response:\n print(\"Ошибка выполнения запроса:\")\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\nelse:\n Image.open(BytesIO(response.content)).show()\n" }, { "alpha_fraction": 0.5804816484451294, "alphanum_fraction": 0.6185044646263123, "avg_line_length": 36.619049072265625, "blob_id": "267f24c352228247d73736fd92e97b1589f58f3c", "content_id": "e036ddf741214b9943439e5a8bcfd015bd18393b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "no_license", "max_line_length": 99, "num_lines": 21, "path": "/get_cord.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import sys\nimport requests\n\n\ndef get_cord():\n # toponym_to_find = \"Москва, ул. Ак. 
Королева, 12\"\n toponym_to_find = \" \".join(sys.argv[1:])\n geocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n geocoder_params = {\n \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n \"geocode\": toponym_to_find,\n \"format\": \"json\"}\n response = requests.get(geocoder_api_server, params=geocoder_params)\n if not response:\n print(\"Ошибка выполнения запроса:\")\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\n else:\n json_response = response.json()\n toponym = json_response[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n return ','.join(toponym['Point']['pos'].split())" }, { "alpha_fraction": 0.6502976417541504, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 34.3684196472168, "blob_id": "7dbb5051d7a00bd938ad99c5374d1fecf9645424", "content_id": "ce7fbe852977ae5211abd28977c52996193eeb0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 140, "num_lines": 19, "path": "/district.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import sys\nimport requests\nfrom get_cord import get_cord\n\nll = get_cord()\ngeocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\ngeocoder_params = {\n \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n \"geocode\": ll,\n \"kind\": 'district',\n \"format\": \"json\"}\nresponse = requests.get(geocoder_api_server, params=geocoder_params)\nif not response:\n print(\"Ошибка выполнения запроса:\")\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\nelse:\n json_response = response.json()\n print(json_response[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"metaDataProperty\"][\"GeocoderMetaData\"][\"text\"])\n" }, { "alpha_fraction": 0.5758895874023438, "alphanum_fraction": 0.5918663740158081, "avg_line_length": 28.934782028198242, "blob_id": "a63be1aee88f4e9b738df664041caed219e09ff8", "content_id": "b2425c2cfd23dd9dfc2469487a92b8affd75501c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1454, "license_type": "no_license", "max_line_length": 78, "num_lines": 46, "path": "/guessing_game.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport random\nimport pygame\nimport requests\nfrom get_cord_new import get_cord\n\nmap_files = []\ncities = ['Москва', 'Курган', 'Санкт-Петербург', 'Владивосток', 'Калининград']\npos = [get_cord(i) for i in cities]\n\nfor i in range(len(pos)):\n map_params = {\"ll\": pos[i][0],\n \"spn\": pos[i][1],\n \"size\": '600,450',\n \"l\": random.choice([\"map\", \"sat\"])}\n map_api_server = \"http://static-maps.yandex.ru/1.x/\"\n response = requests.get(map_api_server, params=map_params)\n if not response:\n print(\"Ошибка выполнения запроса:\")\n print(response.url)\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\n else:\n map_file = \"map.png\"\n with open(map_file, \"wb\") as file:\n file.write(response.content)\n map_files.append(pygame.image.load(map_file))\n os.remove(map_file)\n\ni = 0\nrandom.shuffle(map_files)\npygame.init()\nscreen = pygame.display.set_mode((600, 450))\nscreen.blit(map_files[i], (0, 0))\npygame.display.flip()\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type 
== pygame.KEYDOWN:\n i = (i + 1) % len(map_files)\n screen.blit(map_files[i], (0, 0))\n pygame.display.flip()\npygame.quit()\n" }, { "alpha_fraction": 0.5250154137611389, "alphanum_fraction": 0.5663990378379822, "avg_line_length": 35.79545593261719, "blob_id": "e560b2402c542dd9c189338d04bed23ed89dbfe3", "content_id": "4f0a6f4fafaacb3c4f3ccedc62f61c8f677cf1aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1654, "license_type": "no_license", "max_line_length": 98, "num_lines": 44, "path": "/get_ap.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import math\nimport sys\nimport requests\n\n\ndef dist(cord1, cord2):\n dx = abs(cord1[0] - cord2[0]) * 111 * 1000 * math.cos(math.radians((cord1[1] + cord2[1]) / 2))\n dy = abs(cord1[1] - cord2[1]) * 111 * 1000\n return int((dx * dx + dy * dy) ** 0.5)\n\n\ndef get_ap(ll, k=1):\n ll_float = list(map(float, ll.split(',')))\n search_api_server = \"https://search-maps.yandex.ru/v1/\"\n api_key = \"920e2579-8aef-445d-a34d-ed523688c844\"\n search_params = {\n \"apikey\": api_key,\n \"text\": \"аптека\",\n \"lang\": \"ru_RU\",\n \"ll\": ll,\n \"type\": \"biz\",\n \"results\": 100\n }\n response = requests.get(search_api_server, params=search_params)\n if not response:\n print(\"Ошибка выполнения запроса:\")\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\n else:\n json_response = response.json()\n list1 = []\n for organization in json_response[\"features\"]:\n org_name = organization[\"properties\"][\"CompanyMetaData\"][\"name\"]\n org_address = organization[\"properties\"][\"CompanyMetaData\"][\"address\"]\n org_point = organization[\"geometry\"][\"coordinates\"]\n if 'Hours' in organization['properties']['CompanyMetaData']:\n org_times = organization['properties']['CompanyMetaData']['Hours']['text']\n else:\n org_times = ''\n\n org_dist = dist(ll_float, org_point)\n list1.append((org_dist, org_name, org_address, org_point, org_times))\n\n return list(sorted(list1, key=lambda x: (x[0], x[1], x[2], x[3], x[4])))[:k]\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6174863576889038, "avg_line_length": 23.7297306060791, "blob_id": "bfa6c341260490c34cda916766bbbb75f3dce054", "content_id": "edb7347de7ce47768fda7bcbe3df2f4889308cc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 957, "license_type": "no_license", "max_line_length": 95, "num_lines": 37, "path": "/10ap.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import sys\nfrom io import BytesIO\nimport requests\nfrom PIL import Image\nfrom get_cord import get_cord\nimport get_ap\n\nll = get_cord()\ninfo = get_ap.get_ap(ll, 10)\n\nrc = ',pm2gnm~'.join([','.join(map(str, org[3])) for org in info if org[4] == 'круглосуточно'])\nif rc:\n rc += ',pm2gnm~'\n\nrcn = ',pm2blm~'.join([','.join(map(str, org[3])) for org in info if org[4]])\nif rcn:\n rcn += ',pm2blm~'\n\nno_data = ',pm2grm~'.join([','.join(map(str, org[3])) for org in info if org[4] == ''])\nif no_data:\n no_data += ',pm2grm'\n\n\nmap_params = {\n \"ll\": ll,\n \"l\": \"map\",\n \"pt\": rc + rcn + no_data\n}\n\nmap_api_server = \"http://static-maps.yandex.ru/1.x/\"\nresponse = requests.get(map_api_server, params=map_params)\nif not response:\n print(\"Ошибка выполнения запроса:\")\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\nelse:\n 
Image.open(BytesIO(response.content)).show()\n" }, { "alpha_fraction": 0.5765853524208069, "alphanum_fraction": 0.6107317209243774, "avg_line_length": 35.60714340209961, "blob_id": "11bcf5f7f0d1844d51bdc6fb8a54f385773cb72e", "content_id": "f7dd659a6b8e3d01a2811d107142565189cb3b6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/get_cord_new.py", "repo_name": "daratovstyga/web_http", "src_encoding": "UTF-8", "text": "import sys\nimport requests\n\n\ndef optimal_spn(toponym):\n to = toponym[\"boundedBy\"][\"Envelope\"]\n lc = list(map(float, to[\"lowerCorner\"].split()))\n uc = list(map(float, to[\"upperCorner\"].split()))\n spn = ','.join([str(abs(uc[0] - lc[0]) / 50), str(abs(uc[1] - lc[1]) / 50)])\n return spn\n\n\ndef get_cord(toponym_to_find):\n geocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n geocoder_params = {\n \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n \"geocode\": toponym_to_find,\n \"format\": \"json\"}\n response = requests.get(geocoder_api_server, params=geocoder_params)\n if not response:\n print(\"Ошибка выполнения запроса:\")\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\n else:\n json_response = response.json()\n toponym = json_response[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n spn = optimal_spn(toponym)\n return (','.join(toponym['Point']['pos'].split()), spn)\n" } ]
7
AjaySoni98/blog-site
https://github.com/AjaySoni98/blog-site
6c74d1c35fa2ca7c649a38fda484d90eeb3690c4
a0bb6d4014e53f62890247414c9a4d00134b5a8e
4b6cea224e4efff95815e15d7d1bc6281e4c882c
refs/heads/master
2020-12-01T06:07:47.393840
2020-01-14T17:20:30
2020-01-14T17:20:30
229,704,557
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.594960629940033, "alphanum_fraction": 0.5962204933166504, "avg_line_length": 31.79787254333496, "blob_id": "26509d2dd77b106ad8d35147aafc636d9c68630a", "content_id": "15441bf82c1b8133bf50c42debf5bd0b2240ddf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3175, "license_type": "no_license", "max_line_length": 164, "num_lines": 94, "path": "/Blogging/blogApp.py", "repo_name": "AjaySoni98/blog-site", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, url_for, request, redirect\r\nfrom datetime import date\r\nfrom werkzeug.utils import secure_filename\r\nfrom flask_mysqldb import MySQL\r\nimport yaml\r\nimport os\r\nimport tempfile\r\n\r\nUPLOAD_FOLDER = 'static/images'\r\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\r\n\r\napp = Flask(__name__)\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\ndb = yaml.load(open('db.yaml'))\r\napp.config['MYSQL_HOST'] = db['mysql_host']\r\napp.config['MYSQL_USER'] = db['mysql_user']\r\napp.config['MYSQL_PASSWORD'] = db['mysql_password']\r\napp.config['MYSQL_DB'] = db['mysql_db']\r\n\r\nmysql = MySQL(app)\r\n\r\[email protected](\"/\")\r\[email protected](\"/home\")\r\ndef parallax():\r\n return render_template('parallax.html')\r\n\r\[email protected]('/signup', methods=['POST'])\r\ndef signup():\r\n if request.method == 'POST':\r\n # Fetch form data\r\n userDetails = request.form\r\n name = userDetails['name']\r\n email = userDetails['email']\r\n username = userDetails['username']\r\n password = userDetails['password']\r\n confpassword = userDetails['confpassword']\r\n profilepic = userDetails['profilepuc']\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"INSERT INTO user_signup(name, email, username, password, profilepic) VALUES(%s, %s, %s, %s, %s)\",(name, email, username, password, profilepic))\r\n mysql.connection.commit()\r\n cur.close()\r\n return render_template('parallax.html')\r\n \r\n return render_template('parallax.html')\r\n\r\n\r\[email protected]('/login', methods=['GET', 'POST'])\r\ndef login():\r\n print(\"Me aalo\")\r\n if request.method == 'POST':\r\n userLoginDetails = request.form\r\n email = userLoginDetails['email']\r\n password = userLoginDetails['password']\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"SELECT * from user_signup where email='\" + email + \"' and password='\" + password + \"'\")\r\n cur.execute(\"SELECT * FROM blog\")\r\n data = cur.fetchall() \r\n mysql.connection.commit()\r\n cur.close()\r\n return render_template('login-home.html', data=data)\r\n \r\n\r\n else:\r\n return \"failed\"\r\n\r\[email protected](\"/login-home\")\r\ndef home():\r\n return render_template('login-home.html')\r\n\r\[email protected](\"/post\", methods=['POST'])\r\ndef post():\r\n if request.method == 'POST':\r\n post = request.form\r\n title = post['post-title']\r\n descp = post['post-desc']\r\n imag = request.files['post-image']\r\n if imag:\r\n imagname = secure_filename(imag.filename)\r\n nam = tempfile.NamedTemporaryFile()\r\n fname = nam.name[::-1]\r\n imagname = fname[0:10]\r\n imag.save(os.path.join(app.config['UPLOAD_FOLDER'], imagname))\r\n image = \"static/images/\"+imagname\r\n today = date.today()\r\n datee = today.strftime(\"%d %B, %Y\")\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"INSERT INTO blog(title, description, image, date) VALUES(%s, %s, %s, %s)\",(title, descp, image, datee))\r\n mysql.connection.commit()\r\n cur.close()\r\n return redirect(url_for('home'))\r\n\r\nif __name__ 
== '__main__':\r\n app.run(debug=True)" }, { "alpha_fraction": 0.4508301317691803, "alphanum_fraction": 0.4597701132297516, "avg_line_length": 32.130435943603516, "blob_id": "397c88fe9efb1b6a2be9ce84f298dc81ed21884c", "content_id": "673800140db13271e8ad31f25bbeefd1283a62da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 783, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/Blogging/templates/login-home.html", "repo_name": "AjaySoni98/blog-site", "src_encoding": "UTF-8", "text": "{% extends 'navbar.html' %}\r\n{% block blogsfeed %}\r\n{% for item in data %}\r\n<div class=\"blog\" id=\"blur2\" style=\"margin-bottom: 40px;\">\r\n <div class=\"profilepic\"></div>\r\n <div class=\"blogcontent\">\r\n <div class=\"blogtitle\">\r\n <p class=\"blogtitlee\" align=\"justify\">{{ item[0] }}</p>\r\n <a class=\"uname\" href=\"#\">username</a>\r\n <div class=\"datetime\"><p>{{ item[3] }}</p></div>\r\n </div>\r\n <div class=\"ekdiv\">\r\n <div class=\"oimage\">\r\n <div class=\"bimage\"><img src=\"{{ item[2] }}\"></div>\r\n </div>\r\n <div class=\"otext\">\r\n <div class=\"btext\"><p align=\"justify\">{{ item[1] }}</p></div>\r\n </div>\r\n </div>\r\n </div>\r\n</div>\r\n{% endfor %}\r\n{% endblock %}" }, { "alpha_fraction": 0.593923807144165, "alphanum_fraction": 0.6589644551277161, "avg_line_length": 31.01369857788086, "blob_id": "f91a653ff46263ee4ed0a6610e1eb0fa14b9a508", "content_id": "49c2e2c33b7b5bf7fab87c8a06b2da4cefc7ed2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2337, "license_type": "no_license", "max_line_length": 234, "num_lines": 73, "path": "/Blogging/blogbase.sql", "repo_name": "AjaySoni98/blog-site", "src_encoding": "UTF-8", "text": "-- phpMyAdmin SQL Dump\n-- version 4.8.4\n-- https://www.phpmyadmin.net/\n--\n-- Host: 127.0.0.1:3306\n-- Generation Time: Jan 14, 2020 at 05:19 PM\n-- Server version: 5.7.24\n-- PHP Version: 7.2.14\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\nSET AUTOCOMMIT = 0;\nSTART TRANSACTION;\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8mb4 */;\n\n--\n-- Database: `blogbase`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `blog`\n--\n\nDROP TABLE IF EXISTS `blog`;\nCREATE TABLE IF NOT EXISTS `blog` (\n `title` varchar(120) NOT NULL,\n `description` varchar(60000) NOT NULL,\n `image` varchar(120) NOT NULL,\n `date` varchar(120) NOT NULL\n) ENGINE=MyISAM DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `blog`\n--\n\nINSERT INTO `blog` (`title`, `description`, `image`, `date`) VALUES\n('Baby Yodaaa!', 'Finally i have been able to do the shit i have been trying to do from such a long time. Yaaaaaaaaaaaaaaaaaaayyyyy!!!!!!!!!!!!!!!!!!\\r\\nAnd baby yoda is the Cutest!!!', 'static/images/y8hi3rlepm', '14 January, 2020'),\n('Milly Bobby Brown Everybody!', 'And this is now Milly Bobby Brown. 
She is the best.', 'static/images/iozb2rv6pm', '14 January, 2020'),\n('heyyyy', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadddddddddddddd ddddddddddddddd dddddddddddddd ddddddddddd', 'static/images/1_b_v1ippm', '14 January, 2020');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `user_signup`\n--\n\nDROP TABLE IF EXISTS `user_signup`;\nCREATE TABLE IF NOT EXISTS `user_signup` (\n `name` varchar(120) NOT NULL,\n `email` varchar(120) NOT NULL,\n `username` varchar(120) NOT NULL,\n `password` varchar(120) NOT NULL,\n `profilepic` longblob\n) ENGINE=MyISAM DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `user_signup`\n--\n\nINSERT INTO `user_signup` (`name`, `email`, `username`, `password`, `profilepic`) VALUES\n('Ajay', '[email protected]', 'ajay03', 'ajaysoni0312', 0x3132333534353436352e6a7067);\nCOMMIT;\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n" }, { "alpha_fraction": 0.6791132092475891, "alphanum_fraction": 0.691948652267456, "avg_line_length": 37.04545593261719, "blob_id": "b3bcc02189e7b9aa6039717a7ffbbc62bd6ca765", "content_id": "639a869b1dcef6ea2a9df0cec9db0f5a9c2969b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 857, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/Blogging/static/main.js", "repo_name": "AjaySoni98/blog-site", "src_encoding": "UTF-8", "text": "function loginFunc(){\r\n document.getElementById(\"login-form\").style.visibility = \"visible\";\r\n document.getElementById(\"signup-form\").style.visibility = \"hidden\";\r\n}\r\n\r\nfunction signupFunc(){\r\n document.getElementById(\"signup-form\").style.visibility = \"visible\";\r\n document.getElementById(\"login-form\").style.visibility = \"hidden\";\r\n}\r\n\r\nfunction newPost(){\r\n document.getElementById(\"post\").classList.toggle(\"postbox1\");\r\n document.getElementById(\"blur\").style.filter = \"blur(1px)\";\r\n document.getElementById(\"blur1\").style.filter = \"blur(1px)\";\r\n document.getElementById(\"blur2\").style.filter = \"blur(1px)\";\r\n}\r\n\r\nfunction deblur(){\r\n document.getElementById(\"blur\").style.filter = \"blur(0px)\";\r\n document.getElementById(\"blur1\").style.filter = \"blur(0px)\";\r\n document.getElementById(\"blur2\").style.filter = \"blur(0px)\";\r\n}" } ]
4
naomitr/Calendar
https://github.com/naomitr/Calendar
726fbd3f4beaafb3f41a182d1442d4511bd1ae29
ef67b827a700c8ce281c841afb3f517d92b0df70
e3ec0f5a6d4584b88589d46847df70731b947183
refs/heads/master
2020-06-17T23:08:21.245545
2019-07-09T22:46:31
2019-07-09T22:46:31
196,094,073
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6326530575752258, "alphanum_fraction": 0.7091836929321289, "avg_line_length": 30.66666603088379, "blob_id": "ed0d8b84a8a97012ef465c523373b75400be2f10", "content_id": "fd07fc97796984146474b737fce38fb6edf549ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/calendar.py", "repo_name": "naomitr/Calendar", "src_encoding": "UTF-8", "text": "import calendar\r\n# calendar.setfirstweekday(calendar.FRIDAY)\r\n# the_day = calendar.weekday(2002, 7, 19)\r\n# print(the_day)\r\ncalendar = calendar.calendar(2019, w=2, l=1, c=6, m=3)\r\nprint(calendar)\r\n" } ]
1
jbdatascience/MapReduce-Machine-Learning
https://github.com/jbdatascience/MapReduce-Machine-Learning
bb3d08f6bdeac42f098ac4b80848bb9600073a59
4981bdfb0c80ea998bc83cf94b83e4ae8814d64b
a352b8e18eedb95e24ce1c5a65db8aaf5b52c157
refs/heads/master
2020-03-26T01:45:44.363311
2015-06-07T01:14:38
2015-06-07T01:14:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.532974898815155, "alphanum_fraction": 0.5419355034828186, "avg_line_length": 32.42499923706055, "blob_id": "0dda52b12a27fcbdc42cbf939c7b7ebd15eeba40", "content_id": "65b578037a30db672cbe54eccee4819c4ddc33f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2790, "license_type": "no_license", "max_line_length": 81, "num_lines": 80, "path": "/Random Sample MapReduce/SimpleRandomSampleNoReplacementMR.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 7 16:01:20 2015\n\n@author: amazaspshaumyan\n\"\"\"\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom mrjob.protocol import RawValueProtocol, JSONProtocol\nimport random\nimport heapq\n\n\nclass SimpleRandomSampleNoReplacementMR(MRJob):\n ''' Simple Random Sampling without replacement for relatively small sample\n sizes. \n Do not use for large sample sizes that can not fit in memory (current code\n uses only one reducer)\n \n Each line in input data is assigned random priority then n lines with largest\n corresponding priorities are selected (where n is size of random sample)\n\n '''\n \n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTOCOL = RawValueProtocol\n \n def __init__(self,*args,**kwargs):\n super(SimpleRandomSampleNoReplacementMR,self).__init__(*args, **kwargs)\n self.pq = [] \n \n def configure_options(self):\n super(SimpleRandomSampleNoReplacementMR,self).configure_options()\n self.add_passthrough_option(\"--sample-size\",\n type= int,\n help = \"number of elements in sample\")\n \n def load_options(self,args):\n super(SimpleRandomSampleNoReplacementMR,self).load_options(args)\n if self.options.sample_size is None:\n self.option_parser.error(\"You need to specify sample size\")\n else:\n self.n = self.options.sample_size\n \n def mapper_rs(self,_,line):\n r = random.randrange(1000000)\n if len(self.pq) < self.n:\n heapq.heappush(self.pq,(r,line))\n else:\n if self.pq[0][0] < r:\n heapq.heappushpop(self.pq,(r,line))\n \n def mapper_rs_final(self):\n yield 1, self.pq\n \n def reducer_rs(self,key,samples):\n pq_final = []\n for sample in samples:\n for element in sample:\n if len(pq_final) < self.n:\n pq_final.append(element)\n if len(pq_final)==self.n:\n heapq.heapify(pq_final)\n else:\n if pq_final[0][0] < element[0]:\n heapq.heappushpop(pq_final,element)\n for r,line in pq_final:\n yield None, line\n \n def steps(self):\n return [MRStep(mapper = self.mapper_rs,\n mapper_final = self.mapper_rs_final,\n reducer = self.reducer_rs)]\n \nif __name__==\"__main__\":\n SimpleRandomSampleNoReplacementMR.run()\n \n \n \n \n \n \n \n \n " }, { "alpha_fraction": 0.5345167517662048, "alphanum_fraction": 0.5388903021812439, "avg_line_length": 38.232322692871094, "blob_id": "977ae52dfa4b806fa0f8983f8c89932d68679fb2", "content_id": "10dd3d37caa555444070d5de91fafd7b8aea87cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11661, "license_type": "no_license", "max_line_length": 116, "num_lines": 297, "path": "/Gaussian Mixture Model MapReduce/InitialiseGaussianMixtures.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "'''\nInitialisation step for MapReduce implementation of GMM.\n\nUsing MapReduce paradigm samples data from large dataset, so that sample fits\ninto one machine, then run K-means algorithm on sampled 
datato find centroids \nand cluster allocation of points.\nCluster allocation of data points is used to get initial parameters for GMM \n(i.e. : mixing coefficients (pdf of latent variable), mean vectors and covariance\nmatrix for each cluster)\n'''\n\nfrom mrjob.protocol import RawValueProtocol,JSONProtocol, JSONValueProtocol\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport random\nimport heapq\nimport numpy as np\n\n\n\ndef extract_features(line):\n ''' extracts features from line of input'''\n data = line.strip().split(\",\")\n return [ float(e) for e in data[1:] ]\n\n\n######################### K-means ###########################################\n\n\nclass KmeansInitGMM(object):\n '''\n K-means algorihm for clustering.\n\n Parameters:\n -----------\n \n clusters - (int) number of expected clusters\n dim - (int) dimensionality of input\n epsilon - (float) convergence threshold for k-means\n iteration_limit - (int) maximum number of iteration, where each \n iteration consists of e_step and m_step\n data - (list) list of lists, where each inner list is \n single data point\n \n '''\n \n def __init__(self, clusters, dim, epsilon, iteration_limit, data):\n self.k = clusters\n self.data = [extract_features(line) for line in data]\n self.m = dim\n self.r = [0]*len(data) # vector of cluster assignments\n self.convergence_epsilon = epsilon\n self.iteration_limit = iteration_limit\n \n \n def loss(self):\n ''' \n Calculates loss function of K-means\n J = sum_n[ sum_k [r_n_k*||x_n-mu_k||^2]]]\n '''\n r = self.r\n mu = self.clusters\n J = sum([np.dot((np.array(x)-mu[r[i]]).T,np.array(x)-mu[r[i]]) for i,x in enumerate(self.data)])\n return J\n \n def initialise(self):\n ''' randomly choses points from list'''\n self.clusters = random.sample(self.data,self.k) \n \n def e_step(self):\n ''' E-step in K means algorithm, finds assignment of points to centroids'''\n for n,data_point in enumerate(self.data):\n min_cl = 0\n min_sq_dist = -1\n for i,cluster in enumerate(self.clusters):\n dist_sq = sum([ (data_point[i]-cluster[i])**2 for i in range(self.m)])\n if min_sq_dist==-1:\n min_sq_dist = dist_sq\n else:\n if dist_sq < min_sq_dist:\n min_sq_dist = dist_sq\n min_cl = i\n self.r[n] = min_cl\n\n \n def m_step(self):\n ''' M-step in K-means algorithm, finds centroids that minimise loss function'''\n self.clusters = [[0]*self.m for i in range(self.k)] # update clusters\n cluster_counts = [0]*self.k\n for i,x in enumerate(self.data):\n cluster_counts[self.r[i]]+=1\n self.clusters[self.r[i]] = [self.clusters[self.r[i]][j]+x[j] for j in range(self.m)]\n mean_vector = lambda x,n: [float(el)/n for el in x]\n self.clusters = [mean_vector(self.clusters[i],cluster_counts[i]) for i in range(self.k)] \n \n \n def run_k_means(self):\n ''' \n Runs single pass of k-means algorithm\n '''\n self.initialise() # initialise clusters\n next_loss = self.loss() # calculate loss function for initial clusters\n prev_loss = next_loss +2*self.convergence_epsilon\n iteration = 0\n losses = []\n while prev_loss - next_loss > self.convergence_epsilon and iteration < self.iteration_limit:\n self.e_step()\n self.m_step()\n prev_loss = next_loss\n losses.append(prev_loss)\n next_loss = self.loss()\n iteration+=1\n \n \n def run(self, reruns = 10):\n ''' \n Runs k-means several times and choosed and chooses parameters (mean vectors,\n point cluster allocation) from the k-means run with smallest value of \n loss function.\n \n (Since loss function is not convex,it is not guaranteed that parameters \n obtained from 
single k-means algorithm pass will give global minimum\n of k-means loss function)\n '''\n clusters = [[0]*self.m for i in range(self.k)]\n loss_before = -1\n r = self.r\n for i in range(reruns):\n self.run_k_means()\n loss_new = self.loss()\n if loss_before==-1:\n loss_before = loss_new\n clusters = [el[:] for el in self.clusters]\n r = self.r[:]\n else:\n if loss_new < loss_before:\n loss_before = loss_new\n clusters = [el[:] for el in self.clusters]\n r = self.r[:]\n \n self.final_r = r\n self.final_clusters = clusters\n \n \n def gmm_params(self):\n ''' \n Calculates initial parameters for GMM based on cluster allocation of\n points in best K-means\n '''\n total=0\n mixing = [0]*self.k\n covars = [np.zeros([self.m,self.m], dtype = np.float64) for i in range(self.k)]\n mu = [np.zeros(self.m, dtype = np.float64) for i in range(self.k)]\n for i,dp in enumerate(self.data):\n k = self.final_r[i] # cluster\n x = np.array(dp, dtype = np.float64)\n mixing[k]+=1\n total+=1\n mu[k]+=x\n covars[k]+=np.outer(x,x)\n mu = [mu[j]/p for j,p in enumerate(mixing)]\n covars = [1.0/mixing[j]*(covars[j] - mixing[j]*np.outer(mu[j],mu[j])) for j in range(self.k)]\n mixing = [float(p)/total for p in mixing]\n \n matrix_to_list = lambda x: [list(e) for e in x]\n mixing = mixing\n mu = matrix_to_list(mu)\n covariance = [matrix_to_list(e) for e in covars]\n return {\"mixing\":mixing,\"mu\":mu,\"covariance\":covariance}\n\n \n######## intialise parameters of Gaussian Mixture Model #####################\n\n\nclass InitialiseGaussianMixtureMR(MRJob):\n '''\n MapReduce class that initialises parameters of GMM.\n Each mapper assigns random priority to each line of input, chooses n (n = sample size)\n lines with lowest priority level and outputs it.\n Single reducer collects m (where m is number of mappers) lists of size n\n and choses n lines with smallest priority, these final n lines of input\n represent random sample of size n from data. 
Then k-means algorithm is used\n on sampled data to find parameters for initialising.\n \n Command Line Options:\n ---------------------\n \n --sample-size - sample size\n --clusters - number of clusters\n --dimensions - dimensionality of data\n --kmeans-convergence - convergence threshold for k-means convergence\n --iteration-limit - limit on number of iterations for k-means\n --kmeans-reruns - number of times to run k-means\n \n '''\n \n \n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTOCOL = JSONValueProtocol\n \n def __init__(self,*args,**kwargs):\n super(InitialiseGaussianMixtureMR,self).__init__(*args, **kwargs)\n self.pq = [] \n \n def configure_options(self):\n super(InitialiseGaussianMixtureMR,self).configure_options()\n self.add_passthrough_option(\"--sample-size\",\n type= int,\n help = \"number of elements in sample\")\n self.add_passthrough_option(\"--clusters\",\n type = int,\n help = \"number of clusters\")\n self.add_passthrough_option(\"--dimensions\",\n type = int,\n help = \"dimensionality of input data\")\n self.add_passthrough_option(\"--kmeans-convergence\",\n type = float,\n default = 0.01,\n help = \"convergence parameter for K-means loss function\")\n self.add_passthrough_option(\"--iteration-limit\",\n type = int,\n default = 100,\n help = \"largest number of iterations that k-means algorithm is allowed\")\n self.add_passthrough_option(\"--kmeans-reruns\",\n type = int,\n default = 10,\n help = \"number of k-means reruns \")\n \n \n \n def load_options(self, args):\n super(InitialiseGaussianMixtureMR,self).load_options(args)\n # size of sample for k-means, that will initialise parameters of GMM\n if self.options.sample_size is None:\n self.option_parser.error(\"You need to specify sample size\")\n else:\n self.n = self.options.sample_size\n # number of cluters\n if self.options.clusters is None:\n self.option_parser.error(\"You need to specify number of clusters\")\n else:\n self.k = self.options.clusters\n # dimensionality\n if self.options.dimensions is None:\n self.option_parser.error(\"You need to specify dimensionality of data\")\n else:\n self.dim = self.options.dimensions\n \n \n def mapper_initialise_gmm(self,_,line):\n '''\n Randomly samples n lines of input (where n is sample_size option), by\n assigning random priority level and then choosing n lines of input \n with smallest priority level\n '''\n r = random.randrange(1000000)\n if len(self.pq) < self.n:\n heapq.heappush(self.pq,(r,line))\n else:\n if self.pq[0][0] < r:\n heapq.heappushpop(self.pq,(r,line))\n \n def mapper_initialise_gmm_final(self):\n yield 1, self.pq\n \n def reducer_kmeans_initialise_gmm(self,key,samples):\n '''\n Subsamples from mapper output and runs K-means algorithm on subsampled\n data to initialise parameters of GMM. 
\n '''\n pq_final = []\n for sample in samples:\n for element in sample:\n if len(pq_final) < self.n:\n pq_final.append(element)\n if len(pq_final)==self.n:\n heapq.heapify(pq_final)\n else:\n if pq_final[0][0] < element[0]:\n heapq.heappushpop(pq_final,element)\n lines = [line for r,line in pq_final]\n kmeans = KmeansInitGMM(self.k, self.dim, self.options.kmeans_convergence,self.options.iteration_limit,lines)\n kmeans.run(reruns = self.options.kmeans_reruns)\n params = kmeans.gmm_params()\n yield None, params\n \n \n def steps(self):\n return [MRStep(mapper = self.mapper_initialise_gmm,\n mapper_final = self.mapper_initialise_gmm_final,\n reducer = self.reducer_kmeans_initialise_gmm)]\n \nif __name__==\"__main__\":\n InitialiseGaussianMixtureMR.run()\n \n " }, { "alpha_fraction": 0.5640149116516113, "alphanum_fraction": 0.5654132962226868, "avg_line_length": 37.99390411376953, "blob_id": "cd41647f34c64a2d4e2e251350369ae15aaf6052", "content_id": "9458432fa50ab2c6f35658c9ee9a48af77f400da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6436, "license_type": "no_license", "max_line_length": 139, "num_lines": 164, "path": "/Gaussian Discriminant Analysis MapReduce/gda.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom mrjob.protocol import RawValueProtocol,JSONProtocol, JSONValueProtocol\nimport numpy as np\nimport json\n\n################### Helper function & classes #################################\n\n\ndef extract_features(line):\n ''' Extracts data from line of input '''\n data = line.strip.split(\",\")\n return data[1], data[2:]\n \ndef matrix_to_list(input_data):\n return [list(e) for e in input_data]\n \nclass DimensionalityMismatchError(Exception):\n ''' Error when dimensionalities do not match '''\n def __init__(self,expected,real):\n self.exp = expected\n self.real = real\n \n def __str__(self):\n error = \"Expected number of dimensions: \"+str(self.exp)+\" observed: \"+ str(self.real)\n return error\n \n \nclass TargetValueError(Exception):\n ''' Error for target values '''\n def __init__(self,observed):\n self.observed = observed\n \n def __str__(self):\n error = \"Observed value \"+str(self.e) + \" is not target value\"\n return error\n \n \n####################### MapReduce Job ########################################\n\n\nclass GaussianDiscriminantAnalysisMR(MRJob):\n '''\n Calculates parameters required for Linear Discriminant Analysis and \n Quadratic Discrminant Analysis. 
\n \n \n Command Line Options:\n ---------------------\n \n --feature-dimensions - dimensionality of features (dependent variables)\n --targets - list of all valid target values (json-encoded list)\n '''\n \n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTCOL = JSONValueProtocol\n \n \n def __init__(self,*args,**kwargs):\n super(GaussianDiscriminantAnalysisMR,self).__init__(*args,**kwargs)\n self.k = len(self.targets)\n self.priors = [0]*self.k\n self.means = [np.zeros(self.dim) for i in range(self.k)]\n self.covariate = [np.zeros([self.dim,self.dim]) for i in range(self.k)]\n self.total = 0\n self.targets = json.loads(self.targest)\n self.target_set = set(self.targets)\n self.target_to_index = {}\n for i,target in enumerate(self.targets):\n self.target_to_index[target] = i\n \n \n def configure_options(self):\n super(GaussianDiscriminantAnalysisMR,self).configure_options()\n self.add_passthrough_option(\"--feature-dimensions\", \n type = int,\n help = \"dimensionality of features\")\n self.add_passthrough_option(\"--targets\",\n type = str,\n help = \"targets\")\n\n \n def load_options(self,args):\n super(GaussianDiscriminantAnalysisMR,self).load_options(args)\n if self.options.feature_dimension is None:\n self.option_parser.error(\"You must specify dimensionality of data\")\n else:\n self.dim = self.options.feature_dimension\n if self.options.targets is None:\n self.option_parser.error(\"You must specify targets\")\n else:\n self.targets = self.options.targets\n \n \n def mapper_gda(self,_,line):\n '''\n Calculates and summarise intermediate values for each mapper.\n (Intermediate values include number of observations in each class,\n total number of observations etc. )\n '''\n y,features = extract_features(line)\n n = len(features)\n x = np.array(features)\n index = self.target_to_index[y]\n # error if dimensionalities do not match\n if len(features) != self.dim: \n raise DimensionalityMismatchError(self.dim,n)\n # targets are not in set of targets defined\n if y not in self.target_set:\n raise TargetValueError(y)\n self.total+=1\n self.means[index] += x\n self.covariate[index] += np.outer(x,x)\n self.priors[index] += 1\n \n \n def mapper_final_gda(self):\n '''Outputs data summarised for each mapper to reducer'''\n yield 1,{ \"total\": self.total,\n \"class counts\": self.priors,\n \"means\": matrix_to_list(self.means),\n \"covariates\": [matrix_to_list(e) for e in self.covariate]}\n \n \n def reducer_gda_parameters(self,key, parameters):\n ''' Summarises intermediate values produced by each mapper to get final parameters '''\n all_parameters = {}\n # sum two lists (each list has length = number of classes)\n vec_sum = lambda x,y: [x[i]+y[i] for i in range(self.k)]\n # sum two list of lists\n list_of_vec_sum = lambda x,y: [vec_sum(x[i],y[i]) for i in range(self.k)]\n list_of_matrix_sum = lambda x,y: [list_of_vec_sum(x[i],y[i]) for i in range(self.k)]\n # summarise parameters produced by each mapper\n for parameter in parameters:\n if len(all_parameters)==0:\n all_parameters = parameters\n else:\n all_parameters[\"total\"]+=parameters[\"total\"]\n all_parameters[\"class counts\"] = vec_sum(parameter[\"class counts\"],all_parameters[\"class counts\"])\n all_parameters[\"means\"] = list_of_vec_sum(parameter[\"means\"],all_parameters[\"means\"])\n all_parameters[\"covariates\"] = list_of_matrix_sum(parameter[\"covariates\"],all_parameters[\"covariates\"])\n # calculate final parameters\n for i in range(self.k):\n all_parameters[\"means\"][i] = 
float(all_parameters[\"means\"][i])/all_parameters[\"class counts\"][i]\n mu = np.array(all_parameters[\"means\"][i])\n all_parameters[\"covariates\"][i] = np.array(all_parameters[\"covariates\"][i]) - all_parameters[\"class counts\"][i]*np.outer(mu,mu)\n all_parameters[\"covariates\"][i] = matrix_to_list(all_parameters[\"covariates\"][i])\n yield None, all_parameters\n \n \n def steps(self):\n return [MRStep(mapper = self.mapper_gda,\n mapper_final = self.mapper_final_gda,\n reducer = self.reducer_lda_parameters)]\n \n \nif __name__==\"__main__\":\n GaussianDiscriminantAnalysisMR.run()\n \n \n " }, { "alpha_fraction": 0.5428189635276794, "alphanum_fraction": 0.5568470358848572, "avg_line_length": 37.98958206176758, "blob_id": "c43ae785ec2a462777718da9113098e0ffbab3e8", "content_id": "a1ebe38fa94d7f7b0ec40bf0b549c998e1153cf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7485, "license_type": "no_license", "max_line_length": 125, "num_lines": 192, "path": "/Multivariate Descriptive Statistics/MultivariateDescriptiveStatistics.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom mrjob.protocol import RawValueProtocol, JSONProtocol, JSONValueProtocol\nimport numpy as np\n\n\n######################## Helper functions & classes ##########################\n\nclass DimensionalityMismatch(Exception):\n \n def __init__(self,expected,real):\n self.exp = expected\n self.real = real\n \n def __str__(self):\n error = \"Dimensionality mismatch. \"+\"Expected: \"+str(self.exp)+\" real: \"+ str(self.real)\n return error\n \n \ndef extract_relevant_features(l):\n '''\n Extracts quantitative features for which summary statistics should be calculated\n '''\n data = l.strip().split(\",\")\n return [float(e) for e in data[1:5]]\n \ndef kurtosis(p4,covariance,n):\n '''\n Calcultes unbiased kurtosis (see Joanes and Gill (1998)).\n \n \n Input:\n ------\n \n p4 - list of size m, where each entry is sum of fourth order feature.\n covariance - two-dimensional list of size m x m, which is outer\n product of input matrix with itself\n n - number of observations\n \n Output:\n -------\n - (float) kurtosis\n \n [where m is dimensionality of data]\n '''\n kurtosis_standard = [ (kurt/n)/((n-1)*covariance[i,i]/n)**2 -3 for i,kurt in enumerate(p4)]\n kurtosis_unbiased = [ (kurt*(n+1)+6)*(n-1)/(n-2)/(n-3) for kurt in kurtosis_standard]\n return kurtosis_unbiased\n\ndef skewed(p3,covariance,n):\n ''' \n Calcultes skeweness\n\n Input:\n ------\n \n p3 - list of size m, where each entry is sum of cubes of each feature.\n covariance - two-dimensional list of size m x m, which is outer\n product of input matrix with itself\n n - number of observations\n \n Output:\n -------\n - (float) kurtosis\n \n [where m is dimensionality of data]\n '''\n return [np.sqrt(n*(n-1))/(n-2)*((skew/n)/(((n-1)*covariance[i,i]/n)**1.5)) for i,skew in enumerate(p3)]\n \n########################## MapReduce Job ######################################\n\nclass MultivariateDescriptiveStatisticsMR(MRJob):\n ''' \n Calculates descriptive statistics for multivariate dataset.\n \n Following statistics are calculated:\n \n - Covariance Matrix \n - Skewness of each variable (measure of assymetry)\n - Kurtosis of each variable (measure of peakedness)\n - Minimum for each variable\n - Maximum for each variable\n - Mean for each variable\n \n Note: accuracy of results 
were compared on test results with corresponding\n functions in R (min,max,mean,cov,skewness[library(e1071)], kurtosis[library(e1071)])\n '''\n \n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTOCOL = JSONValueProtocol\n \n \n def __init__(self, *args, **kwargs):\n super(MultivariateDescriptiveStatisticsMR,self).__init__(*args, **kwargs)\n d = self.dim\n self.n = 0\n self.max,self.min,self.mean = [0]*d,[0]*d,[0]*d\n self.third_order, self.fourth_order = [0]*d, [0]*d\n self.covariates = np.zeros([d,d], dtype = np.float64)\n \n \n def configure_options(self):\n super(MultivariateDescriptiveStatisticsMR,self).configure_options()\n self.add_passthrough_option(\"--dimensions\", type = int, \n help = \"Number of columns of data matrix\")\n \n def load_options(self,args):\n super(MultivariateDescriptiveStatisticsMR,self).load_options(args)\n if self.options.dimensions is None:\n self.option_parser.error(\"You need specify expected dimensionlity\")\n else:\n self.dim = self.options.dimensions\n\n\n def mapper_covar(self,_,line):\n # extract features that you want to analyse\n variables = MultivariateDescriptiveStatisticsMR.extract_relevant_features(line)\n assert(len(variables)==self.dim), \"input dimensionality mismatch\"\n self.n+=1\n self.max = [max(m, var) for var in variables for m in self.max]\n self.min = [min(m, var) for var in variables for m in self.min]\n self.mean = [s+var for var in variables for s in self.mean]\n self.third_order = [p+var**3 for var in variables for p in self.third_order]\n self.fourth_order = [p+var**4 for var in variables for p in self.fourth_order]\n self.covariates += np.outer(np.array(variables),np.array(variables))\n \n \n def mapper_covar_final(self):\n yield 1,(\"max\", self.max)\n yield 1,(\"min\", self.min)\n yield 1,(\"mean\", self.mean)\n yield 1,(\"observations\", self.n)\n yield 1,(\"third order\", self.third_order)\n yield 1,(\"fourth order\", self.fourth_order)\n yield 1,(\"covariates\", [list(row) for row in self.covariates])\n \n \n def reducer_summarise(self,key,values):\n m = self.dim\n p1,max_list,min_list = [0]*m,[0]*m,[0]*m\n p3, p4 = [0]*m,[0]*m\n covar_matr = np.zeros([m,m], dtype = np.float64)\n n = 0\n for val in values:\n if val[0]==\"max\":\n max_list = [max(max_list[i],var) for i,var in enumerate(val[1])]\n elif val[0]==\"min\":\n min_list = [min(min_list[i],var) for i,var in enumerate(val[1])]\n elif val[0]==\"mean\":\n p1 = [p1[i]+var for i,var in enumerate(val[1])]\n elif val[0]==\"observations\":\n n+=val[1]\n elif val[0]==\"third order\":\n p3 = [p3[i]+cube for i,cube in enumerate(val[1])]\n elif val[0]==\"fourth order\":\n p4 = [p4[i]+quad for i,quad in enumerate(val[1])]\n else:\n covar_matr+=np.array(val[1])\n # vector of means\n means = [float(mu)/n for mu in p1]\n # covariance matrix (biased but with lowest MSE)\n covariance = (covar_matr - np.outer(np.array(means),np.array(means))*n)/(n-1)\n # fourth moment: calculate sum((x_i-mean(x))^4) by decomposing it\n p4 = [p4[i]-4*means[i]*p3[i]+6*(means[i]**2)*(covar_matr[i,i])-4*p1[i]*(means[i]**3)+n*means[i]**4 for i in range(m)]\n # third moment: calculate sum((x_i-mean(x))^3) by decompsing it\n p3 = [p3[i]-3*means[i]*covar_matr[i,i]+3*(means[i]**2)*p1[i] - n*means[i]**3 for i in range(m)] \n kurtosis_unbiased = kurtosis(p4,covariance,n) # calculate kurtosis for each variable\n skewness = skewed(p3,covariance,n) # calculate skewness for each variable\n matrix_to_list = lambda x: [list(e) for e in x]\n covariance = 
matrix_to_list(covariance)\n summary_statistics = {\"mean\": means,\n \"max\": max_list,\n \"min\": min_list,\n \"covariance\": covariance,\n \"skewness\": skewness,\n \"kurtosis\": kurtosis_unbiased,\n \"observations\": n }\n yield None, summary_statistics\n \n \n def steps(self):\n return [MRStep(mapper = self.mapper_covar,\n mapper_final = self.mapper_covar_final,\n reducer = self.reducer_summarise)]\n \nif __name__==\"__main__\":\n MultivariateDescriptiveStatisticsMR.run()" }, { "alpha_fraction": 0.5285956859588623, "alphanum_fraction": 0.5379388332366943, "avg_line_length": 35.62105178833008, "blob_id": "c9093b485d6f3240d171ad31e0e76877d1cfd7ff", "content_id": "5ea57784cda1a2825ac9bdd76611c5dd0fa9a188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3532, "license_type": "no_license", "max_line_length": 79, "num_lines": 95, "path": "/Gaussian Discriminant Analysis MapReduce/gda_wrapper.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport GDA as gda\nimport numpy as np\nfrom boto.s3.connection import S3Connection\nimport json\nimport os\n\n# use if you did not set up this parameters in configuration file\nEMR_DEFAULT_PARAMS = [\"--ec2-core-instance-bid-price\", \"0.4\", \n \"--ec2-core-instance-type\" ,\"m1.small\",\n \"--num-ec2-core-instances\", \"1\", \n \"--ec2-task-instance-bid-price\", \"0.4\", \n \"--ec2-task-instance-type\", \"m1.small\", \n \"--num-ec2-task-instances\",\"1\"]\n\n# access and secret key\nACCESS_KEY = \"YOUR_ACCESS_KEY\"\nSECRET_KEY = \"YOUR_SECRET_KEY\"\n\n\n\nclass GaussianDiscriminantAnalysis(object):\n '''\n Performs Gaussian Discriminant Analysis for classification. Two approaches \n are available QDA (each class has its own covariance matrix) or LDA (\n covariance matrix is shared).\n \n '''\n \n def __init__(self,targets,dimensions, input_path, output_path, \n emr_local = \"local\", emr_defaults = True):\n self.targets = targets\n self.dimensions = dimensions\n self.input_path = input_path\n self.output_path = output_path\n self.emr_local = emr_local\n self.emr_defaults = emr_defaults\n self.params = {}\n \n def configure(self):\n '''\n Sets configuration parameters to run map reduce job for finding\n parameters of Discriminant Analysis\n '''\n configs = [\"--feature-dimensions\",str(self.dim),\n \"--targets\", json.loads(self.targets),\n \"-r\", self.emr_local,\n \"--output-dir\",self.output_path,\n \"--no-output\",self.input_path]\n configs_new = []\n if self.emr_defaults is True:\n configs_new.extend(EMR_DEFAULT_PARAMS)\n configs_new.extend(configs)\n # start job\n mrJobGDA = gda.GaussianDiscriminantAnalysisMR(configs_new)\n with mrJobGDA.make_runner() as runner:\n runner.run()\n\n def load_params(self):\n if self.emr_local == \"local\":\n self.params = self.local_load_params(self.output_path)\n else:\n self.params = self.s3_load_params(self.output_path)\n\n \n def s3_load_params(self,s3_path):\n ''' load parameters if they are on amazon s3'''\n path = s3_path.strip(\"s3://\").split(\"/\")\n mybucket = self.conn.get_bucket(path[0]) # connect to s3 bucket\n s3_file_keys = [f for f in mybucket.list(prefix = \"/\".join(path[1:]))]\n for s3key in s3_file_keys:\n if mybucket.lookup(s3key).size > 0:\n data = s3key.get_contents_as_string()\n params = json.loads(data)\n return params\n \n def local_load_params(self,local_path):\n ''' load paramters if they are on local machine'''\n current_dir = os.getcwd()\n 
os.chdir(local_path)\n for filename in os.listdir(os.getcwd()):\n if \"part-\" in filename:\n if os.path.getsize(filename) > 0:\n with open(filename,\"r\") as in_file:\n data = json.load(in_file)\n os.chdir(current_dir)\n return data\n \n def posterior_probs(self, method = \"QDA\"):\n ''' get class probability\n\n method - (str) can have two values, either 'QDA' or 'LDA'\n (the 'QDA' default here is an assumed choice)\n '''\n pass\n" }, { "alpha_fraction": 0.8478260636329651, "alphanum_fraction": 0.8478260636329651, "avg_line_length": 29.66666603088379, "blob_id": "96f3b128aa8c22dda3735b15b65440b2ceee6490", "content_id": "10762f4220d39f184137ac9c883551d26105d534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 61, "num_lines": 3, "path": "/README.md", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# MapReduce-Machine-Learning\n\nMap-Reduce implementation of some machine learning algorithms\n" }, { "alpha_fraction": 0.5235518217086792, "alphanum_fraction": 0.5305620431900024, "avg_line_length": 38.62105178833008, "blob_id": "64b03fd05d92563cda4945860e124405a5476f49", "content_id": "ca341863ca9c8fbb64805d1b741269dfc9a7fbf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8131, "license_type": "no_license", "max_line_length": 118, "num_lines": 200, "path": "/Gaussian Mixture Model MapReduce/gmm.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nGaussian Mixture Model on EMR\n\n\n\"\"\"\n\nimport InitialiseGaussianMixtures as gmm_init\nimport IterationGaussianMixtureMR as gmm_iterator\nimport numpy as np\nfrom boto.s3.connection import S3Connection\nimport json\nimport os\n\n# use these defaults if you did not set up the parameters in a configuration file\nEMR_DEFAULT_PARAMS = [\"--ec2-core-instance-bid-price\", \"0.4\", \n \"--ec2-core-instance-type\" ,\"m1.small\",\n \"--num-ec2-core-instances\", \"1\", \n \"--ec2-task-instance-bid-price\", \"0.4\", \n \"--ec2-task-instance-type\", \"m1.small\", \n \"--num-ec2-task-instances\",\"1\"]\n\n# access and secret key\nACCESS_KEY = \"YOUR_ACCESS_KEY\"\nSECRET_KEY = \"YOUR_SECRET_KEY\"\n\n\n \ndef dist_tot(mu_before, mu_after):\n ''' calculates sum of distances between list of vectors '''\n diffs = [np.array(mu_before[i])-np.array(mu) for i,mu in enumerate(mu_after)]\n return sum([np.sqrt(np.dot(mu_diff.T,mu_diff)) for mu_diff in diffs])\n \n \n \nclass Runner(object):\n \n \"\"\"\n Orchestrates the full Gaussian Mixture Model job: an initialisation step\n (i.e. 
sample and run K-means on a sample to determine initial parameters),\n followed by iterations of the EM algorithm until convergence.\n \"\"\"\n \n def __init__(self,d,k,init_eps,sample_size,init_iteration_limit,\n iteration_eps,em_iteration_limit, input_path, \n output_path,emr_local = \"local\", emr_defaults = False):\n self.dim = d # dimensionality of data\n self.clusters = k # number of expected clusters\n self.init_eps = init_eps # convergence threshold for K-means on initialisation step\n self.init_iteration_limit = init_iteration_limit # limit for iterations for K-means on initial step\n self.iteration_eps = iteration_eps # convergence threshold for EM parameter\n self.em_iteration_limit = em_iteration_limit # maximum number of iterations of EM algorithm\n self.input_path = input_path\n self.output_path = output_path\n self.sample_size = sample_size\n self.emr_defaults = emr_defaults\n assert emr_local=='emr' or emr_local=='local', \" 'emr_local' should be either 'emr' or 'local' \"\n self.emr_local = emr_local\n if self.emr_local == \"emr\":\n self.conn = S3Connection(aws_access_key_id = ACCESS_KEY,\n aws_secret_access_key = SECRET_KEY)\n \n \n \n ############### Initialisation of GMM parameters ##########################\n \n \n def config_and_run_init_step(self):\n ''' \n Sets configuration parameters to run initial step of GMM algorithm.\n By default job will run in 'local' mode\n '''\n # set configuration\n init_configs = [\"--dimensions\",str(self.dim),\n \"--sample-size\",str(self.sample_size),\n \"--clusters\",str(self.clusters),\n \"--iteration-limit\",str(self.init_iteration_limit),\n \"--kmeans-convergence\",str(self.init_eps),\n \"-r\", self.emr_local,\n \"--output-dir\",\"_\".join([self.output_path,\"0\"]),\n \"--no-output\",self.input_path]\n init_configs_new = []\n if self.emr_defaults is True:\n init_configs_new.extend(EMR_DEFAULT_PARAMS[:])\n init_configs_new.extend(init_configs)\n # start job \n mrJobInitStep = gmm_init.InitialiseGaussianMixtureMR(init_configs_new)\n with mrJobInitStep.make_runner() as runner:\n runner.run()\n \n \n ####################### Iterations of EM-algorithm ######################\n \n @staticmethod\n def delta_stop_iterate(old_params,new_params):\n '''\n Total euclidean distance between the mean vectors of two consecutive\n iterations; used as the convergence criterion for stopping EM.\n '''\n mu_old = old_params[\"mu\"]\n mu_new = new_params[\"mu\"]\n delta = dist_tot(mu_new,mu_old)\n return delta\n \n \n \n def iterate_em(self):\n '''\n Performs em iterations until convergence\n '''\n delta = 10\n get_params = lambda p,i: self.load_params(\"_\".join([p,str(i)])) # get parameters from previous iter.\n old_params = get_params(self.output_path,0)\n iteration = 1\n while delta > self.iteration_eps and iteration < self.em_iteration_limit:\n self.config_and_run_iter_step(iteration, json.dumps(old_params))\n new_params = get_params(self.output_path,iteration)\n delta = self.delta_stop_iterate(old_params,new_params)\n iteration+=1\n old_params = new_params\n \n \n\n def config_and_run_iter_step(self,iteration, parameters):\n '''\n Configure parameters to run single iteration of EM algorithm \n (each iteration consists of E-step and M-step)\n '''\n iter_configs = [ \"--dimensions\",str(self.dim),\n \"--clusters\",str(self.clusters),\n \"--parameters\", parameters,\n \"-r\", self.emr_local,\n \"--output-dir\",\"_\".join([self.output_path,str(iteration)]),\n \"--no-output\",self.input_path ]\n iter_configs_new = []\n if self.emr_defaults is True:\n iter_configs_new.extend(EMR_DEFAULT_PARAMS)\n iter_configs_new.extend(iter_configs)\n 
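# 'parameters' is the JSON blob produced by the previous iteration; its keys\n # (\"mixing\", \"mu\", \"covariance\") match make_json_encodable in the iteration\n # job, e.g. {\"mixing\": [...], \"mu\": [[...], ...], \"covariance\": [[[...]], ...]}\n 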
# start job\n mrJobIterStep = gmm_iterator.IterationGaussianMixtureMR(iter_configs_new)\n with mrJobIterStep.make_runner() as runner:\n runner.run()\n \n \n def load_params(self,path):\n if self.emr_local == \"local\":\n return self.local_load_params(path)\n return self.s3_load_params(path)\n\n \n def s3_load_params(self,s3_path):\n ''' load parameters if they are on amazon s3'''\n path = s3_path.strip(\"s3://\").split(\"/\")\n mybucket = self.conn.get_bucket(path[0]) # connect to s3 bucket\n s3_file_keys = [f for f in mybucket.list(prefix = \"/\".join(path[1:]))]\n for s3key in s3_file_keys:\n if mybucket.lookup(s3key).size > 0:\n data = s3key.get_contents_as_string()\n params = json.loads(data)\n return params\n \n def local_load_params(self,local_path):\n ''' load parameters if they are on local machine'''\n current_dir = os.getcwd()\n os.chdir(local_path)\n for filename in os.listdir(os.getcwd()):\n if \"part-\" in filename:\n if os.path.getsize(filename) > 0:\n with open(filename,\"r\") as in_file:\n data = json.load(in_file)\n os.chdir(current_dir)\n return data\n \n def folder_cleanup(self):\n pass\n \n \n def main_run(self):\n pass\n \n \n \nif __name__==\"__main__\":\n d = 2\n k = 2\n init_eps = 0.01\n sample_size = 100\n init_iteration_limit = 20\n iteration_eps = 0.01\n em_iteration_limit = 10\n \n #input_path = \"/Users/amazaspshaumyan/Desktop/MapReduceAlgorithms/map_reduce/gmm_test_data.txt\"\n #output_path = \"/Users/amazaspshaumyan/Desktop/MapReduceAlgorithms/map_reduce/gmm_test_final_iteration\"\n output_path = \"s3://test-map-reduce-movielabs/expectation_maximization_clients/gmm_test_output_initial_test\"\n input_path = \"s3://test-map-reduce-movielabs/expectation_maximization_clients/gmm_test_data.txt\"\n emr_local = \"emr\"\n emr_defaults = True\n gmm_mr = Runner(d,k,init_eps,sample_size,init_iteration_limit,\n iteration_eps,em_iteration_limit, input_path, \n output_path,emr_local, emr_defaults)\n gmm_mr.config_and_run_init_step()\n gmm_mr.iterate_em()\n" }, { "alpha_fraction": 0.5059587359428406, "alphanum_fraction": 0.5102325677871704, "avg_line_length": 39.529998779296875, "blob_id": "764ba8257ed25161a28e1428d625ba58bf91a13d", "content_id": "25ed5ee2575768c996fa58273d655793d45662f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12167, "license_type": "no_license", "max_line_length": 116, "num_lines": 301, "path": "/RidgeRegression/RidgeRegressionMapReduce.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nfrom mrjob.job import MRJob\nfrom mrjob.protocol import RawValueProtocol,JSONProtocol,JSONValueProtocol\nfrom mrjob.step import MRStep\nimport heapq\nimport csv\nimport numpy as np\nimport random\n\n\n# ----------------------------- Helper Classes & Methods --------------------------------\n\ndef cholesky_solution_least_squares(part_one, part_two):\n '''Solves the normal equations via Cholesky decomposition'''\n R = np.linalg.cholesky(part_one)\n z = np.linalg.solve(R,part_two)\n theta = np.linalg.solve(np.transpose(R),z)\n return theta\n \n \nclass PrioritySampler(object):\n \n def __init__(self,sample_size):\n self.sample_size = sample_size\n self.sample = []\n \n def process_observation(self,observation):\n if len(self.sample) < self.sample_size:\n self.sample.append(observation)\n if len(self.sample) == self.sample_size:\n heapq.heapify(self.sample)\n else:\n if observation[0] > self.sample[0][0]:\n heapq.heapreplace(self.sample,observation)\n \n 
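# keeping the observations with the largest random priorities in a min-heap\n # yields a uniform random sample of the stream (a reservoir-sampling variant)\n 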
def process_observations(self,observations):\n for observation in observations:\n self.process_observation(observation)\n \n \n\nclass RidgeRegressionHoldOutCV(object):\n \n def __init__(self,lambdas, data):\n self.lambdas = lambdas\n self.data = data\n \n \n def run_ridge_regression(self, lambda_ridge , scaling = None):\n \n def scaler(x, column_scaler):\n m = np.shape(x)[1]\n for i in range(m):\n x[:,i] = column_scaler(x[:,i])\n return x\n \n X,Y = [],[]\n for observation in self.data:\n features , y = observation[1:]\n X.append(features)\n Y.append(y)\n X = np.array(X)\n Y = np.array(Y)\n if scaling == \"max-min\":\n X = scaler(X,lambda x: x/(np.max(x) - np.min(x)))\n elif scaling == \"z-score\":\n X = scaler(X,lambda x: (x - np.mean(x))/np.std(x))\n # scale y to account for bias term\n Y = Y - np.mean(Y)\n # in case of max-min and no scaling, we need to subtract mean from features\n if scaling != \"z-score\":\n X = scaler(X, lambda x: x-np.mean(x))\n # fit on the first half of the sample and validate on the held out second\n # half (a simple 50/50 split is assumed here)\n n_obs, d = np.shape(X)\n half = n_obs // 2\n theta = cholesky_solution_least_squares(\n np.dot(X[:half].T,X[:half]) + lambda_ridge*np.eye(d),\n np.dot(X[:half].T,Y[:half]))\n residuals = Y[half:] - np.dot(X[half:],theta)\n # mean squared error on the validation half\n return np.dot(residuals,residuals)/max(len(residuals),1)\n \n def cv(self, scaling = None):\n err = [ self.run_ridge_regression(lambda_ridge, scaling) for lambda_ridge in self.lambdas]\n lambda_best, best_err = min([ (self.lambdas[i],err[i]) for i in range(len(self.lambdas)) ], key = lambda t: t[1])\n return lambda_best\n \n \n \nclass DimensionMismatch(Exception):\n \n def __init__(self,expected,observed):\n self.exp = expected\n self.obs = observed\n \n def __str__(self):\n err = \"Expected number of observations: \"+str(self.exp)+\" , observed: \"+str(self.obs)\n return err\n\n\n\nclass RidgeRegression(MRJob):\n '''\n \n Input File:\n -----------\n \n Extract relevant features from input line by changing the extract_features\n method. You can add features for non-linear models (like x^2 or exp(x)).\n Current code assumes following input line format:\n \n input line = <unused index>,<feature_1>,...,<feature_n>,<dependent variable>\n \n Options:\n -----------\n \n --dimension - (int) number of explanatory variables\n --scaling - (str) 'z-score' or 'max-min'\n --hold-out-sample-size - (int) size of hold out cross validation set \n --cv-lambdas - (str) name of file containing set of regularisation \n parameters for cross validation\n \n '''\n \n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTOCOL = JSONValueProtocol\n \n def __init__(self,*args,**kwargs):\n super(RidgeRegression,self).__init__(*args,**kwargs)\n if self.options.scaling==\"max-min\":\n # extrema start at -/+ infinity so any observed value replaces them\n self.max = [float(\"-inf\")]*self.dim\n self.min = [float(\"inf\")]*self.dim\n self.mu = [0]*self.dim\n self.y_av = 0.0\n self.x_t_x = np.zeros([self.dim,self.dim], dtype = np.float64)\n self.x_t_y = [0]*self.dim\n self.n = 0\n self.lambdas_cv = self.read_lambdas(self.options.cv_lambdas)\n self.sampler = PrioritySampler(self.cv_size)\n \n #------------------------------------------- load & configure options ---------------------------------------#\n \n def configure_options(self):\n super(RidgeRegression,self).configure_options()\n self.add_passthrough_option(\"--dimension\",\n type = int,\n help = \"Number of explanatory variables\")\n self.add_passthrough_option(\"--hold-out-sample-size\",\n type = int,\n help = \"Size of sample for hold out cross validation\",\n default = 1000)\n self.add_passthrough_option(\"--scaling\",\n type = str,\n help = \"Can be 'z-score' or 'max-min' \")\n self.add_file_option(\"--cv-lambdas\",\n type = \"str\",\n help = \"Name of file that contains regularisation parameters for cross validation\")\n \n def load_options(self,args):\n super(RidgeRegression,self).load_options(args)\n # dimensionality\n if self.options.dimension is None:\n 
self.option_parser.error(\"You need to specify number of explanatory variables\")\n else:\n self.dim = self.options.dimension\n # set of lambdas for cross validation\n if self.options.cv_lambdas is None:\n self.option_parser.error(\"You need to specify name of file with set of regularisation parameters\")\n # sample size for hold out cross validation\n self.cv_size = self.options.hold_out_sample_size\n # scaling options\n if self.options.scaling not in [None,'z-score','max-min']:\n self.options_parser.error(\"You need to define proper scaling ('z-score' or 'max-min')\")\n \n \n #----------------------------------------- helper functions ----- --------------------------------------------#\n \n @staticmethod\n def extract_features(line):\n '''\n Extracts dependent variable and features from line of input\n '''\n data = line.strip().split(\",\")\n features = [float(e) for e in data[1:-1]]\n y = float(data[-1])\n return (y,features)\n \n \n @staticmethod\n def read_lambdas(filename):\n ''' reads regularisation parameters'''\n with open(filename,\"r\") as csvfile:\n lambdas = list(csv.reader(csvfile))\n return [float(e) for e in lambdas]\n \n \n def join_mapper_intermediate_stats(self, mapper_one, mapper_two):\n '''\n Aggregates mapper outputs\n '''\n mapper_one[\"mu\"] = [mapper_one[\"mu\"][i] + mapper_two[i] for i in range(self.dim)]\n sum_lists = lambda x,y,n: [x[i] + y[i] for i in range(n)]\n xtx_1, xtx_2 = mapper_one[\"x_t_x\"], mapper_two[\"x_t_x\"] \n mapper_one[\"x_t_x\"] = [sum_lists(xtx_1[i],xtx_2[i],self.dim) for i in range(self.dim)]\n mapper_one[\"y_av\"] += mapper_two[\"y_av\"]\n mapper_one[\"n\"] += mapper_two[\"n\"]\n if self.options.scaling == \"max-min\":\n mapper_one[\"max\"] = [max(mapper_one[\"max\"][i],mapper_two[\"max\"][i]) for i in range(self.dim)]\n mapper_one[\"min\"] = [min(mapper_one[\"min\"][i],mapper_two[\"min\"][i]) for i in range(self.dim)]\n return mapper_one\n \n \n def estimate_params(self,data,lambda_ridge,scaling = None):\n xtx = np.array(data[\"x_t_x\"])\n xty = np.array(data[\"x_t_y\"]) \n mu = np.array(data[\"mu\"])\n y_av = data[\"y_av\"]\n n = data[\"n\"]\n beta_bias = y_av # (bias terms)\n if scaling is None:\n part_one = xtx - n*np.outer(mu,mu)+lambda_ridge*np.eye(self.dim)\n part_two = xty - n*y_av*mu\n elif scaling == \"z_score\":\n sigma = 1.0/np.sqrt(np.diag((1.0/n*(xtx-np.outer(mu,mu))))) # vector of standard deviations\n scaler = np.outer(sigma,sigma)\n part_one = np.dot(scaler,xtx-n*np.outer(mu,mu)) + lambda_ridge*np.eye(self.dim)\n part_two = sigma*xty - sigma*mu*y_av*n\n elif scaling == \"max-min\":\n scale_vec = 1.0/( np.array(data[\"max\"]) - np.array(data[\"min\"]) )\n scaler = np.outer(scale_vec,scale_vec)\n part_one = np.dot(scaler,xtx-n*np.outer(mu,mu)) + lambda_ridge*np.eye(self.dim)\n part_two = scale_vec*xty - scale_vec*mu*y_av*n\n theta = cholesky_solution_least_squares(part_one, part_two)\n return {\"bias_term\": beta_bias,\"theta\":list(theta)}\n \n \n \n #----------------------------------------------- Map - Reduce Job -------------------------------------------#\n \n def mapper_ridge(self,_,line):\n y, features = self.extract_features(line)\n x = np.array(features)\n # update instance variables\n if self.options.scaling==\"max-min\":\n self.max = [max(current_max,features[i]) for i,current_max in enumerate(features)]\n self.min = [max(current_max,features[i]) for i,current_max in enumerate(features)]\n self.mu = [ av+features[i] for i,av in enumerate(self.mu) ]\n self.x_t_y = [ xty_i + y*features[i] for xty_i,i in 
self.x_t_x += np.outer(x,x)\n self.y_av +=y\n self.n +=1\n # make sample for hold out cross validation set\n rand_priority = random.randrange(start = 0, stop = 100000000)\n observation = (rand_priority,features,y)\n self.sampler.process_observation(observation)\n \n \n \n def mapper_ridge_final(self):\n x_t_x = [list(row) for row in self.x_t_x] # transform numpy array to json-encodable data structure\n intermediate_stats = {\"mu\": self.mu,\n \"x_t_y\": self.x_t_y,\n \"x_t_x\": x_t_x,\n \"y_av\": self.y_av,\n \"n\": self.n\n }\n if self.options.scaling == \"max-min\":\n intermediate_stats[\"max\"] = self.max\n intermediate_stats[\"min\"] = self.min\n yield None, (\"stats\",intermediate_stats)\n yield None, (\"hold_out_cv\",self.sampler.sample)\n \n \n \n def reducer_ridge(self, key, vals):\n '''\n Aggregates the per-mapper sufficient statistics, picks the best\n regularisation parameter on the held out sample and emits the\n final regression coefficients.\n '''\n sampler = PrioritySampler(self.cv_size)\n final_summary_stats = {\"mu\": [0]*self.dim,\n \"x_t_x\": [[0]*self.dim for i in range(self.dim)],\n \"x_t_y\": [0]*self.dim,\n \"y_av\": 0,\n \"n\": 0 }\n if self.options.scaling == \"max-min\":\n final_summary_stats[\"max\"] = [float(\"-inf\")]*self.dim\n final_summary_stats[\"min\"] = [float(\"inf\")]*self.dim\n for val in vals:\n if val[0]==\"stats\":\n mapper_summary = val[1]\n final_summary_stats = self.join_mapper_intermediate_stats(final_summary_stats,mapper_summary)\n else:\n sampler.process_observations(val[1])\n # for each scaling type use cross validation to choose best lambda\n # then use it on all data (including cv set) to find parameters\n ridge = RidgeRegressionHoldOutCV(self.lambdas_cv, sampler.sample)\n best_lambda = ridge.cv(self.options.scaling)\n yield None, self.estimate_params(final_summary_stats,best_lambda,self.options.scaling)\n\n \n \n def steps(self):\n return [MRStep(mapper = self.mapper_ridge,\n mapper_final = self.mapper_ridge_final,\n reducer = self.reducer_ridge)]\n \nif __name__==\"__main__\":\n RidgeRegression.run()\n" }, { "alpha_fraction": 0.5264230370521545, "alphanum_fraction": 0.5290592908859253, "avg_line_length": 39.67942428588867, "blob_id": "b329823b2b57412259bb7e80ab4763702cbc968f", "content_id": "83b1c0dea94066b4a433810140b5b6ae49365963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8345, "license_type": "no_license", "max_line_length": 110, "num_lines": 209, "path": "/KNN MapReduce/knn.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nfrom mrjob.job import MRJob\nfrom mrjob.protocol import RawValueProtocol,JSONProtocol\nfrom mrjob.step import MRStep\nimport heapq\nimport csv\n\n\n################# Helper functions & classes ##################################\n\ndef dist(x,y):\n ''' squared euclidean distance between two vector-lists (the square root\n is omitted since it does not change the neighbour ordering)'''\n return sum([(x[i] - e)**2 for i,e in enumerate(y)])\n\n\nclass DimensionalityMismatchError(Exception):\n ''' Error for case when dimensionalities do not match'''\n def __init__(self,expected,real):\n self.expected = expected\n self.real = real\n \n def __str__(self):\n error = \"Expected dimensions: \"+str(self.expected)+ \" observed: \"+str(self.real)\n return error\n \n \n################### MapReduce Job ########################################### \n\n\n\nclass KnnMapReduce(MRJob):\n '''\n K nearest neighbours algorithm for classification and regression.\n Assumes that the number of data points to be estimated is small and can\n fit into a single machine's memory.\n \n \n Input File:\n -----------\n \n Extract relevant features from input line by changing extract_features\n method. Current code assumes following input line format:\n \n <non_informative_index>,<feature 1>,<feature 2>,...,< dependent variable >\n \n \n Options:\n -------\n --dimensionality - number of dimensions in explanatory variables\n --knn-type - type of estimation (should be either 'regression' \n or 'classification')\n --n-neighbours - number of nearest neighbours used for estimation\n --points-to-estimate - file containing points that need to be estimated\n \n \n Output:\n -------\n Output line format:\n \n <feature 1>,<feature 2>,<feature 3>,< estimated dependent variable >
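\n\n Example (illustrative option values and file names):\n \n python knn.py -r local --dimensionality 3 --knn-type classification --n-neighbours 5 --points-to-estimate points.csv training_data.csv\n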
\n '''\n \n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTOCOL = RawValueProtocol\n \n def __init__(self,*args,**kwargs):\n super(KnnMapReduce,self).__init__(*args,**kwargs)\n with open(self.options.points_to_estimate,\"r\") as input_file:\n data = list(csv.reader(input_file))\n self.points = {}\n for dp in data:\n self.points[tuple([float(e) for e in dp])] = []\n \n \n #################### load & configure options #############################\n \n def configure_options(self):\n super(KnnMapReduce,self).configure_options()\n self.add_passthrough_option(\"--dimensionality\",\n type = int,\n help = \"dimensionality of features\")\n self.add_passthrough_option(\"--knn-type\",\n type = str,\n help = \"either regression or classification\")\n self.add_passthrough_option(\"--n-neighbours\",\n type = int,\n help = \"number of neighbours used in classification or regression\")\n self.add_file_option(\"--points-to-estimate\",\n type = \"str\",\n help = \"File containing all points that should be estimated\")\n \n \n def load_options(self,args):\n super(KnnMapReduce,self).load_options(args)\n # feature dimensionality\n if self.options.dimensionality is None:\n self.option_parser.error(\"You need to specify feature dimensionality\")\n else:\n self.dim = self.options.dimensionality\n # type of knn (either regression or classification)\n if self.options.knn_type != \"regression\" and self.options.knn_type != \"classification\":\n self.option_parser.error(\"--knn-type should be either 'regression' or 'classification'\")\n else:\n self.knn_type = self.options.knn_type\n # number of nearest neighbours\n if self.options.n_neighbours is None:\n self.option_parser.error(\"You need to specify number of nearest neighbours\")\n else:\n self.n_neighbours = self.options.n_neighbours\n if self.options.points_to_estimate is None:\n self.option_parser.error(\"You need to specify file containing points which need to be estimated\")\n \n ################# Helper functions for extracting features ################\n \n def extract_features(self,line):\n ''' Extracts data from line of input '''\n data = line.strip().split(\",\")\n return (data[-1], [ float(e) for e in data[1:-1] ])\n \n \n ################# Map - Reduce Job ######################################## \n \n \n def mapper_knn(self,_,line):\n '''\n Finds nearest neighbours for each point in set of points that \n needs to be estimated.\n '''\n y, features = self.extract_features(line)\n if len(features) != self.dim:\n raise DimensionalityMismatchError(self.dim,len(features))\n # for each point select n neighbours that are closest to it\n for dp in self.points:\n d_inv = -1*dist(features,dp)\n observation = tuple([d_inv,features,y])\n # if number of nearest neighbours is smaller than threshold add them\n if len(self.points[dp]) < self.n_neighbours:\n self.points[dp].append(observation)\n if len(self.points[dp]) == self.n_neighbours:\n heapq.heapify(self.points[dp])\n # compare with 
largest distance and push if it is smaller\n else:\n largest_neg_dist = self.points[dp][0][0]\n if d_inv > largest_neg_dist:\n heapq.heapreplace(self.points[dp],observation)\n\n def mapper_knn_final(self):\n '''\n Each mapper outputs dictionary with key being data point that\n needs to be estimated and value being priority queue of length \n 'self.n_neighbours' of observation from training set\n '''\n yield 1, list(self.points.items())\n \n \n def reducer_knn(self,key,points):\n '''\n Aggregates mapper output and finds set of training points which are \n closest to point that needs to be estimated. Then depending on \n estimation type ('classification' or 'regression') outputs estimate\n '''\n # 'merged' must survive across mapper outputs, so initialise it before the loop\n merged = None\n for mapper_neighbors in points:\n mapper_knn = {}\n for k,v in mapper_neighbors:\n mapper_knn[tuple(k)] = v\n # process mapper outputs and find closest neighbours\n if merged is None:\n merged = mapper_knn\n else:\n for point in merged.keys():\n pq = mapper_knn[point]\n # pop every candidate so the loop always terminates\n while pq:\n candidate = heapq.heappop(pq)\n if len(merged[point]) < self.n_neighbours:\n heapq.heappush(merged[point],candidate)\n elif candidate[0] > merged[point][0][0]:\n heapq.heapreplace(merged[point],candidate)\n for point in merged.keys():\n # regression\n if self.options.knn_type == \"regression\":\n estimates = [ float(observation[-1]) for observation in merged[point]]\n estimate = sum(estimates)/len(estimates)\n # classification\n else:\n estimates = {}\n for neg_dist,features,y in merged[point]:\n estimates[y] = estimates.get(y,0) + 1\n estimate,counts = max(estimates.items(),key = lambda x: x[-1])\n # format output\n output = list(point)\n output.append(estimate)\n yield None, \",\".join([str(e) for e in output])\n \n \n def steps(self):\n return [MRStep(mapper = self.mapper_knn,\n mapper_final = self.mapper_knn_final,\n reducer = self.reducer_knn)]\n \nif __name__==\"__main__\":\n KnnMapReduce.run()\n" }, { "alpha_fraction": 0.574202299118042, "alphanum_fraction": 0.5800148248672485, "avg_line_length": 37.82692337036133, "blob_id": "417a3c8e4159184cffe5eec5f0d50e7b6e713dbe", "content_id": "e37da06bcfaf057900c34aae9c48b12744c0d49b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8086, "license_type": "no_license", "max_line_length": 115, "num_lines": 208, "path": "/Gaussian Mixture Model MapReduce/IterationGaussianMixtureMR.py", "repo_name": "jbdatascience/MapReduce-Machine-Learning", "src_encoding": "UTF-8", "text": "\nfrom mrjob.job import MRJob\nfrom mrjob.protocol import JSONProtocol, RawValueProtocol, JSONValueProtocol\nfrom mrjob.step import MRStep\nimport json\nimport numpy as np\n\n\ndef multivar_gauss_pdf(x, mu, cov):\n '''\n Calculates the multivariate normal density (pdf)\n \n Parameters:\n -----------\n \n x - numpy array of a \"d x 1\" sample vector\n mu - numpy array of a \"d x 1\" mean vector\n cov - numpy array of a \"d x d\" covariance matrix\n \n (where d - dimensionality of data)\n\n Output:\n -------\n - (float) probability of x given parameters of \n Gaussian Distribution\n '''\n # float exponents keep the formula safe under integer division as well\n part1 = 1 / ( ((2* np.pi)**(len(mu)/2.0)) * (np.linalg.det(cov)**0.5) )\n part2 = (-0.5) * np.dot(np.dot((x-mu).T,(np.linalg.inv(cov))),(x-mu))\n return float(part1 * np.exp(part2))\n \n\ndef responsibility(x,mu,cov,p,K):\n ''' \n Calculates conditional probability of latent variable given\n observed data and parameters\n \n Parameters:\n -----------\n \n x - numpy array of a \"d x 1\" sample vector\n mu - list of length \"K\" of lists \"d x 1\" mean vector \n cov - list of length \"K\" numpy arrays each \"d x d\" covariance matrix\n p - list of floats, each float prior probability of cluster\n K - number of clusters (values of latent variables)\n \n (where d - dimensionality of data)\n \n Output:\n - list of floats, each element of list is responsibility corresponding \n to x and relevant latent variable value\n '''\n 
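# responsibility (posterior weight) of component k for observation x:\n # gamma_k(x) = p[k]*N(x|mu[k],cov[k]) / sum_j p[j]*N(x|mu[j],cov[j])\n 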
resps = [p[k]*multivar_gauss_pdf(x,np.array(mu[k]),np.array(cov[k])) for k in range(K)]\n p_x = sum(resps)\n return [float(r_k)/p_x for r_k in resps]\n \n\ndef extract_features(line):\n ''' extracts features from line of input'''\n data = line.strip().split(\",\")\n return [ float(e) for e in data[1:] ]\n \n \ndef make_json_encodable(mixing, means, covar):\n '''\n Transforms numpy based parameters into plain lists so that they can\n be JSON encoded\n \n Parameters:\n -----------\n \n mixing - list of size k\n means - list of size k of numpy arrays (each numpy array has size d)\n covar - list of size k of two dimensional numpy array (matrix of size dxd)\n \n (where d is dimensionality and k is number of clusters)\n\n Output:\n --------\n - dictionary with parameter names as keys \n {\"mu\": list of mean vectors, \"mixing\": list of mixing coefficients,\n \"covariance\": list of covariance matrices}\n \n '''\n matrix_to_list = lambda x: [list(e) for e in x]\n means = matrix_to_list(means)\n covariance = [matrix_to_list(e) for e in covar]\n return {\"mixing\":mixing,\"mu\":means,\"covariance\":covariance}\n\n\n \nclass IterationGaussianMixtureMR(MRJob):\n '''\n Runs single iteration of Expectation Maximization Algorithm for Gaussian\n Mixture Model.\n \n Mappers use parameters from previous iteration to calculate responsibilities\n and intermediate values that are then used by single reducer to calculate\n new parameters.\n \n Command Line Options:\n ---------------------\n \n --clusters - number of clusters\n --dimensions - dimensionality of data\n --parameters - (str)json encoded dictionary of parameters\n \n '''\n INPUT_PROTOCOL = RawValueProtocol\n \n INTERNAL_PROTOCOL = JSONProtocol\n \n OUTPUT_PROTOCOL = JSONValueProtocol\n \n\n def __init__(self,*args,**kwargs):\n super(IterationGaussianMixtureMR,self).__init__(*args,**kwargs)\n # sum of responsibilities for each cluster & number of observations\n self.resp_sum = [0]*self.clusters\n self.N = 0\n # sum of observations weighted by responsibility \n self.resp_w_sum = [np.zeros(self.dim, dtype = np.float64) for i in range(self.clusters)]\n # sum of x_n*x_n_t (outer products) weighted by responsibility\n self.resp_w_cov = [np.zeros([self.dim,self.dim], dtype = np.float64) for i in range(self.clusters)] \n \n \n def configure_options(self):\n super(IterationGaussianMixtureMR,self).configure_options()\n self.add_passthrough_option(\"--dimensions\",\n type = int,\n help = \"dimensionality of input data\")\n self.add_passthrough_option(\"--clusters\",\n type = int,\n help = \"number of clusters\")\n self.add_passthrough_option(\"--parameters\",\n type = str,\n help = \"JSON encoded parameters from previous iteration\")\n \n \n def load_options(self,args):\n super(IterationGaussianMixtureMR,self).load_options(args)\n # number of clusters\n if self.options.clusters is None:\n self.option_parser.error(\"You need to specify number of clusters\")\n else:\n self.clusters = self.options.clusters\n # data dimensionality\n if self.options.dimensions is None:\n self.option_parser.error(\"You need to specify dimensionality of data\")\n else:\n self.dim = self.options.dimensions\n # JSON encoded string with 
parameters from previous iteration\n if self.options.parameters is None:\n self.option_parser.error(\"You need to pass the JSON encoded distribution parameters\")\n \n def mapper_gmm_init(self):\n params = json.loads(self.options.parameters)\n self.mu = params[\"mu\"]\n self.covar = params[\"covariance\"]\n self.mixing = params[\"mixing\"]\n \n def mapper_gmm(self,_,line):\n features = extract_features(line)\n assert(len(features)==self.dim), \"dimension mismatch\"\n x = np.array(features)\n r_n = responsibility(x,self.mu,self.covar,self.mixing,self.clusters) # responsibilities\n self.resp_sum = [self.resp_sum[i]+r_n_k for i,r_n_k in enumerate(r_n)]\n self.resp_w_sum = [w_sum + r_n[i]*x for i,w_sum in enumerate(self.resp_w_sum)]\n self.resp_w_cov = [w_covar+r_n[i]*np.outer(x,x) for i,w_covar in enumerate(self.resp_w_cov)]\n self.N+=1\n \n def mapper_final_gmm(self):\n matrix_to_list = lambda x: [list(e) for e in x]\n # sum of responsibilities\n yield 1,(\"r_sum\", self.resp_sum) \n # sum of observations weighted by responsibility\n yield 1,(\"r_w_sum\", [list(e) for e in self.resp_w_sum])\n # covariates weighted by responsibility\n yield 1,(\"r_w_cov\", [ matrix_to_list(cov) for cov in self.resp_w_cov])\n # number of observations\n yield 1,(\"total\", self.N) \n \n \n def reducer_gmm(self,key, values):\n N = 0\n r_sum = [0]*self.clusters\n r_w_sum = [np.zeros(self.dim, dtype = np.float64) for i in range(self.clusters)]\n r_w_cov = [np.zeros([self.dim,self.dim], dtype = np.float64) for i in range(self.clusters)]\n for value in values:\n if value[0]==\"r_sum\":\n r_sum = [r_sum[i]+gamma for i,gamma in enumerate(value[1])]\n elif value[0]==\"r_w_sum\":\n r_w_sum = [r_w_sum[i]+np.array(r_w_new, dtype = np.float64) for i,r_w_new in enumerate(value[1])]\n elif value[0]==\"r_w_cov\":\n r_w_cov = [ r_w_cov[i] + np.array(cov) for i,cov in enumerate(value[1])]\n elif value[0]==\"total\":\n N+=value[1]\n mixing = [float(gamma)/N for gamma in r_sum]\n means = [1.0/r_sum[i]*r_w_sum[i] for i, gamma in enumerate(mixing)]\n covar = [ 1.0/r_sum[k]*r_w_cov_k - np.outer(means[k],means[k]) for k,r_w_cov_k in enumerate(r_w_cov)] \n yield None, make_json_encodable(mixing,means,covar)\n\n def steps(self):\n return [MRStep(mapper_init = self.mapper_gmm_init,\n mapper = self.mapper_gmm, \n mapper_final = self.mapper_final_gmm,\n reducer = self.reducer_gmm)]\n \nif __name__==\"__main__\":\n IterationGaussianMixtureMR.run()\n \n " } ]
10
Nico76/python
https://github.com/Nico76/python
f05265cce46b072498981694a02f381fde3b89ca
1572c435f2d4b38aeff742389a051ef963e31ac1
2d2846124f6471affa80b0e613916ecac14fa444
refs/heads/master
2015-08-11T06:56:45.530658
2014-05-09T02:03:24
2014-05-09T02:03:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6746888160705566, "alphanum_fraction": 0.6780083179473877, "avg_line_length": 27.619047164916992, "blob_id": "b8312657666abca1fe7b0a2a5d6335b74003b30d", "content_id": "1ac88f4a43e02b1fe8aa01a705c36283dcb8d77a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 75, "num_lines": 42, "path": "/jeffPrograms/AverageExceptions.py", "repo_name": "Nico76/python", "src_encoding": "UTF-8", "text": "def main():\n\t# Ask user for filename\n\tfilename = input('Please enter a filename to access: ')\n\n\t# Call process_file(), passing the\n\t#\t\tuser-designated filename\n\tprocess_file(filename)\n\n# The process_file() function takes one parameter (file).\n#\t\tIt contains a loop that tries to loop through each\n#\t\tline of the file and convert the string to an integer,\n#\t\tthen add that integer to the accumulator, 'total,'\n#\t\tthen increases a counter by 1.\n#\n#\tIf it encounters an IOError or ValueError, these are\n#\t\thandled.\ndef process_file(file):\n\tcounter = 0 \t# Initiate counter\n\ttotal = 0\t\t\t# Initiate accumulator\n\n\t\n\ttry:\n\t\tf = open(file, 'r') # Open file\n\t\tfor line in f:\n\t\t\t# Convert each line to an integer and add it to the accumulator\n\t\t\ttotal += int(line) # Increase the accumulator\n\t\t\tcounter += 1 # Increment counter\n\t\t\tprint('.')\n\t\tf.close() # Close file\n\t\t\n\t\tprint('Found ' + str(counter) + ' numbers.')\n\t\tprint('The total of these numbers is: ' + str(total))\n\n\texcept IOError as e: # If the file cannot be found...\n\t\t\tprint('Error:', e)\n\texcept ValueError as e: # If the line cannot be converted to an integer...\n\t\t\tprint('Error:', e)\n\t\t\tprint('Invalid entry: ' + line)\n\n\n# Start the program\nmain()\n\t\t\t" }, { "alpha_fraction": 0.6881150603294373, "alphanum_fraction": 0.6911430954933167, "avg_line_length": 30.4761905670166, "blob_id": "03afbbff07b931aba6528c14d6aef7567577d58c", "content_id": "9a7c6b12623e64dbf652be4b4ba4423340e615a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1321, "license_type": "no_license", "max_line_length": 82, "num_lines": 42, "path": "/jeffPrograms/ChargeAccount.py", "repo_name": "Nico76/python", "src_encoding": "UTF-8", "text": "def main():\n\taccounts = []\t\t\t\t\t# Initialize 'accounts' list\n\tpath = 'charge_accounts.txt'\t# Store the file path in a variable for easy changes\n\tf = open(path, 'r')\t\t\t\t# Open the file\n\tsearchLength = 7\t\t\t\t# Define searchLength variable for input validation\n\tnumber = ''\t\t\t\t\t\t# Initialize number variable\n\tsearch = False\n\n\t# Iterate over each line in the text file, appending\n\t#\teach line to the 'accounts' list.\n\tfor line in f:\n\t\taccounts.append(line.strip())\n\tf.close()\t\t\t\t\t\t# Close the file.\n\n\t# This while loop asks the user for the account number\n\t#\tto number. 
Unless its length is 7, it will\n\t#\tprompt the user to try again.\n\twhile len(number) != searchLength:\n\t\tnumber = input('Please enter your 7-digit account number: ')\n\t\tif len(number) != searchLength:\n\t\t\tprint()\n\t\t\tprint('The account number you entered was not 7 digits.')\n\t\t\tprint('Please try again.')\n\t\t\tprint()\n\n\t# Set 'search' equal to the result of calling\n\t#\tsearchAccounts(), passing the accounts list\n\t#\tand user input as arguments\n\tsearch = searchAccounts(accounts, number)\n\n\tif search == True:\n\t\tprint('Your account number was found! >>> ' + number) \t# Search success statement\n\telse:\n\t\tprint('You did not enter a valid account number.')\t\t# Search failure statement\n\ndef searchAccounts(acc, num):\n\tfor n in acc:\n\t\tif n == num:\n\t\t\treturn True\n\treturn False\t# Explicitly report a failed search\n\n\nmain()" }, { "alpha_fraction": 0.6687074899673462, "alphanum_fraction": 0.6768707633018494, "avg_line_length": 34.011905670166016, "blob_id": "b65944a7b20f093cc62dd90cecd5ca1050a82bc5", "content_id": "974e81dad6a1f7ea8e225e44ff3558c14cc55eec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2940, "license_type": "no_license", "max_line_length": 143, "num_lines": 84, "path": "/jeffPrograms/Rainfall.py", "repo_name": "Nico76/python", "src_encoding": "UTF-8", "text": "def main():\n\t# Accumulate user input into list\n\trainfall_list = get_rainfall()\n\t# Store the result of calc_rainfall_total() in rainfall_list_total\n\trainfall_list_total = calc_rainfall_total(rainfall_list)\n\t# Store the result of calc_rainfall_avg() in rainfall_list_avg\n\trainfall_list_avg = calc_rainfall_avg(rainfall_list, int(rainfall_list_total))\n\n\t# Print the results\n\tprint()\n\tprint('### RESULTS ###########################################')\n\tprint('Rainfall total for the year:\\t\\t ' + str(rainfall_list_total) + ' inches')\n\tprint('Average monthly rainfall:\\t\\t ' + '%.2f' % (rainfall_list_avg) + ' inches') # Use string formatting to limit result to 2 decimal places\n\t# This part has a lot of function calls in one line. It's slightly\n\t#\t\tconfusing at first glance, and I'm sure there's a better\n\t#\t\tway to do this... but this was my initial attempt at solving \n\t#\t\tthe problem, and it seemed to work fine.\n\t#\tTo determine the named month, determine_month() is called. The argument\n\t#\t\tpassed to the function is the index of the result of min(rainfall_list)\n\t#\t\tand max(rainfall_list). The index corresponds to a key in the dictionary\n\t#\t\tin determine_month(). The proper key is found and the value is returned,\n\t#\t\ta named month. This process is identical for both the min and max\n\t#\t\tsave for the difference between using 'min' on one and 'max' on the other.\n\tprint('Month with the least rainfall:\\t\\t ' + str(determine_month(rainfall_list.index(min(rainfall_list)))))\n\tprint('Month with the highest rainfall:\\t ' + str(determine_month(rainfall_list.index(max(rainfall_list)))))\n\tprint('#######################################################')\n\tprint()\n\n# Get_rainfall iterates 12 times\n# accepting an integer from the\n# user and appending that number\n# to the rainfall list.\ndef get_rainfall():\n\ti = 0\n\tlist = []\n\twhile i < 12:\n\t\ttry:\n\t\t\t# Get user input and append it to a list\n\t\t\tlist.append(int(input('Enter the amount of rainfall for month ' + str(i+1) + ': ')))\n\t\t\ti += 1\n\t\texcept ValueError: # Handle non-integer input\n\t\t\tprint('You did not enter a valid number. 
Please try again.')\n\treturn list\n\n# calc_rainfall_total() iterates over\n#\t\teach of the elements in the list\n#\t\tpassed and totals them in the\n#\t\t'total' accumulator.\ndef calc_rainfall_total(list):\n\ttotal = 0\n\tfor e in list:\n\t\ttotal += e\n\treturn total\n\n# calc_rainfall_avg() calculates\n#\t\tthe average of the list by\n#\t\tdividing the total by the\n#\t\tnumber of elements in the list\ndef calc_rainfall_avg(list, total):\n\treturn total / len(list)\n\n# determine_month() takes an integer\n#\t\tand finds the corresponding value\n#\t\tfor that key in the below dictionary. \n#\t\tThe integer passed is the index of \n#\t\tan element of\tthe rainfall_list.\ndef determine_month(num):\n\treturn {\n\t0: \"January\",\n\t1: \"February\",\n\t2: \"March\",\n\t3: \"April\",\n\t4: \"May\",\n\t5: \"June\",\n\t6: \"July\",\n\t7: \"August\",\n\t8: \"September\",\n\t9: \"October\",\n\t10: \"November\",\n\t11: \"December\",\n\t}.get(num)\n\n\nmain()" }, { "alpha_fraction": 0.6757493019104004, "alphanum_fraction": 0.6790190935134888, "avg_line_length": 27.24615478515625, "blob_id": "238df5cd40527e329d0e86426d31b730478699a9", "content_id": "858fa1025e002de7701ad5afd4c87dd1ea0cdcd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 89, "num_lines": 65, "path": "/jeffPrograms/final.py", "repo_name": "Nico76/python", "src_encoding": "UTF-8", "text": "# This global variable is used to determine\n#\t\thow many items will be entered into the\n#\t\tlist.\nCOUNT = 20\n\ndef main():\n\tl_numbers = [] # Initialize numbers list\n\tc = 0 # Initialize counter\n\n\t# This for loop appends 'COUNT' numbers\n\t#\t\tto l_numbers by calling get_num()\n\t#\t\tto grab obtain user input\n\tfor i in range(0, COUNT):\n\t\tc +=1 \n\t\tl_numbers.append(get_num(c))\n\n\n\t# Print the results\n\tprint()\n\tprint('#### RESULTS ####')\n\tprint('The lowest number in the list is:\\t', min(l_numbers))\n\tprint('The highest number in the list is:\\t', max(l_numbers))\n\tprint('The sum of the list is:\\t\\t\\t', list_sum(l_numbers))\n\tprint('The average of the list is:\\t\\t', list_avg(l_numbers))\n\tprint()\n\n# get_num takes one argument, 'counter' which\n#\t\tis simply used to show the user how many\n#\t\tnumbers they've entered.\n#\n# get_num tries to obtain 'float' input from\n#\t\tthe user. It handles 'ValueError' exceptions\n#\t\tand loops back to the input prompt if an\n#\t\texception is thrown. Otherwise, it returns\n#\t\tthe input back to the 'for' loop in main()\ndef get_num(counter):\n\twhile True:\n\t\ttry:\n\t\t\tprint(counter, \". \", sep='', end='') # Print the counter for the user\n\t\t\tnum = float(input('Please enter a number to be added to the list: ')) # Get user input\n\t\texcept ValueError: # This handles incorrect input\n\t\t\tprint('That is not a valid number. Try again...') # Error msg\n\t\telse:\n\t\t\treturn num # Return user input to the for loop in main()\n\n#\tlist_sum() takes one argument, 'list.'\n#\t\tEach item is iterated over and\n#\t\taccumulated in the 'total' variable\ndef list_sum(list):\n\ttotal = 0\n\tfor n in list:\n\t\ttotal += n\n\treturn total\n\n#\tlist_avg() takes one argument, 'list.'\n#\t\tFirst, list_sum() is called to\n#\t\tfigure the total. 
Then that total\n#\t\tis averaged and returned.\ndef list_avg(list):\n\ttotal = list_sum(list)\n\treturn total / len(list)\n\n\n\nmain()" }, { "alpha_fraction": 0.649899423122406, "alphanum_fraction": 0.6700201034545898, "avg_line_length": 21.636363983154297, "blob_id": "b8bc0f3655735ef8d4a5f043d17da99bf9f850f4", "content_id": "a755c75a29cbf41b28655d7e48ee33582e7a10e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 41, "num_lines": 22, "path": "/jeffPrograms/LotteryNumberList.py", "repo_name": "Nico76/python", "src_encoding": "UTF-8", "text": "def main():\n\timport random \t# Import 'random' library\n\tlist = []\t\t# Create empty list\n\ti = 0\t\t\t# Initialize iterator for loop\n\n\t# This while loop will loop 6 times\n\t#\tand append a random integer in\n\t#\tthe range 0..99 inclusive to\n\t#\tthe end of the 'list' list on\n\t#\teach iteration.\n\twhile i < 6:\n\t\tlist.append(random.randint(0,99))\n\t\ti += 1\n\n\t# This for loop will iterate over\n\t#\teach element in the 'list'\n\t#\tlist, printing out the element\n\t#\ton each iteration.\n\tfor n in list:\n\t\tprint(n)\n\nmain()" }, { "alpha_fraction": 0.6959947347640991, "alphanum_fraction": 0.7032173275947571, "avg_line_length": 26.709091186523438, "blob_id": "5d72a1ce80097307c09d069d49fccb2f42883136", "content_id": "ebdf038595d53037e7f4d73d26a78bb641cc8451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1523, "license_type": "no_license", "max_line_length": 77, "num_lines": 55, "path": "/jeffPrograms/bjRandomNumberFile.py", "repo_name": "Nico76/python", "src_encoding": "UTF-8", "text": "# Import random module\nimport random\n\n# Declare global variable for the filename\nglobal FILENAME\nFILENAME = './randomNumbers.txt'\n\ndef main():\n\t# Ask user for number of random numbers to generate\n\tnumbers = int(input('How many random numbers would you like to generate? '))\n\t\n\t# Call 'create_file()' passing 'numbers' as the argument\n\t#\t\tin order to generate the proper number of random\n\t#\t\tnumbers\n\tcreate_file(numbers)\n\n\t# Call 'read_file()' to read each random number in the\n\t#\t\tthe file and sum the numbers, then print the sum\n\tread_file(FILENAME)\n\n# 'Create_file()' takes one parameter, the number the\n#\t\tuser entered, then generates that many random\n#\t\tnumbers, each on a separate line in the\n#\t\t'randomNumbers.txt' file.\ndef create_file(num):\n\toutFile = open(FILENAME, 'w') # Open FILENAME in write mode\n\tfor x in range(0,num):\n\t\toutFile.write(str(random.randint(1,1000))+'\\n')\n\t\n\toutFile.close() # Close the file\n\n# 'Read_file()' takes one parameter (the filename).\n#\t\tIt opens the given file in\n#\t\tread mode, and adds each line (random number)\n#\t\tto the 'line_sum' accumulator. It then prints\n#\t\tthe result.\ndef read_file(file):\n\tcounter = 0\n\tline_sum = 0 # Initialize accumulator\n\tinFile = open(file, 'r') # Open the passed-in file in read mode\n\tfor line in inFile:\n\t\tline_sum += int(line)\n\t\tcounter += 1\n\n\tinFile.close() # Close the file\n\n\tfor x in range(0,5):\n\t\tprint('.')\n\n\tprint('Sum of random numbers:\\t' + str(line_sum))\n\tprint('Random numbers found:\\t' + str(counter))\n\t\n\n# Start the program\nmain()" } ]
6
poojalnarayan/isi-project
https://github.com/poojalnarayan/isi-project
dcbfef03f50bf030b61a285baa4440b68d57d72a
b0f5f02451522a338bcd4001236936e2311966a1
ad75b6011c001ac63963b8c9e302afd60902a1d5
refs/heads/master
2020-06-04T23:44:06.667948
2019-06-19T13:58:17
2019-06-19T13:58:17
192,237,429
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7682458162307739, "alphanum_fraction": 0.77432781457901, "avg_line_length": 83.43243408203125, "blob_id": "a499b0dc793e99eaa3a89021c78e8b45aebf5376", "content_id": "63bb934de1504e33e4b6fffdfe438866a032c11c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3124, "license_type": "no_license", "max_line_length": 419, "num_lines": 37, "path": "/README.md", "repo_name": "poojalnarayan/isi-project", "src_encoding": "UTF-8", "text": "# isi-project\n\n----\ninstructions to run: \n\n1. python preprocess_json.py \n2. python process_queries.py <query-file> \n\nstep 1, reads the compressed json object in memory and creates a indicator dictionary (I_dict) containing indicator values as keys and list of (P-values, Q-values) as the values. Since this step is independent of the queries file, this is to be run once and the resulting I_dict is stored on disk. For efficiency, we can compress the I_dict pkl file, but currently it is uncompressed. \n\nstep 2: I read the I_dict (created from step 1 for every query file) and check for coverage by constructing a histogram of P-values. I sort the histogram and take the top-3 as these indicate max coverage. I compute the coverage as freq of P-values in histogram / total # of queries. I read the compressed json file to and the top 3 P-dicts to construct the necessary output as specified in the readme of the challenge. \n\nFor both steps 1 and 2, I report the time taken for the in-memory solution. \n\nSince step 1 is to be only run once, we can get some gains when we have to run with several query files (by taking a space-tradeoff hit, since we store the I_dict on disk)\n\n----\n\noutput of the process_queries.py is present in sample1_output.txt (for input sample1.txt) and sample2_output.txt (for input sample2.txt) \n\n----\n\nHere is the progress on the redis front. I have to tell upfront that this is a new thing to me, but looks like a very exciting technology to store and retrieve large blobs of data quickly. \n\nI went through the tutorial and soon found that redis does not natively support json objects. So the option was to either 1) use the json objects as strings and use the redis to efficiently store and retrieve these objects 2) decompose the nested json objects into something that can be used within redis. \n\nI then found this nice blogpost and video explaining a new module in redis called rejson. https://redislabs.com/blog/redis-as-a-json-store/ The video confirmed my hypothesis for the choices I have (if I do not have rejson). Then I decided to use rejson for it seems to be more flexible and provides performance improvements over both the above options as stated in the video. \n\nThen I figured out how to enable redis-json module in my redis server. Basically had to clone the redis-json repository (https://github.com/RedisJSON/RedisJSON), compile this and include the compiled .so file in the redis.conf and restart my local redis-server. \n\nI could even interface this with python and use the JSON.SET command from the python script. (https://pypi.org/project/rejson/). I am now at a point where I am a little stuck with 2 issues: \n\n1. jsonset is working but jsonget is returning empty when I try to get the object that I wrote to redis db before. However, I see this obj when I use the redis-cli and the JSON.get command there \n\n2. 
When I try the jsonset on the large json object from the file given in the programming challenge, I get a 'connection reset by peer' error message. Perhaps there is a setting that allows one to add large blobs of data to the redis db or something like that. \n\n---\n" }, { "alpha_fraction": 0.6388400793075562, "alphanum_fraction": 0.6476274132728577, "avg_line_length": 28.179487228393555, "blob_id": "6c4ff66a16235a8f86e8669613e4d406d59462dd", "content_id": "34053d8ddfd2fb9e152939854d7bb5ac2e28a24f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1138, "license_type": "no_license", "max_line_length": 99, "num_lines": 39, "path": "/process_queries_redis.py", "repo_name": "poojalnarayan/isi-project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport redis\nimport json\nimport gzip\nimport time\n\nredis_host = \"localhost\"\nredis_port = 6379\nredis_password = \"\"\n\nstart = time.time()\nprint('reading the gzipped json obj file')\nwith gzip.open('prop_idents_v6_nice.json.gz') as f:\n json_obj = json.load(f) \nprint('done')\nend_json = time.time()\n\nprint(\"time taken ( read json): %.2f s\" % (end_json - start)) \n\nprint('writing json objects to a redis database ..')\nr = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_password, decode_responses=True)\n\nstart_redis = time.time()\nfor idx, k in enumerate(json_obj.keys()):\n if idx % 5 == 0:\n end_redis = time.time()\n print('completed ' + str(idx) + ' obj. :' + str(end_redis - start_redis))\n start_redis = time.time()\n r.execute_command('JSON.SET', k, '.', json.dumps(json_obj[k]))\n\n#r.execute_command('JSON.SET', 'object', '.', json.dumps(json_obj))\nprint('done.')\nend = time.time()\n\nprint(\"time taken ( write redis): %.2f s\" % (end - end_json)) \ntotal_time = end-start\nprint(\"time taken ( total): %.2f s\" % total_time) \n#reply = json.loads(r.execute_command('JSON.GET', 'object'))\n" }, { "alpha_fraction": 0.5925508141517639, "alphanum_fraction": 0.5948081016540527, "avg_line_length": 22.289474487304688, "blob_id": "0905a963faa942cc98cb5acb62959967af888a8f", "content_id": "1e8ddf6cf3b70544a7670a64aac037bd0c46415d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 54, "num_lines": 38, "path": "/preprocess_json.py", "repo_name": "poojalnarayan/isi-project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport pickle\nimport json\nimport time\nimport gzip\n\ndef save_dict(obj, name):\n with open('obj_'+name+'.pkl','wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) \n f.close()\n\nstart = time.time()\nprint('reading the gzipped json obj file')\nwith gzip.open('prop_idents_v6_nice.json.gz') as f:\n json_obj = json.load(f) \nprint('done')\n\nprint('creating identifier dictionary .. 
')\nI_dict = dict()\nfor key in json_obj.keys(): \n for k,v in json_obj[key].items():\n if k in I_dict:\n curr = I_dict[k]\n curr.append((v, key))\n else:\n curr = list()\n curr.append((v, key))\n I_dict[k] = curr\nprint('done..')\n\nprint('saving the dictionary on disk ..')\nsave_dict(I_dict, 'I_dict')\nprint('done')\n\nend = time.time()\ntotal_time = end-start\nprint(\"time taken ( preprocess): %.2f s\" % total_time) \n" }, { "alpha_fraction": 0.49289563298225403, "alphanum_fraction": 0.5036746859550476, "avg_line_length": 29.014705657958984, "blob_id": "f897627a20ee49b668bd8e92ce8711ac9260f95d", "content_id": "ab606df9324a45f1d978b4520b819e1c9f36a9ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2041, "license_type": "no_license", "max_line_length": 80, "num_lines": 68, "path": "/process_queries.py", "repo_name": "poojalnarayan/isi-project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport pickle \nimport time\nimport gzip\nimport json \n\ndef load_dict(name):\n # the with-statement closes the file automatically\n with open('obj_' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\ndef read_file(filename):\n with open(filename) as f:\n lines = [line.rstrip('\\n') for line in f]\n return lines\n\nstart = time.time()\nprint('reading the identifier queries file ..')\nqueries_file = sys.argv[1]\nqueries = read_file(queries_file)\nprint('done')\n\nprint('reading the I_dict obj file ..')\nI_dict = load_dict('I_dict')\nprint('done')\n\nP_hist = dict()\nfor i_query in queries:\n if i_query in I_dict:\n for p in [x[1] for x in I_dict[i_query]]:\n if p in P_hist:\n P_hist[p] = P_hist[p] + 1\n else:\n P_hist[p] = 1\n\nsorted_P_hist = sorted(list(P_hist.items()), key=lambda x: x[1], reverse=True)\ntop_3_P = [x[0] for x in sorted_P_hist[:3]]\ntop_3_P_coverage = [ float(x[1])/len(queries) for x in sorted_P_hist[:3]]\n\nprint('reading the gzipped json obj file')\nwith gzip.open('prop_idents_v6_nice.json.gz') as f:\n json_obj = json.load(f) \nprint('done')\n\ntop_3_P_dicts = [ json_obj[P] for P in top_3_P ]\n\nprint('--------------------------------------------------------')\nprint('top 3 P values and their coverage given the query file')\nprint('---')\nfor i, (p,c) in enumerate(zip(top_3_P, top_3_P_coverage)):\n print('--------------------------------------------------------')\n print('top ' + str(i+1) + \": \" + p + \", coverage: \" + str(c))\n print('--------------------------------------------------------')\n for i_query in queries:\n if len(top_3_P_dicts) > i and i_query in top_3_P_dicts[i]:\n print(i_query + \": \" + top_3_P_dicts[i][i_query])\n else:\n print(i_query + ': NULL')\n\nprint('--------------------------------------------------------')\n\nend = time.time()\ntotal_time = end-start\nprint(\"time taken (query search): %.2f s\" % total_time) \nprint('--------------------------------------------------------')\n" } ]
4
momentum-morehouse/django-uptact-cynwachi
https://github.com/momentum-morehouse/django-uptact-cynwachi
3bba713f3c819b34d698077db0bf0ef201882880
1963e45976e70348d84cdd580faa1bf31708afe6
56ce3dd0e7a904e580364c9a5de23d17ab11a8fe
refs/heads/master
2023-08-21T19:18:18.286693
2020-11-02T15:12:26
2020-11-02T15:12:26
303,807,620
0
0
null
2020-10-13T19:24:47
2020-11-02T15:29:25
2021-09-22T19:42:40
Python
[ { "alpha_fraction": 0.5565611124038696, "alphanum_fraction": 0.6033182740211487, "avg_line_length": 26.625, "blob_id": "b596e854d0bc88633a7fef0a462906f9f0a0b680", "content_id": "ca01ae60cbd13abac3eaa39a375531f756e142af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 137, "num_lines": 24, "path": "/contacts/migrations/0003_auto_20201022_1142.py", "repo_name": "momentum-morehouse/django-uptact-cynwachi", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.1 on 2020-10-22 16:42\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contacts', '0002_auto_20201019_0959'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='note',\n name='contact',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='contacts.contact'),\n ),\n migrations.AddField(\n model_name='note',\n name='note',\n field=models.DateTimeField(blank=True, null=True),\n ),\n ]\n" } ]
1
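The migration in the django-uptact-cynwachi record above adds two fields to an existing Note model: a nullable ForeignKey named contact and a nullable DateTimeField named note. A hypothetical models.py that would produce those AddField operations is sketched below; only the two added fields are taken from the migration, while the Contact.name field and the rest of the layout are assumptions for illustration.

from django.db import models


class Contact(models.Model):
    name = models.CharField(max_length=255)  # assumed; not visible in the migration


class Note(models.Model):
    # Matches AddField 'contact': nullable FK with cascade delete; the
    # related_name makes the reverse accessor contact.notes.all() available.
    contact = models.ForeignKey(
        Contact,
        null=True,
        on_delete=models.CASCADE,
        related_name="notes",
    )
    # Matches AddField 'note': an optional timestamp.
    note = models.DateTimeField(blank=True, null=True)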
durga266/ultra_sonic-sensor
https://github.com/durga266/ultra_sonic-sensor
afe5da2ef9ea2dcd39cf6de3eef15c98d3fefc0b
1816c6dea9c2b93c079545fe397ca42c8b9c8c67
4348c7dbf45e8b41d6869c69452967b97d40ed58
refs/heads/master
2020-06-04T14:28:50.260272
2019-06-15T09:46:43
2019-06-15T09:46:43
192,062,266
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6128093004226685, "alphanum_fraction": 0.6550218462944031, "avg_line_length": 23.535715103149414, "blob_id": "a143fe319800e294b7f052941965464509087363", "content_id": "ccc4bc59a5792fb7a42a0bc7fb4f4af2c3e7fd2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/ultrasonic_sensor.py", "repo_name": "durga266/ultra_sonic-sensor", "src_encoding": "UTF-8", "text": "import RPi.GPIO as gpio\nimport time\ngpio.setwarnings(False)\ngpio.setmode(gpio.BCM) #16,28\ntrigpin = 25\nechopin = 1\ngpio.setup(trigpin,gpio.OUT)\ngpio.setup(echopin,gpio.IN)\n\nprint(\"calculating distance\")\n\nwhile(True):\n gpio.output(trigpin,0)\n time.sleep(2)\n gpio.output(trigpin,1)\n time.sleep(0.00001)\n gpio.output(trigpin,0)\n\n while(gpio.input(echopin)==0):\n start_time = time.time()\n while(gpio.input(echopin)==1):\n stop_time = time.time()\n duration = stop_time - start_time\n distance = (34000*duration)/2\n if(distance>0) and (distance<400):\n print(\"obstacle is at a distance : \",str(distance))\n else:\n print(\"out of range\")\n" } ]
1
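ultrasonic_sensor.py in the record above interleaves GPIO polling with the distance math. The math itself is simple: an HC-SR04-style sensor holds its echo pin high for the sound's round trip, so the one-way distance is speed of sound times duration divided by 2. A hardware-free sketch of just that conversion, reusing the script's 34000 cm/s constant (about 34300 cm/s at 20 C would be slightly more accurate):

SPEED_OF_SOUND_CM_PER_S = 34000  # constant used by the script above

def echo_to_distance_cm(echo_duration_s):
    """Convert an echo round-trip time in seconds to a one-way distance in cm."""
    return SPEED_OF_SOUND_CM_PER_S * echo_duration_s / 2

# A 2 ms round trip corresponds to roughly 34 cm.
print(echo_to_distance_cm(0.002))  # 34.0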
jcfellers/DSP539_pyExam
https://github.com/jcfellers/DSP539_pyExam
d0317c7e4e0ceddf2356186e1e4f5bc72314e63e
50e185f68b198909b9ba150bcb42db458deefed3
a511162d708c38dfc7b64cc04dbea5541d8de4c0
refs/heads/master
2023-04-17T11:25:42.265953
2021-04-29T16:28:35
2021-04-29T16:28:35
362,874,108
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7821782231330872, "alphanum_fraction": 0.789250373840332, "avg_line_length": 40.588233947753906, "blob_id": "cd07ce951553800e787cf7fffa27ceac79dff42d", "content_id": "bebf1fe3e2cfba3e938781282ab45ad0efe42852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 707, "license_type": "no_license", "max_line_length": 87, "num_lines": 17, "path": "/README.txt", "repo_name": "jcfellers/DSP539_pyExam", "src_encoding": "UTF-8", "text": "Background Information:\n\n1) test_Jfellers_kmers.py is the python test script. It is setup to test the \nJfellers_kmers script by using the first string in strings.txt. Parameters for testing\nare established at the top of the script. \n\n2) Jfellers_kmers.py is the python computational script. It outputs two files for each\nstring in the file entered on the command line. The first is for the dataframe\ncontaining all kmer computations the other is for its linguistic complexity.\nOutputs go to the Results directory. \n\nDirections for use:\n\n1) To run pytest, type to the command line: pytest\n\n2) To run the computations on the file of strings, type to the command line:\npython3 Jfellers_kmers.py strings.txt\n" }, { "alpha_fraction": 0.5999019145965576, "alphanum_fraction": 0.6065211892127991, "avg_line_length": 27.957143783569336, "blob_id": "f09d44bda85e32e22f5d810d4744e00688346503", "content_id": "12a511bf6bcd512efcf1a543d2d7b8160883e21f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4079, "license_type": "no_license", "max_line_length": 80, "num_lines": 140, "path": "/Jfellers_kmers.py", "repo_name": "jcfellers/DSP539_pyExam", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 21 13:34:17 2021\n\n@author: Justin\n\"\"\"\nfrom collections import Counter\nimport pandas as pd\nimport sys\n\n# function for possible kmers\ndef possible_kmers(string, k):\n '''\n Parameters\n ----------\n string : \n Type: string\n Description: Any string of characters composed of the letters A,C,G,T.\n \n k : \n Type: int\n Description: Defined length of the sub-string.\n\n Returns\n -------\n The number of possible kmer combinations: min( len(string)-k+1, 4**k ).\n '''\n \n # minimum between string length minus k plus 1 and 4^k\n assert type(k) == int\n return(min(len(string)-k+1, 4**k))\n\n# function for observed kmers\ndef observed_kmers(string, k):\n '''\n Parameters\n ----------\n string : \n Type: string\n Description: Any string of characters composed of the letters A,C,G,T.\n \n k : \n Type: int\n Description: Defined length of the sub-string.\n\n Raises\n ------\n IndexError\n Check indexing on loop for building substrings.\n\n Returns\n -------\n The number of observed (e.g. 
unique) kmer combinations.\n    '''\n    assert type(k) == int\n    # string converted to a list\n    strLst = list(string)\n    # list to keep track of substrings\n    substrings=[]\n    # for every element in strLst except those that will raise IndexError,\n    for i in range(0, len(strLst)-k+1):\n        # create sub of starting, ending, and middle characters & append\n        sub = strLst[i : i+k : 1]\n        substrings.append(sub)\n    # use Counter w/lst comprehension to count unique sub frequencies\n    uniqueCounts = Counter([tuple(i) for i in substrings])\n    # return the length of uniqueCounts\n    return(len(uniqueCounts))\n\n# function for pandas df with all possible k and their observed & possible kmers\ndef k_df(string):\n    '''\n    Parameters\n    ----------\n    string : string\n        Any string of characters composed of the letters A,C,G,T.\n\n    Returns\n    -------\n    k_df : pandas dataframe\n        Dataframe of k, observed kmers, possible kmers\n    '''\n    # create empty dataframe with applicable columns\n    cols = ['k', 'Observed_kmers', 'Possible_kmers']\n    k_df = pd.DataFrame(columns = cols)\n    # set maximum k\n    max_k = len(string)\n    # for every value of k up to max_k:\n    for i in range(1, max_k+1):\n        pkmers = possible_kmers(string, i)\n        okmers = observed_kmers(string, i)\n        data = {'k': [i], 'Observed_kmers': [okmers],\n                'Possible_kmers': [pkmers]}\n        data_df = pd.DataFrame.from_dict(data)\n        k_df = k_df.append(data_df, ignore_index = True)\n    return (k_df)\n\n# function for linguistic complexity\ndef ling_complex(string):\n    '''\n    Parameters\n    ----------\n    string : string\n        Any string of characters composed of the letters A,C,G,T.\n\n    Returns\n    -------\n    Computed linguistic complexity for provided string. \n    '''\n    \n    kmers_df = k_df(string)\n    complexity = sum(kmers_df['Observed_kmers'])/sum(kmers_df['Possible_kmers'])\n    return(complexity)\n\ndef main(string):\n    # write the output files for each string in the read-in file\n    # pandas dataframe to csv\n    k_df(string).to_csv('Results/%s_kmersDataframe.csv' % string, index = False)\n    # convert complexity from float to Series and write to csv\n    complexity = pd.Series(ling_complex(string), name = 'Linguistic Complexity')\n    complexity.to_csv('Results/%s_lingComplexity.csv' % string, index = False)\n\nif __name__ == '__main__':\n    # read in the strings file named on the command line\n    #file = 'strings.txt'\n    file = sys.argv[1]\n    open_file = open(file, 'r')\n    line = open_file.readline()[:-1]\n    # while there is a line to be read in...\n    while line:\n        # test that a string is being read in from the file\n        assert type(line) == str\n        # execute main script on line\n        main(line)\n        # move to the next line\n        line = open_file.readline()[:-1]\n    # close the file\n    open_file.close()\n    \n    print('Script Complete')\n    \n    \n    \n    \n    \n" }, { "alpha_fraction": 0.670040488243103, "alphanum_fraction": 0.6933198571205139, "avg_line_length": 23.09756088256836, "blob_id": "61c48736feb5d0b8df50151795f841be8f839849", "content_id": "211030813d5d73a5d987cee9db7e7b9a361ea49d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 55, "num_lines": 41, "path": "/test_Jfellers_kmers.py", "repo_name": "jcfellers/DSP539_pyExam", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 29 09:45:29 2021\n\n@author: Justin\n\"\"\"\n\nfrom Jfellers_kmers import *\n\n# Setup: decisions for testing environment\nfile = 'strings.txt'\nopen_file = open(file, 'r')\n    \n# Testing Parameters: use first string in file & k = 7 \nline = open_file.readline()[:-1]\nk_test = 
7\n\n# Expected Results \nexpected_possible_kmers = 3\nexpected_observed_kmers = 3\nexpected_kmers_df_shape = (len(line),3)\nexpected_ling_complexity = 0.875\n\n# close the file\nopen_file.close()\n\ndef test_possible_kmers():\n actual_result = possible_kmers(line, k_test)\n assert actual_result == expected_possible_kmers\n \ndef test_observed_kmers():\n actual_result = observed_kmers(line, k_test)\n assert actual_result == expected_observed_kmers\n \ndef test_k_df():\n actual_result = k_df(line).shape\n assert actual_result == expected_kmers_df_shape\n\ndef test_ling_complex():\n actual_result = ling_complex(line)\n assert actual_result == expected_ling_complexity\n" } ]
3
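For the DSP539_pyExam record above: the three computations in Jfellers_kmers.py reduce to a few lines once a set comprehension replaces the list-plus-Counter machinery. A compact re-derivation follows. The sample string is made up, though it was chosen to reproduce the values test_Jfellers_kmers.py expects for the (unshown) first line of strings.txt: 3 observed and 3 possible kmers at k = 7, and a linguistic complexity of 0.875.

def observed_kmers(s, k):
    # Unique length-k substrings; a set deduplicates in one pass.
    return len({s[i:i + k] for i in range(len(s) - k + 1)})

def possible_kmers(s, k):
    # Capped by the window count and by 4**k (alphabet A, C, G, T).
    return min(len(s) - k + 1, 4 ** k)

def linguistic_complexity(s):
    ks = range(1, len(s) + 1)
    return sum(observed_kmers(s, k) for k in ks) / sum(possible_kmers(s, k) for k in ks)

s = "ATTTGGATT"  # illustrative only, not taken from strings.txt
print(observed_kmers(s, 7), possible_kmers(s, 7))  # 3 3
print(linguistic_complexity(s))  # 35/40 = 0.875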
osvenskan/sysv_ipc
https://github.com/osvenskan/sysv_ipc
01e00c113b03b64351627efb76f65616f1847e46
d8b463a63c0864ea6985cd82f45850e26a9d6c6a
6240a4cfe2decb8e6a372fcdea7fbdf629c6ed27
refs/heads/develop
2023-08-26T16:32:45.002894
2021-01-17T17:36:50
2021-01-17T17:36:50
118,961,693
11
6
NOASSERTION
2018-01-25T20:04:46
2021-01-17T17:37:35
2021-01-17T18:19:33
C
[ { "alpha_fraction": 0.440828412771225, "alphanum_fraction": 0.47633135318756104, "avg_line_length": 16.842105865478516, "blob_id": "c49fb91cde36a177af488d461eff579466354eeb", "content_id": "12f44dd9b89ad885d31eed5d71e36c4a4cd855a2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 338, "license_type": "permissive", "max_line_length": 77, "num_lines": 19, "path": "/keys.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h> \n#include <limits.h>\n\nint main() { \n int i;\n int key;\n \n\n for (i = 0; i < 10000000; i++) {\n // ref: http://www.c-faq.com/lib/randrange.html\n key = ((int)((double)rand() / ((double)RAND_MAX + 1) * INT_MAX)) + 1;\n printf(\"%d\\n\", key);\n }\n \n \n \n return 1;\n}" }, { "alpha_fraction": 0.659375011920929, "alphanum_fraction": 0.6625000238418579, "avg_line_length": 21.64285659790039, "blob_id": "596bcc4e225ca3c2b317de06c9d630919845f2f5", "content_id": "9564d63b9aa8f56e53e070d41bb8cbf86da6913b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 320, "license_type": "permissive", "max_line_length": 46, "num_lines": 14, "path": "/demos/sem_and_shm/utils.h", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "struct param_struct {\n int iterations;\n int live_dangerously;\n int key;\n int permissions;\n int size;\n};\n\n\nvoid md5ify(char *, char *);\nvoid say(const char *, char *);\nint acquire_semaphore(const char *, int, int);\nint release_semaphore(const char *, int, int);\nvoid read_params(struct param_struct *);\n\n\n\n" }, { "alpha_fraction": 0.5093167424201965, "alphanum_fraction": 0.5465838313102722, "avg_line_length": 17.941177368164062, "blob_id": "f885111a4f470bab9d648592d448b7f74aed01cf", "content_id": "bedc888e4704427f81d3df6517271b322fe33eaf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "permissive", "max_line_length": 71, "num_lines": 17, "path": "/extras/memory_limit_test.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "import sysv_ipc\n\ndone = False\n\nsize = 1024\n\nwhile not done:\n s = \"Trying %d (%dk)...\" % (size, size / 1024)\n print(s)\n try:\n mem = sysv_ipc.SharedMemory(None, sysv_ipc.IPC_CREX, size=size)\n except MemoryError:\n done = True\n else:\n mem.detach()\n mem.remove()\n size += 1024\n" }, { "alpha_fraction": 0.667664647102356, "alphanum_fraction": 0.667664647102356, "avg_line_length": 19.875, "blob_id": "732674ac8524da0acc910b5f1a333b1121d8daf1", "content_id": "ff414ac099f7dc4cbea1095b857bcf2694873884", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "permissive", "max_line_length": 71, "num_lines": 16, "path": "/demos/message_queues/cleanup.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "import sysv_ipc\nimport utils\n\nparams = utils.read_params()\n\nkey = params[\"KEY\"]\n\ntry:\n mq = sysv_ipc.MessageQueue(key)\nexcept sysv_ipc.ExistentialError:\n print('''Message queue with key \"{}\" doesn't exist.'''.format(key))\nelse:\n mq.remove()\n print('Message queue with key \"{}\" removed'.format(key))\n\nprint(\"\\nAll clean!\")\n" }, { "alpha_fraction": 0.6823821067810059, "alphanum_fraction": 0.692307710647583, "avg_line_length": 17.31818199157715, "blob_id": 
"1bdd9fe2baccfe79de2572ae7491dcd7eae6f5af", "content_id": "bc72bc1d760a8c0d8d9cbaf97b10b377ea1ca8fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 403, "license_type": "permissive", "max_line_length": 67, "num_lines": 22, "path": "/prober/probe_page_size.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "//#define _XOPEN_SOURCE 500\n#include \"Python.h\"\n\n#include <stdio.h>\n\n// Code for determining page size swiped from Python's mmapmodule.c\n#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)\nstatic int\nmy_getpagesize(void)\n{\n\treturn sysconf(_SC_PAGESIZE);\n}\n#else\n#include <unistd.h>\n#define my_getpagesize getpagesize\n#endif\n\nint main(void) { \n printf(\"%d\\n\", my_getpagesize());\n \n return 0; \n}\n" }, { "alpha_fraction": 0.6505376100540161, "alphanum_fraction": 0.6720430254936218, "avg_line_length": 29.83333396911621, "blob_id": "450618c9d577e350cfd08b30ea7313f3fa2a7d2f", "content_id": "41e17c4431d02c297c18d4f7a35808cc333b3457", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 186, "license_type": "permissive", "max_line_length": 54, "num_lines": 6, "path": "/demos/sem_and_shm/make_all.sh", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ngcc -Wall -c -o md5.o md5.c\ngcc -Wall -c -o utils.o utils.c\ngcc -Wall -L. md5.o utils.o -o premise premise.c\ngcc -Wall -L. md5.o utils.o -o conclusion conclusion.c\n\n" }, { "alpha_fraction": 0.6973069906234741, "alphanum_fraction": 0.7059245705604553, "avg_line_length": 47.017242431640625, "blob_id": "cd35d11b81b179dda47de9e771f01ee18dc6ef09", "content_id": "77644c45c1e1b401ef31a99ea89a9517774a675a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2785, "license_type": "permissive", "max_line_length": 96, "num_lines": 58, "path": "/tests/base.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "# Python imports\nimport unittest\nimport random\nimport time\n\n# Project imports\nimport sysv_ipc\n\n\ndef make_key():\n \"\"\"Generate a random key suitable for an IPC object.\"\"\"\n return random.randint(sysv_ipc.KEY_MIN, sysv_ipc.KEY_MAX)\n\n\ndef sleep_past_granularity():\n \"\"\"A utility method that encapsulates a type-specific detail of testing.\n\n I test all of the time-related variables in the IPC structs (o_time, shm_atime, shm_dtime,\n shm_ctime, msg_ctime, msg_stime, and msg_rtime) to ensure they change when they're supposed\n to (e.g. when a segment is detached, for shm_dtime). For variables that are initialized to 0\n (like o_time), it's easy to verify that they're 0 to start with and then non-zero after the\n change.\n\n Other variables (like shm_ctime) are trickier to test because they're already non-zero\n immediately after the object is created. My test has to save the value, do something that\n should change it, and then compare the saved value to the current one via assertNotEqual().\n\n Some (most? all?) systems define those time-related values as integral values (int or long),\n so their granularity is only 1 second. 
If I don't force at least 1 second to elapse between\n    the statement where I save the value and the statement that should change it, they'll almost\n    always happen in the same second and the assertNotEqual() will fail even though all code (mine and the\n    system) has behaved correctly.\n\n    This method sleeps for 1.1 seconds to avoid the problem described above.\n    \"\"\"\n    time.sleep(1.1)\n\n\nclass Base(unittest.TestCase):\n    \"\"\"Base class for test cases.\"\"\"\n    def assertWriteToReadOnlyPropertyFails(self, target_object, property_name,\n                                           value):\n        \"\"\"test that writing to a readonly property raises an exception\"\"\"\n        # The attributes tested with this code are implemented differently in C.\n        # For instance, Semaphore.value is a 'getseters' with a NULL setter,\n        # whereas Semaphore.name is a reference into the Semaphore member\n        # definition.\n        # Under Python 2.6, writing to sem.value raises AttributeError whereas\n        # writing to sem.name raises TypeError. Under Python 3, both raise\n        # AttributeError (but with different error messages!).\n        # This illustrates that Python is a little unpredictable in this\n        # matter. Rather than testing each of the numerous combinations\n        # of Python versions and attribute implementation, I just accept\n        # both TypeError and AttributeError here.\n        # ref: http://bugs.python.org/issue1687163\n        # ref: http://bugs.python.org/msg127173\n        with self.assertRaises((TypeError, AttributeError)):\n            setattr(target_object, property_name, value)\n" }, { "alpha_fraction": 0.7335766553878784, "alphanum_fraction": 0.7397810220718384, "avg_line_length": 35.52000045776367, "blob_id": "1199ad262c5bb929e11f95b1262c7d70c8ae3581", "content_id": "dd5a8fe8971b9a6bad960d218da74995f6266a52", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2740, "license_type": "permissive", "max_line_length": 74, "num_lines": 75, "path": "/mq.h", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#include <limits.h> // for definition of SSIZE_MAX\n\ntypedef struct {\n    PyObject_HEAD\n    key_t key;\n    int id;\n    unsigned long max_message_size;\n} MessageQueue;\n\n/* Message queue message struct for send() & receive()\nOn many systems this is defined in sys/msg.h already, but it's better\nfor me to define it here. Name it something other than msgbuf to avoid\nconflict with the struct that the OS header file might define.\n*/\nstruct queue_message {\n    long type;\n    char message[];\n};\n\n/* Maximum message size is limited by (a) the largest Python string I can\ncreate and (b) SSIZE_MAX. 
The latter restriction comes from the spec which\nsays, \"If the value of msgsz is greater than {SSIZE_MAX}, the result is\nimplementation-defined.\"\nref: http://www.opengroup.org/onlinepubs/000095399/functions/msgrcv.html\n*/\n#define MIN(a,b) (((a)<(b))?(a):(b))\n#define QUEUE_MESSAGE_SIZE_MAX MIN(SSIZE_MAX, PY_STRING_LENGTH_MAX)\n\n/* The max message size is probably a very big number, and since a\nmax-sized buffer is allocated every time receive() is called, it would be\nugly if the default message size for new queues was the same as the max.\nIn addition, many operating systems limit the entire queue to 2048 bytes,\nso defaulting the max message to something larger seems a bit stupid.\n\nThis value is also present in numeric form in ReadMe.html, so if you\nchange it here, change it there too.\n*/\n#define QUEUE_MESSAGE_SIZE_MAX_DEFAULT 2048\n\n/* Object methods */\nPyObject *MessageQueue_new(PyTypeObject *, PyObject *, PyObject *);\nint MessageQueue_init(MessageQueue *, PyObject *, PyObject *);\nvoid MessageQueue_dealloc(MessageQueue *);\nPyObject *MessageQueue_send(MessageQueue *, PyObject *, PyObject *);\nPyObject *MessageQueue_receive(MessageQueue *, PyObject *, PyObject *);\nPyObject *MessageQueue_remove(MessageQueue *);\n\n/* Object attributes (read-write & read-only) */\nPyObject *mq_get_mode(MessageQueue *);\nint mq_set_mode(MessageQueue *, PyObject *);\n\nPyObject *mq_get_uid(MessageQueue *);\nint mq_set_uid(MessageQueue *, PyObject *);\n\nPyObject *mq_get_gid(MessageQueue *);\nint mq_set_gid(MessageQueue *, PyObject *);\n\nPyObject *mq_get_max_size(MessageQueue *);\nint mq_set_max_size(MessageQueue *, PyObject *);\n\nPyObject *mq_get_key(MessageQueue *);\nPyObject *mq_get_last_send_time(MessageQueue *);\nPyObject *mq_get_last_receive_time(MessageQueue *);\nPyObject *mq_get_last_change_time(MessageQueue *);\nPyObject *mq_get_last_send_pid(MessageQueue *);\nPyObject *mq_get_last_receive_pid(MessageQueue *);\nPyObject *mq_get_current_messages(MessageQueue *);\nPyObject *mq_get_c_uid(MessageQueue *);\nPyObject *mq_get_c_gid(MessageQueue *);\n\nPyObject *mq_str(MessageQueue *);\nPyObject *mq_repr(MessageQueue *);\n\n/* Misc. */\nPyObject *mq_remove(int);\n\n" }, { "alpha_fraction": 0.41491472721099854, "alphanum_fraction": 0.4236414134502411, "avg_line_length": 35.273380279541016, "blob_id": "c24ca30e9e421bff36e25596387b2b93d4aec911", "content_id": "7b10aef17a392f458c43048508884519ead38f4c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5042, "license_type": "permissive", "max_line_length": 136, "num_lines": 139, "path": "/demos/sem_and_shm/conclusion.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#include <sys/ipc.h>\t\t/* for system's IPC_xxx definitions */\n#include <sys/shm.h>\t\t/* for shmget, shmat, shmdt, shmctl */\n#include <sys/sem.h>\t\t/* for semget, semctl, semop */\n\n#include <stdio.h> \n#include <errno.h> \n#include <unistd.h> \n#include <string.h> \n#include <time.h>\n\n#include \"md5.h\"\n#include \"utils.h\"\n\nstatic const char MY_NAME[] = \"Mrs. Conclusion\";\n\n// Set up a Mrs. Premise & Mrs. Conclusion conversation.\n\nint main() { \n int sem_id = 0;\n int shm_id = 0;\n int rc;\n char s[1024];\n int i;\n int done;\n char last_message_i_wrote[256];\n char md5ified_message[256];\n void *address = NULL;\n struct param_struct params;\n \n say(MY_NAME, \"Oooo 'ello, I'm Mrs. Conclusion!\");\n \n read_params(&params);\n\n // Mrs. 
Premise has already created the semaphore and shared memory. \n    // I just need to get handles to them.\n    sem_id = semget(params.key, 0, params.permissions);\n    \n    if (-1 == sem_id) {\n        sem_id = 0;\n        sprintf(s, \"Getting a handle to the semaphore failed; errno is %d\", errno);\n        say(MY_NAME, s);\n    }\n    else {\n        // get a handle to the shared memory\n        shm_id = shmget(params.key, params.size, params.permissions);\n        \n        if (shm_id == -1) {\n            shm_id = 0;\n            sprintf(s, \"Couldn't get a handle to the shared memory; errno is %d\", errno);\n            say(MY_NAME, s);\n        }\n        else {\n            sprintf(s, \"Shared memory's id is %d\", shm_id);\n            say(MY_NAME, s);\n\n            // Attach the memory.\n            address = shmat(shm_id, NULL, 0);\n\n            if ((void *)-1 == address) {\n                address = NULL;\n                sprintf(s, \"Attaching the shared memory failed; errno is %d\", errno);\n                say(MY_NAME, s);\n            }\n            else {\n                sprintf(s, \"shared memory address = %p\", address);\n                say(MY_NAME, s);\n\n                i = 0;\n                done = 0;\n                last_message_i_wrote[0] = '\\0';\n                while (!done) {\n                    sprintf(s, \"iteration %d\", i);\n                    say(MY_NAME, s);\n\n                    // Wait for Mrs. Premise to free up the semaphore.\n                    rc = acquire_semaphore(MY_NAME, sem_id, params.live_dangerously);\n                    if (rc)\n                        done = 1;\n                    else {\n                        while ( (!rc) && \\\n                                (!strcmp((char *)address, last_message_i_wrote)) \n                              ) {\n                            // Nothing new; give Mrs. Premise another chance to respond.\n                            sprintf(s, \"Read %zu characters '%s'\", strlen((char *)address), (char *)address);\n                            say(MY_NAME, s);\n                            rc = release_semaphore(MY_NAME, sem_id, params.live_dangerously);\n                            if (!rc) {\n                                rc = acquire_semaphore(MY_NAME, sem_id, params.live_dangerously);\n                            }\n                        }\n                        \n                        md5ify(last_message_i_wrote, md5ified_message);\n\n                        // I always accept the first message (when i == 0)\n                        if ( (i == 0) || (!strcmp(md5ified_message, (char *)address)) ) {\n                            // All is well\n                            i++;\n                            \n                            if (i == params.iterations) \n                                done = 1;\n\n                            // MD5 the reply and write back to Mrs. Premise.\n                            md5ify((char *)address, md5ified_message);\n\n                            // Write back to Mrs. 
Premise.\n sprintf(s, \"Writing %zu characters '%s'\", strlen(md5ified_message), md5ified_message);\n say(MY_NAME, s);\n\n strcpy((char *)address, md5ified_message);\n\n strcpy(last_message_i_wrote, md5ified_message);\n }\n else {\n sprintf(s, \"Shared memory corruption after %d iterations.\", i);\n say(MY_NAME, s); \n sprintf(s, \"Mismatch; rc = %d, new message is '%s', expected '%s'.\", rc, (char *)address, md5ified_message);\n say(MY_NAME, s);\n done = 1;\n } \n }\n\n // Release the semaphore.\n rc = release_semaphore(MY_NAME, sem_id, params.live_dangerously);\n if (rc)\n done = 1;\n }\n\n if (-1 == shmdt(address)) {\n sprintf(s, \"Detaching the memory failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n address = NULL;\n }\n }\n }\n \n \n return 0; \n}\n" }, { "alpha_fraction": 0.670314610004425, "alphanum_fraction": 0.6819425225257874, "avg_line_length": 29.45833396911621, "blob_id": "0b4f8174b875c479d88c1f32f2e23eada735586b", "content_id": "04e195055347e5ade82f185beec39745c7d3956b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "permissive", "max_line_length": 93, "num_lines": 48, "path": "/demos/buffer_protocol/demo.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "import sysv_ipc\n\n# Create a shared memory segment and write the (English) alphabet to the shared memory.\nmem = sysv_ipc.SharedMemory(None, sysv_ipc.IPC_CREX, size=sysv_ipc.PAGE_SIZE)\n\nASCII_A = 0x61\nalphabet = ''.join([chr(ASCII_A + i) for i in range(26)])\nalphabet = bytes(alphabet, 'ASCII')\nmem.write(alphabet)\n\n# Create a bytearray from the SharedMemory.\nba = bytearray(mem)\n\n# bytearray instances have \"most of the usual methods of mutable sequences\", such as replace.\n# https://docs.python.org/3/library/functions.html#func-bytearray\nba = ba.replace(b'c', b'x')\n\nassert(ba[:4] == b'abxd')\n\n# Unlike a memoryview (see below), changes to the bytearray do *not* affect the underlying\n# SharedMemory -- the bytearray is a copy.\nassert(mem.read(4) == b'abcd')\n\n# Reset the memory to contain the alphabet unmodified.\nmem.write(alphabet)\n\n# Create a memoryview from the SharedMemory.\nmv = memoryview(mem)\n\n# This memoryview has format = 'B', itemsize = 1, shape = (sysv_ipc.PAGE_SIZE, ), ndim = 1,\n# strides = (1, ), and is read/write.\n\n# This shows that you can take slices of a memoryview\nassert([chr(c) for c in mv[3:6]] == ['d', 'e', 'f'])\n\n# This shows that you can write to the memoryview.\nmv[4] = ord('x')\n\nassert([chr(c) for c in mv[3:6]] == ['d', 'x', 'f'])\n\n# Changes to the underlying segment are reflected in the memoryview\nmem.write(b'xxx')\nassert([chr(c) for c in mv[:6]] == ['x', 'x', 'x', 'd', 'x', 'f'])\n\nmem.detach()\nmem.remove()\n\nprint('Done!')\n" }, { "alpha_fraction": 0.4691168963909149, "alphanum_fraction": 0.4751148521900177, "avg_line_length": 36.1374397277832, "blob_id": "031e6581d5b2413cd3c7eb209e459ce095d4f583", "content_id": "11cbb4da8b95777f00948e8e666cf396cb00f17a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7836, "license_type": "permissive", "max_line_length": 123, "num_lines": 211, "path": "/demos/sem_and_shm/premise.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#include <sys/ipc.h>\t\t/* for system's IPC_xxx definitions */\n#include <sys/shm.h>\t\t/* for shmget, shmat, shmdt, shmctl */\n#include <sys/sem.h>\t\t/* for semget, semctl, semop 
*/\n\n#include <stdlib.h> \n#include <stdio.h> \n#include <errno.h> \n#include <unistd.h> \n#include <string.h>\n#include <time.h>\n#include <fcntl.h>\n#include <stdarg.h>\n\n#include \"md5.h\"\n#include \"utils.h\"\n\nconst char MY_NAME[] = \"Mrs. Premise\";\n\n// Set up a Mrs. Premise & Mrs. Conclusion conversation.\n\nvoid get_current_time(char *);\n\nint main() { \n int rc;\n char s[1024];\n char last_message_i_wrote[256];\n char md5ified_message[256];\n int i = 0;\n int done = 0;\n struct param_struct params;\n int shm_id;\n void *address = NULL;\n int sem_id;\n struct shmid_ds shm_info;\n\n say(MY_NAME, \"Oooo 'ello, I'm Mrs. Premise!\");\n \n read_params(&params);\n \n // Create the shared memory\n shm_id = shmget(params.key, params.size, IPC_CREAT | IPC_EXCL | params.permissions);\n \n if (shm_id == -1) {\n shm_id = 0;\n sprintf(s, \"Creating the shared memory failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n else {\n sprintf(s, \"Shared memory's id is %d\", shm_id);\n say(MY_NAME, s);\n\n // Attach the memory.\n address = shmat(shm_id, NULL, 0);\n\n if ((void *)-1 == address) {\n address = NULL;\n sprintf(s, \"Attaching the shared memory failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n else {\n sprintf(s, \"shared memory address = %p\", address);\n say(MY_NAME, s);\n }\n }\n \n if (address) {\n // Create the semaphore\n sem_id = semget(params.key, 1, IPC_CREAT | IPC_EXCL | params.permissions);\n \n if (-1 == sem_id) {\n sem_id = 0;\n sprintf(s, \"Creating the semaphore failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n else {\n sprintf(s, \"the semaphore id is %d\", sem_id);\n say(MY_NAME, s);\n \n // I seed the shared memory with a random string (the current time).\n get_current_time(s);\n \n strcpy((char *)address, s);\n strcpy(last_message_i_wrote, s);\n\n sprintf(s, \"Wrote %zu characters: %s\", strlen(last_message_i_wrote), last_message_i_wrote);\n say(MY_NAME, s);\n \n i = 0;\n while (!done) {\n sprintf(s, \"iteration %d\", i);\n say(MY_NAME, s);\n\n // Release the semaphore...\n rc = release_semaphore(MY_NAME, sem_id, params.live_dangerously);\n // ...and wait for it to become available again. In real code \n // I might want to sleep briefly before calling .acquire() in\n // order to politely give other processes an opportunity to grab\n // the semaphore while it is free so as to avoid starvation. But \n // this code is meant to be a stress test that maximizes the \n // opportunity for shared memory corruption and politeness is \n // not helpful in stress tests.\n if (!rc)\n rc = acquire_semaphore(MY_NAME, sem_id, params.live_dangerously);\n\n if (rc)\n done = 1;\n else {\n // I keep checking the shared memory until something new has \n // been written.\n while ( (!rc) && \\\n (!strcmp((char *)address, last_message_i_wrote)) \n ) {\n // Nothing new; give Mrs. 
Conclusion another change to respond.\n sprintf(s, \"Read %zu characters '%s'\", strlen((char *)address), (char *)address);\n say(MY_NAME, s);\n rc = release_semaphore(MY_NAME, sem_id, params.live_dangerously);\n if (!rc) {\n rc = acquire_semaphore(MY_NAME, sem_id, params.live_dangerously);\n }\n }\n\n\n if (rc) \n done = 1;\n else {\n sprintf(s, \"Read %zu characters '%s'\", strlen((char *)address), (char *)address);\n say(MY_NAME, s);\n\n // What I read must be the md5 of what I wrote or something's \n // gone wrong.\n md5ify(last_message_i_wrote, md5ified_message);\n \n if (strcmp(md5ified_message, (char *)address) == 0) {\n // Yes, the message is OK\n i++;\n if (i == params.iterations)\n done = 1;\n\n // MD5 the reply and write back to Mrs. Conclusion.\n md5ify(md5ified_message, md5ified_message);\n \n sprintf(s, \"Writing %zu characters '%s'\", strlen(md5ified_message), md5ified_message);\n say(MY_NAME, s);\n\n strcpy((char *)address, md5ified_message);\n strcpy((char *)last_message_i_wrote, md5ified_message);\n }\n else {\n sprintf(s, \"Shared memory corruption after %d iterations.\", i);\n say(MY_NAME, s); \n sprintf(s, \"Mismatch; new message is '%s', expected '%s'.\", (char *)address, md5ified_message);\n say(MY_NAME, s);\n done = 1;\n }\n }\n }\n }\n\n // Announce for one last time that the semaphore is free again so that \n // Mrs. Conclusion can exit.\n say(MY_NAME, \"Final release of the semaphore followed by a 5 second pause\"); \n rc = release_semaphore(MY_NAME, sem_id, params.live_dangerously);\n sleep(5);\n // ...before beginning to wait until it is free again. \n // Technically, this is bad practice. It's possible that on a \n // heavily loaded machine, Mrs. Conclusion wouldn't get a chance\n // to acquire the semaphore. There really ought to be a loop here\n // that waits for some sort of goodbye message but for purposes of\n // simplicity I'm skipping that.\n\n say(MY_NAME, \"Final wait to acquire the semaphore\");\n rc = acquire_semaphore(MY_NAME, sem_id, params.live_dangerously);\n if (!rc) {\n say(MY_NAME, \"Destroying the shared memory.\");\n \n if (-1 == shmdt(address)) {\n sprintf(s, \"Detaching the memory failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n address = NULL;\n \n \n if (-1 == shmctl(shm_id, IPC_RMID, &shm_info)) {\n sprintf(s, \"Removing the memory failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n }\n }\n\n say(MY_NAME, \"Destroying the semaphore.\");\n // Clean up the semaphore\n if (-1 == semctl(sem_id, 0, IPC_RMID)) {\n sprintf(s, \"Removing the semaphore failed; errno is %d\", errno);\n say(MY_NAME, s);\n }\n }\n return 0; \n}\n\n\nvoid get_current_time(char *s) {\n time_t the_time;\n struct tm *the_localtime;\n char *pAscTime;\n\n the_time = time(NULL);\n the_localtime = localtime(&the_time);\n pAscTime = asctime(the_localtime);\n \n strcpy(s, pAscTime);\n}\n" }, { "alpha_fraction": 0.6549485325813293, "alphanum_fraction": 0.6612826585769653, "avg_line_length": 37.742332458496094, "blob_id": "5b9d7464934084677d44f10df6032eab07459d9d", "content_id": "fadf6ccdee60e2ab98764dbe258502dbe0e804a4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6315, "license_type": "permissive", "max_line_length": 93, "num_lines": 163, "path": "/tests/test_module.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "# Python imports\nimport unittest\nimport os\nimport resource\nimport warnings\nimport numbers\nimport tempfile\n\n# Project imports\nimport 
sysv_ipc\nfrom .base import Base\n\nONE_MILLION = 1000000\n\n\nclass TestModuleConstants(Base):\n \"\"\"Check that the sysv_ipc module-level constants are defined as expected\"\"\"\n def test_constant_values(self):\n \"\"\"test that constants are what I expect\"\"\"\n self.assertEqual(sysv_ipc.IPC_CREX, sysv_ipc.IPC_CREAT | sysv_ipc.IPC_EXCL)\n self.assertEqual(sysv_ipc.PAGE_SIZE, resource.getpagesize())\n self.assertIn(sysv_ipc.SEMAPHORE_TIMEOUT_SUPPORTED, (True, False))\n self.assertIsInstance(sysv_ipc.SEMAPHORE_VALUE_MAX, numbers.Integral)\n self.assertGreaterEqual(sysv_ipc.SEMAPHORE_VALUE_MAX, 1)\n self.assertIsInstance(sysv_ipc.VERSION, str)\n self.assertIsInstance(sysv_ipc.IPC_PRIVATE, numbers.Integral)\n self.assertIsInstance(sysv_ipc.KEY_MIN, numbers.Integral)\n self.assertIsInstance(sysv_ipc.KEY_MAX, numbers.Integral)\n self.assertGreater(sysv_ipc.KEY_MAX, sysv_ipc.KEY_MIN)\n self.assertIsInstance(sysv_ipc.SHM_RDONLY, numbers.Integral)\n self.assertIsInstance(sysv_ipc.SHM_RND, numbers.Integral)\n # These constants are only available under Linux as of this writing (Jan 2018).\n for attr_name in ('SHM_HUGETLB', 'SHM_NORESERVE', 'SHM_REMAP'):\n if hasattr(sysv_ipc, attr_name):\n self.assertIsInstance(getattr(sysv_ipc, attr_name), numbers.Integral)\n\n self.assertIsInstance(sysv_ipc.__version__, str)\n self.assertEqual(sysv_ipc.VERSION, sysv_ipc.__version__)\n self.assertIsInstance(sysv_ipc.__author__, str)\n self.assertIsInstance(sysv_ipc.__license__, str)\n self.assertIsInstance(sysv_ipc.__copyright__, str)\n\n\nclass TestModuleErrors(Base):\n \"\"\"Exercise the exceptions defined by the module\"\"\"\n def test_errors(self):\n self.assertTrue(issubclass(sysv_ipc.Error, Exception))\n self.assertTrue(issubclass(sysv_ipc.InternalError, sysv_ipc.Error))\n self.assertTrue(issubclass(sysv_ipc.PermissionsError, sysv_ipc.Error))\n self.assertTrue(issubclass(sysv_ipc.ExistentialError, sysv_ipc.Error))\n self.assertTrue(issubclass(sysv_ipc.BusyError, sysv_ipc.Error))\n self.assertTrue(issubclass(sysv_ipc.NotAttachedError, sysv_ipc.Error))\n\n\nclass TestModuleFunctions(Base):\n \"\"\"Exercise the sysv_ipc module-level functions\"\"\"\n def test_attach(self):\n \"\"\"Exercise attach()\"\"\"\n # Create memory, write something to it, then detach\n mem = sysv_ipc.SharedMemory(None, sysv_ipc.IPC_CREX)\n mem.write('hello world')\n mem.detach()\n self.assertFalse(mem.attached)\n self.assertEqual(mem.number_attached, 0)\n\n # Reattach memory via a different SharedMemory instance\n mem2 = sysv_ipc.attach(mem.id)\n self.assertFalse(mem.attached)\n self.assertTrue(mem2.attached)\n self.assertEqual(mem.number_attached, 1)\n self.assertEqual(mem2.number_attached, 1)\n\n self.assertEqual(mem2.read(len('hello world')), b'hello world')\n\n mem2.detach()\n\n mem.remove()\n\n self.assertRaises(sysv_ipc.ExistentialError, sysv_ipc.SharedMemory, mem.key)\n\n def test_attach_kwargs(self):\n \"\"\"Ensure attach takes kwargs as advertised\"\"\"\n mem = sysv_ipc.SharedMemory(None, sysv_ipc.IPC_CREX)\n mem.write('hello world')\n mem.detach()\n mem2 = sysv_ipc.attach(mem.id, flags=0)\n mem2.detach()\n mem.remove()\n\n def test_ftok(self):\n \"\"\"Exercise ftok()'s behavior of raising a warning as documented\"\"\"\n # Test default value of silence_warning\n with warnings.catch_warnings(record=True) as recorded_warnings:\n warnings.simplefilter(\"always\")\n\n sysv_ipc.ftok('.', 42)\n\n self.assertEqual(len(recorded_warnings), 1)\n self.assertTrue(issubclass(recorded_warnings[-1].category, Warning))\n\n # Test explicit 
False value of silence_warning\n with warnings.catch_warnings(record=True) as recorded_warnings:\n warnings.simplefilter(\"always\")\n\n sysv_ipc.ftok('.', 42, silence_warning=False)\n\n self.assertEqual(len(recorded_warnings), 1)\n self.assertTrue(issubclass(recorded_warnings[-1].category, Warning))\n\n # Test explicit True value of silence_warning\n with warnings.catch_warnings(record=True) as recorded_warnings:\n warnings.simplefilter(\"always\")\n\n sysv_ipc.ftok('.', 42, silence_warning=True)\n\n self.assertEqual(len(recorded_warnings), 0)\n\n def test_ftok_kwargs(self):\n \"\"\"Ensure ftok() takes kwargs as advertised\"\"\"\n sysv_ipc.ftok('.', 42, silence_warning=True)\n\n def test_ftok_return_value(self):\n \"\"\"Ensure ftok() returns an int\"\"\"\n self.assertIsInstance(sysv_ipc.ftok('.', 42, silence_warning=True), numbers.Integral)\n\n def test_ftok_raises_os_error(self):\n \"\"\"Ensure ftok() failure raises an exception\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n # Create a path that should cause ftok() to fail.\n does_not_exist_path = os.path.join(tmp_dir_name, \"does_not_exist\")\n with self.assertRaises(OSError):\n sysv_ipc.ftok(does_not_exist_path, 42, silence_warning=True)\n\n def test_remove_semaphore(self):\n \"\"\"Exercise remove_semaphore()\"\"\"\n sem = sysv_ipc.Semaphore(None, sysv_ipc.IPC_CREX)\n\n sysv_ipc.remove_semaphore(sem.id)\n\n with self.assertRaises(sysv_ipc.ExistentialError):\n sysv_ipc.Semaphore(sem.key)\n\n def test_remove_shared_memory(self):\n \"\"\"Exercise remove_shared_memory()\"\"\"\n mem = sysv_ipc.SharedMemory(None, sysv_ipc.IPC_CREX)\n\n sysv_ipc.remove_shared_memory(mem.id)\n\n with self.assertRaises(sysv_ipc.ExistentialError):\n sysv_ipc.SharedMemory(mem.key)\n\n def test_remove_message_queue(self):\n \"\"\"Exercise remove_message_queue()\"\"\"\n mq = sysv_ipc.MessageQueue(None, sysv_ipc.IPC_CREX)\n\n sysv_ipc.remove_message_queue(mq.id)\n\n with self.assertRaises(sysv_ipc.ExistentialError):\n sysv_ipc.MessageQueue(mq.key)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6790322661399841, "alphanum_fraction": 0.6806451678276062, "avg_line_length": 22.846153259277344, "blob_id": "733de2d62ca2f13dce16cd3bac4808467bcceea9", "content_id": "aaab98626dfb7b29cb241e3612f3cfa4786210ac", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "permissive", "max_line_length": 75, "num_lines": 26, "path": "/demos/sem_and_shm/cleanup.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "# 3rd party modules\nimport sysv_ipc\n\n# Modules for this project\nimport utils\n\nparams = utils.read_params()\n\nkey = params[\"KEY\"]\n\ntry:\n semaphore = sysv_ipc.Semaphore(key)\nexcept sysv_ipc.ExistentialError:\n print('''The semaphore with key \"{}\" doesn't exist.'''.format(key))\nelse:\n semaphore.remove()\n print('Removed the semaphore with key \"{}\".'.format(key))\n\n\ntry:\n memory = sysv_ipc.SharedMemory(key)\nexcept sysv_ipc.ExistentialError:\n print('''The shared memory with key \"{}\" doesn't exist.'''.format(key))\nelse:\n memory.remove()\n print('Removed the shared memory with key \"{}\".'.format(key))\n" }, { "alpha_fraction": 0.6449552774429321, "alphanum_fraction": 0.6507024168968201, "avg_line_length": 30.31999969482422, "blob_id": "cca00497456d60b002734ac226c35e1e56cd9915", "content_id": "9e1643203b77b42df34b11d458b459e63202dc9e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1566, "license_type": "permissive", "max_line_length": 85, "num_lines": 50, "path": "/post_dist.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# Python imports\nimport time\nimport hashlib\nimport shutil\nimport os\n\nRSS_TIMESTAMP_FORMAT = \"%a, %d %b %Y %H:%M:%S GMT\"\n\nwith open(\"VERSION\") as f:\n VERSION = f.read().strip()\n\n# Make a copy of the tarball for posterity\ntarball_name = \"sysv_ipc-%s.tar.gz\" % VERSION\nshutil.copyfile(os.path.join(\"dist\", tarball_name),\n os.path.join(\"releases\", tarball_name))\n\ntarball_name = \"releases/sysv_ipc-%s.tar.gz\" % VERSION\nmd5_name = \"releases/sysv_ipc-%s.md5.txt\" % VERSION\nsha1_name = \"releases/sysv_ipc-%s.sha1.txt\" % VERSION\n\n# Generate hashes of the tarball\ntarball_content = open(tarball_name, 'rb').read()\nfor hash_function_name in ('md5', 'sha1', 'sha256'):\n hash_function = getattr(hashlib, hash_function_name)\n hash_value = hash_function(tarball_content).hexdigest()\n\n hash_filename = \"releases/sysv_ipc-{}.{}.txt\".format(VERSION, hash_function_name)\n\n open(hash_filename, \"wb\").write(hash_value.encode('ascii'))\n print(hash_function_name + \" = \" + hash_value)\n\n# Print an RSS item suitable for pasting into rss.xml\ntimestamp = time.strftime(RSS_TIMESTAMP_FORMAT, time.gmtime())\n\nprint(\"\"\"\n\n <item>\n <guid isPermaLink=\"false\">%s</guid>\n <title>sysv_ipc %s Released</title>\n <pubDate>%s</pubDate>\n <link>http://semanchuk.com/philip/sysv_ipc/</link>\n <description>Version %s of sysv_ipc has been released.\n </description>\n </item>\n\n\"\"\" % (VERSION, VERSION, timestamp, VERSION))\n\nprint(\"Don't forget this:\\ngit tag rel\" + VERSION)\n" }, { "alpha_fraction": 0.4162062704563141, "alphanum_fraction": 0.4226519465446472, "avg_line_length": 19.11111068725586, "blob_id": "d293e098d2ce5e2c9d8a1286eb720864539b349b", "content_id": "855dce96e4ee4ff5d48888e58576cd1c3fecf298", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "permissive", "max_line_length": 49, "num_lines": 54, "path": "/demos/sem_and_shm/utils.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "import time\nimport sys\n\nNULL_CHAR = '\\0'\n\n\ndef say(s):\n who = sys.argv[0]\n if who.endswith(\".py\"):\n who = who[:-3]\n s = \"%s@%1.6f: %s\" % (who, time.time(), s)\n print(s)\n\n\ndef write_to_memory(memory, s):\n say(\"writing %s \" % s)\n s += NULL_CHAR\n s = s.encode()\n memory.write(s)\n\n\ndef read_from_memory(memory):\n s = memory.read()\n s = s.decode()\n i = s.find(NULL_CHAR)\n if i != -1:\n s = s[:i]\n say(\"read %s\" % s)\n\n return s\n\n\ndef read_params():\n params = {}\n\n with open(\"params.txt\", \"r\") as f:\n for line in f:\n line = line.strip()\n if line:\n if line.startswith('#'):\n # comment in input; ignore\n pass\n else:\n name, value = line.split('=')\n name = name.upper().strip()\n\n if name == \"PERMISSIONS\":\n value = int(value, 8)\n else:\n value = int(value)\n\n params[name] = value\n\n return params\n" }, { "alpha_fraction": 0.7869986295700073, "alphanum_fraction": 0.7888427972793579, "avg_line_length": 53.17499923706055, "blob_id": "d55f09f05193a1a80aa787d96c5434955099c3e5", "content_id": "871eba6277aff400071ef1f3051b1b29a4426aa9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2169, "license_type": "permissive", 
"max_line_length": 77, "num_lines": 40, "path": "/demos/message_queues/ReadMe.txt", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "This demonstrates use of message queues via two applications named after\nMrs. Premise and Mrs. Conclusion of the Monty Python sketch. \nhttp://www.youtube.com/watch?v=crIJvcWkVcs\n\nLike those two characters, these programs chat back and forth and the result \nis a lot of nonsense. In this case, what the programs are saying isn't the\ninteresting part. What's interesting is how they're doing it.\n\nMrs. Premise and Mrs. Conclusion (the programs, not the sketch characters)\ncommunicate with Sys V message queues.\n\nMrs. Premise starts things off by creating the queue and sending a random \nstring (the current time) to it. She then sits in a loop receiving whatever \nmessage is on the queue. If it is the same message she wrote, she sends it\nback to the queue. If it is a new message, it must be from Mrs. Conclusion.\n\nMeanwhile, Mrs. Conclusion is doing exactly the same thing, except that she\nassumes Mrs. Premise will write the first message.\n\nWhen either of these programs receives a new message, they send back an\nmd5 hash of that message. This serves two purposes. First, it ensures that\nsubsequent messages are very different so that if a message somehow gets\ncorrupted (say by being partially overwritten by the next message), it will\nnot escape notice. Second, it ensures that corruption can be detected if\nit happens, because Mrs. Premise and Mrs. Conclusion can calculate what the\nother's response to their message should be.\n\nSince message queues manage all of the concurrency issues transparently,\nMrs. Premise and Mrs. Conclusion won't ever find their messages corrupted\nno matter how many messages they exchange. You can experiment with this by \nsetting ITERATIONS in params.txt to a very large value.\n\nThese programs are not meant as a demonstration on how to make best use of a \nmessage queue. In fact, they're very badly behaved because they poll the\nqueue as fast as possible -- they'll send your CPU usage right up to 100%.\nRemember, they're trying as hard as they can to step one another so as to \nexpose any concurrency problems that might be present. \n\nReal code would want to sleep (or do something useful) in between calling\nsend() and receive(). 
\n\n" }, { "alpha_fraction": 0.6351791620254517, "alphanum_fraction": 0.6644951105117798, "avg_line_length": 20.928571701049805, "blob_id": "8f0a85ac11e63aa6c75085d56e4f146fbf4745b0", "content_id": "e057e7463c4661c8bb44aff1ce7cc23f6a041e9d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "permissive", "max_line_length": 78, "num_lines": 14, "path": "/extras/explore_max_semaphore_value.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "import sysv_ipc\n\n'''This is a simple test to see how many times a semaphore can be released.'''\n\nsem = sysv_ipc.Semaphore(None, sysv_ipc.IPC_CREX)\n\nprint('Semaphore key is {}'.format(sem.key))\n\nfor i in range(1, 100000):\n sem.release()\n\n print('{:05}: value is {}'.format(i, sem.value))\n\nsem.remove()\n" }, { "alpha_fraction": 0.7824817299842834, "alphanum_fraction": 0.7843065857887268, "avg_line_length": 50.67924499511719, "blob_id": "bb296b56eae88a500fbb16d4cc4d302d65ed4c26", "content_id": "35d819b83f0848b4ae178990d359879393af367c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2740, "license_type": "permissive", "max_line_length": 77, "num_lines": 53, "path": "/demos/sem_and_shm/ReadMe.txt", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "This demonstrates use of shared memory and semaphores via two applications \nnamed after Mrs. Premise and Mrs. Conclusion of the Monty Python sketch. \nhttp://www.youtube.com/watch?v=crIJvcWkVcs\n\nLike those two characters, these programs chat back and forth and the result \nis a lot of nonsense. In this case, what the programs are saying isn't the\ninteresting part. What's interesting is how they're doing it.\n\nMrs. Premise and Mrs. Conclusion (the programs, not the sketch characters)\ncommunicate through IPC shared memory with a semaphore to control access\nto that memory.\n\nMrs. Premise starts things off by creating the shared memory and semaphore\nand writing a random string (the current time) to the memory. She then sits\nin a loop reading the memory. If it holds the same message she wrote, she\ndoes nothing. If it is a new message, it must be from Mrs. Conclusion.\n\nMeanwhile, Mrs. Conclusion is doing exactly the same thing, except that she\nassumes Mrs. Premise will write the first message.\n\nWhen either of these programs reads a new message, they write back an md5 \nhash of that message. This serves two purposes. First, it ensures that\nsubsequent messages are very different so that if a message somehow gets\ncorrupted (say by being partially overwritten by the next message), it will\nnot escape notice. Second, it ensures that corruption can be detected if\nit happens, because Mrs. Premise and Mrs. Conclusion can calculate what the\nother's response to their message should be.\n\nSince they use a semaphore to control access to the shared memory, Mrs. \nPremise and Mrs. Conclusion won't ever find their messages corrupted no\nmatter how many messages they exchange. You can experiment with this by\nsetting ITERATIONS in params.txt to a very large value. You can change \nLIVE_DANGEROUSLY (also in params.txt) to a non-zero value to tell Mrs. \nPremise and Mrs. Conclusion to run without using the semaphore. The shared\nmemory will probably get corrupted in fewer than 1000 iterations.\n\nTo run the demo, start Mrs. Premise first in one window and then run\nMrs. Conclusion in another. 
\n\n\n                 The Fancy Version \n                 =================\n\nIf you want to get fancy, you can play with C versions of Mrs. Premise and \nMrs. Conclusion. The script make_all.sh will compile them for you. (Linux\nusers will need to edit the script and uncomment the line for the \nLinux-specific linker option.) \n\nThe resulting executables are called premise and conclusion and work exactly \nthe same as their Python counterparts. You can have the two C programs talk \nto one another, or you can have premise.py talk to the C version of \nconclusion...the possibilities are endless. (Actually, there are only four \npossible combinations but \"endless\" sounds better.)\n\n" }, { "alpha_fraction": 0.6165562868118286, "alphanum_fraction": 0.6192052960395813, "avg_line_length": 25.964284896850586, "blob_id": "0e173020c607d6d208aaea5eee77b85333fec78a", "content_id": "6a6002fbe2bb9de10076510d6df1d799fcf9f210", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "permissive", "max_line_length": 85, "num_lines": 56, "path": "/demos/sem_and_shm/conclusion.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "# Python modules\nimport hashlib\n\n# 3rd party modules\nimport sysv_ipc\n\n# Utils for this demo\nimport utils\n\nutils.say(\"Oooo 'ello, I'm Mrs. Conclusion!\")\n\nparams = utils.read_params()\n\nsemaphore = sysv_ipc.Semaphore(params[\"KEY\"])\nmemory = sysv_ipc.SharedMemory(params[\"KEY\"])\n\nutils.say(\"memory attached at %d\" % memory.address)\n\nwhat_i_wrote = \"\"\ns = \"\"\n\nfor i in range(0, params[\"ITERATIONS\"]):\n    utils.say(\"i = %d\" % i)\n    if not params[\"LIVE_DANGEROUSLY\"]:\n        # Wait for Mrs. Premise to free up the semaphore.\n        utils.say(\"acquiring the semaphore...\")\n        semaphore.acquire()\n\n    s = utils.read_from_memory(memory)\n\n    while s == what_i_wrote:\n        if not params[\"LIVE_DANGEROUSLY\"]:\n            # Release the semaphore...\n            utils.say(\"releasing the semaphore\")\n            semaphore.release()\n            # ...and wait for it to become available again.\n            utils.say(\"acquiring the semaphore...\")\n            semaphore.acquire()\n\n        s = utils.read_from_memory(memory)\n\n    if what_i_wrote:\n        what_i_wrote = what_i_wrote.encode()\n        try:\n            assert(s == hashlib.md5(what_i_wrote).hexdigest())\n        except AssertionError:\n            raise AssertionError(\"Shared memory corruption after %d iterations.\" % i)\n\n    s = s.encode()\n    what_i_wrote = hashlib.md5(s).hexdigest()\n\n    utils.write_to_memory(memory, what_i_wrote)\n\n    if not params[\"LIVE_DANGEROUSLY\"]:\n        utils.say(\"releasing the semaphore\")\n        semaphore.release()\n" }, { "alpha_fraction": 0.7652789950370789, "alphanum_fraction": 0.7723649144172668, "avg_line_length": 44.15999984741211, "blob_id": "ce3fd667dcb328464d8bd05c813ee2d4215c36dd", "content_id": "f58ffa6bcef8ad5de078dbb3cad7f8018d7688f7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1129, "license_type": "permissive", "max_line_length": 77, "num_lines": 25, "path": "/demos/buffer_protocol/ReadMe.md", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "This demonstrates that `sysv_ipc` implements Python's \"buffer protocol\" which\nallows you to create `bytearray` and `memoryview` objects from\n`sysv_ipc.SharedMemory` instances. 
The demo doesn't do anything exciting;\nits main value is in the code.\n\nThe `memoryview` type under Python 3 has some nice features that aren't\npresent under Python 2.\n\n## Caveat\n\nA `memoryview` is just that, a view on a chunk of memory. It has some\nsimilarities to a raw C pointer (namely, speed and, in this case, danger).\n\nThe size of the `memoryview` is set when it's created, so shrinking the\nunderlying memory segment could be fatal to your code.\n\nFor instance, if process A creates a `memoryview` atop an 8k chunk\nof `sysv_ipc.SharedMemory` and then process B shrinks that same\n`sysv_ipc.SharedMemory` segment to 4k, process A will segfault when it tries\nto access any part of the `memoryview` past byte 4096. `Sysv_ipc` can't\nprotect you from this because once the `memoryview` is created, `sysv_ipc`\nisn't invoked for reads and writes to the `memoryview`.\n\nIn practice, I'm not sure if any platforms allow resizing SysV shared\nmemory segments.\n" }, { "alpha_fraction": 0.5108463168144226, "alphanum_fraction": 0.5313168168067932, "avg_line_length": 24.5703125, "blob_id": "3fe3f8c973f7ce30331d18d69ebec419331a8717", "content_id": "30780c4c894f47356ad720ede74b57dcbaf72874", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3273, "license_type": "permissive", "max_line_length": 104, "num_lines": 128, "path": "/demos/sem_and_shm/utils.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "#include <time.h>\n#include <stdio.h> \n#include <stdlib.h> \n#include <unistd.h> \n#include <errno.h> \n#include <semaphore.h>\n#include <string.h>\n\n\n#include <sys/ipc.h>\t\t/* for system's IPC_xxx definitions */\n#include <sys/shm.h>\t\t/* for shmget, shmat, shmdt, shmctl */\n#include <sys/sem.h>\t\t/* for semget, semctl, semop */\n\n#include \"utils.h\"\n#include \"md5.h\"\n\n\nvoid md5ify(char *inString, char *outString) {\n\tmd5_state_t state;\n\tmd5_byte_t digest[16];\n    int i;\n    \n\tmd5_init(&state);\n\tmd5_append(&state, (const md5_byte_t *)inString, strlen(inString));\n\tmd5_finish(&state, digest);\n\n    for (i = 0; i < 16; i++)\n        sprintf(&outString[i * 2], \"%02x\", digest[i]);\n}\n\nvoid say(const char *pName, char *pMessage) {\n    time_t the_time;\n    struct tm *the_localtime;\n    char timestamp[256];\n    \n    the_time = time(NULL);\n    \n    the_localtime = localtime(&the_time);\n    \n    strftime(timestamp, 255, \"%H:%M:%S\", the_localtime);\n    \n    printf(\"%s @ %s: %s\\n\", pName, timestamp, pMessage);\n    \n}\n\n\nint release_semaphore(const char *pName, int sem_id, int live_dangerously) {\n    int rc = 0;\n    struct sembuf op[1];\n    char s[1024];\n    \n    say(pName, \"Releasing the semaphore.\");\n    \n    if (!live_dangerously) {\n        op[0].sem_num = 0;\n        op[0].sem_op = 1;\n        op[0].sem_flg = 0;\n\n        if (-1 == semop(sem_id, op, (size_t)1)) {\n            sprintf(s, \"Releasing the semaphore failed; errno is %d\\n\", errno);\n            say(pName, s);\n        }\n    }\n    \n    return rc;\n}\n\n\nint acquire_semaphore(const char *pName, int sem_id, int live_dangerously) {\n    int rc = 0;\n    struct sembuf op[1];\n    char s[1024];\n\n    say(pName, \"Waiting to acquire the semaphore.\");\n\n    if (!live_dangerously) {\n        op[0].sem_num = 0;\n        op[0].sem_op = -1;\n        op[0].sem_flg = 0;\n        if (-1 == semop(sem_id, op, (size_t)1)) {\n            sprintf(s, \"Acquiring the semaphore failed; errno is %d\\n\", errno);\n            say(pName, s);\n        }\n    }\n\n    return rc;\n}\n\n\nvoid read_params(struct param_struct *params) {\n    char line[1024];\n    char name[1024];\n    int value = 0;\n    \n    FILE *fp;\n    \n    fp = 
fopen(\"params.txt\", \"r\");\n \n while (fgets(line, 1024, fp)) {\n if (strlen(line) && ('#' == line[0]))\n ; // comment in input, ignore\n else {\n sscanf(line, \"%[ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghjiklmnopqrstuvwxyz]=%i\\n\", name, &value);\n \n // printf(\"name = %s, value = %d\\n\", name, value);\n\n if (!strcmp(name, \"ITERATIONS\"))\n params->iterations = value;\n if (!strcmp(name, \"LIVE_DANGEROUSLY\"))\n params->live_dangerously = value;\n if (!strcmp(name, \"KEY\"))\n params->key = value;\n if (!strcmp(name, \"PERMISSIONS\"))\n params->permissions = value;\n if (!strcmp(name, \"SHM_SIZE\"))\n params->size = value;\n \n name[0] = '\\0';\n value = 0;\n }\n }\n \n // printf(\"iterations = %d\\n\", params->iterations);\n // printf(\"danger = %d\\n\", params->live_dangerously);\n // printf(\"key = %d\\n\", params->key);\n // printf(\"permissions = %o\\n\", params->permissions);\n // printf(\"size = %d\\n\", params->size);\n}\n" }, { "alpha_fraction": 0.6247139573097229, "alphanum_fraction": 0.6278824210166931, "avg_line_length": 29.37967872619629, "blob_id": "46855d93da22a2a3de4daf1fbb1895e487d1b0ed", "content_id": "9bb3f40533d4635406cd028e781430c6d25536b5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5681, "license_type": "permissive", "max_line_length": 94, "num_lines": 187, "path": "/prober.py", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "import os.path\nimport os\nimport subprocess\nimport distutils.sysconfig\n\n# Set these to None for debugging or subprocess.PIPE to silence compiler\n# warnings and errors.\nSTDOUT = subprocess.PIPE\nSTDERR = subprocess.PIPE\n# STDOUT = None\n# STDERR = None\n\n# This is the max length that I want a printed line to be.\nMAX_LINE_LENGTH = 78\n\nPYTHON_INCLUDE_DIR = os.path.dirname(distutils.sysconfig.get_config_h_filename())\n# print(PYTHON_INCLUDE_DIR)\n\n\ndef line_wrap_paragraph(s):\n # Format s with terminal-friendly line wraps.\n done = False\n beginning = 0\n end = MAX_LINE_LENGTH - 1\n lines = []\n while not done:\n if end >= len(s):\n done = True\n lines.append(s[beginning:])\n else:\n last_space = s[beginning:end].rfind(' ')\n\n lines.append(s[beginning:beginning + last_space])\n beginning += (last_space + 1)\n end = beginning + MAX_LINE_LENGTH - 1\n\n return lines\n\n\ndef print_bad_news(value_name, default):\n s = \"Setup can't determine %s on your system, so it will default to %s which may not \" + \\\n \"be correct.\"\n s = s % (value_name, default)\n\n plea = \"Please report this message and your operating system info to the package \" + \\\n \"maintainer listed in the README file.\"\n\n lines = line_wrap_paragraph(s) + [''] + line_wrap_paragraph(plea)\n\n border = '*' * MAX_LINE_LENGTH\n\n s = border + \"\\n* \" + ('\\n* '.join(lines)) + '\\n' + border\n\n print(s)\n\n\ndef does_build_succeed(filename):\n # Utility function that returns True if the file compiles and links\n # successfully, False otherwise.\n cmd = \"cc -Wall -I%s -o ./prober/foo ./prober/%s\" % \\\n (PYTHON_INCLUDE_DIR, filename)\n\n p = subprocess.Popen(cmd, shell=True, stdout=STDOUT, stderr=STDERR)\n\n # p.wait() returns the process' return code, so 0 implies that\n # the compile & link succeeded.\n return not bool(p.wait())\n\n\ndef compile_and_run(filename, linker_options=\"\"):\n # Utility function that returns the stdout output from running the\n # compiled source file; None if the compile fails.\n cmd = \"cc -Wall -I%s -o ./prober/foo %s ./prober/%s\" % 
\\\n (PYTHON_INCLUDE_DIR, linker_options, filename)\n\n p = subprocess.Popen(cmd, shell=True, stdout=STDOUT, stderr=STDERR)\n\n if p.wait():\n # uh-oh, compile failed\n return None\n else:\n s = subprocess.Popen([\"./prober/foo\"],\n stdout=subprocess.PIPE).communicate()[0]\n return s.strip().decode()\n\n\ndef sniff_semtimedop():\n return does_build_succeed(\"semtimedop_test.c\")\n\n\ndef sniff_union_semun_defined():\n # AFAICT the semun union is supposed to be declared in one's code.\n # However, a lot of legacy code gets this wrong and some header files\n # define it, e.g.sys/sem.h on OS X where it's #ifdef-ed so that legacy\n # code won't break. On some systems, it appears and disappears based\n # on the #define value of _XOPEN_SOURCE.\n return does_build_succeed(\"sniff_union_semun_defined.c\")\n\n\ndef probe_semvmx():\n # At present, this is hardcoded and that seems fine on all systems I've tested.\n # https://github.com/osvenskan/sysv_ipc/issues/3\n semvmx = 32767\n\n return semvmx\n\n\ndef probe_page_size():\n DEFAULT_PAGE_SIZE = 4096\n\n page_size = compile_and_run(\"probe_page_size.c\")\n\n if page_size is None:\n page_size = DEFAULT_PAGE_SIZE\n print_bad_news(\"the value of PAGE_SIZE\", page_size)\n\n return page_size\n\n\ndef probe():\n d = {\"KEY_MAX\": \"LONG_MAX\",\n \"KEY_MIN\": \"LONG_MIN\"\n }\n\n # conditionals contains preprocessor #defines to be written to probe_results.h that might\n # already be defined on some platforms. Any symbol in this list will be surrounded with\n # preprocessor directives #ifndef/#endif in probe_results.h.\n # If a symbol is in this list but isn't written to probe_results.h, no harm done.\n conditionals = [\"_SEM_SEMUN_UNDEFINED\",\n # PAGE_SIZE is already #defined elsewhere on FreeBSD.\n \"PAGE_SIZE\",\n ]\n\n with open(\"VERSION\") as f:\n version = f.read().strip()\n\n d[\"SYSV_IPC_VERSION\"] = f'\"{version}\"'\n d[\"PAGE_SIZE\"] = probe_page_size()\n if sniff_semtimedop():\n d[\"SEMTIMEDOP_EXISTS\"] = \"\"\n d[\"SEMAPHORE_VALUE_MAX\"] = probe_semvmx()\n # Some (all?) Linux platforms #define _SEM_SEMUN_UNDEFINED if it's up\n # to my code to declare this union, so I use that flag as my standard.\n if not sniff_union_semun_defined():\n d[\"_SEM_SEMUN_UNDEFINED\"] = \"\"\n\n msg = \"\"\"/*\nThis header file was generated when you ran setup. 
Once created, the setup\nprocess won't overwrite it, so you can adjust the values by hand and\nrecompile if you need to.\n\nTo enable lots of debug output, add this line and re-run setup.py:\n#define SYSV_IPC_DEBUG\n\nTo recreate this file, just delete it and re-run setup.py.\n\nKEY_MIN, KEY_MAX and SEMAPHORE_VALUE_MAX are stored internally in longs, so\nyou should never #define them to anything larger than LONG_MAX regardless of\nwhat your operating system is capable of.\n\n*/\n\n\"\"\"\n\n filename = \"probe_results.h\"\n if not os.path.exists(filename):\n lines = []\n\n for key in d:\n if key in conditionals:\n lines.append(\"#ifndef %s\" % key)\n\n lines.append(\"#define %s\\t\\t%s\" % (key, d[key]))\n\n if key in conditionals:\n lines.append(\"#endif\")\n\n # A trailing '\\n' keeps compilers happy...\n with open(filename, \"w\") as f:\n f.write(msg + '\\n'.join(lines) + '\\n')\n\n return d\n\n\nif __name__ == \"__main__\":\n s = probe()\n print(s)\n" }, { "alpha_fraction": 0.5259740352630615, "alphanum_fraction": 0.5649350881576538, "avg_line_length": 11.833333015441895, "blob_id": "c0f37ae852d22ee679261f2947d34073893f998e", "content_id": "8ec64eb21bd83bfbb7d82fd2002062faf0de19ec", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 154, "license_type": "permissive", "max_line_length": 28, "num_lines": 12, "path": "/prober/sniff_union_semun_defined.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "//#define _XOPEN_SOURCE 500\n#include \"Python.h\"\n\n#include <sys/sem.h>\n\nint main(void) {\n union semun foo;\n \n foo.val = 42;\n \n return 0;\n}\n" }, { "alpha_fraction": 0.5802469253540039, "alphanum_fraction": 0.6172839403152466, "avg_line_length": 13.727272987365723, "blob_id": "f015f1fb83fc8294eecb786085ae6833a92f55f5", "content_id": "e165629c3ad902223bb6a5cbf29a18f6ab9ae5e5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 162, "license_type": "permissive", "max_line_length": 34, "num_lines": 11, "path": "/prober/semtimedop_test.c", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "//#define _XOPEN_SOURCE 500\n#include \"Python.h\"\n\n#include <sys/sem.h>\n#include <stdlib.h>\n\nint main(void) { \n semtimedop(0, NULL, 0, NULL); \n\n return 0;\n}\n" }, { "alpha_fraction": 0.7183840870857239, "alphanum_fraction": 0.7183840870857239, "avg_line_length": 31.226415634155273, "blob_id": "f30e898623717e72fc46faf971bf22e33469cfd9", "content_id": "b4ee63d945dd746483bc7ee3e990f21337b036bd", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1708, "license_type": "permissive", "max_line_length": 68, "num_lines": 53, "path": "/semaphore.h", "repo_name": "osvenskan/sysv_ipc", "src_encoding": "UTF-8", "text": "typedef struct {\n PyObject_HEAD\n key_t key;\n int id;\n short op_flags;\n} Semaphore;\n\n\n/* Object methods */\nPyObject *Semaphore_new(PyTypeObject *type, PyObject *, PyObject *);\nint Semaphore_init(Semaphore *, PyObject *, PyObject *);\nvoid Semaphore_dealloc(Semaphore *);\nPyObject *Semaphore_enter(Semaphore *);\nPyObject *Semaphore_exit(Semaphore *, PyObject *);\nPyObject *Semaphore_P(Semaphore *, PyObject *, PyObject *);\nPyObject *Semaphore_acquire(Semaphore *, PyObject *, PyObject *);\nPyObject *Semaphore_V(Semaphore *, PyObject *, PyObject *);\nPyObject *Semaphore_release(Semaphore *, PyObject *, PyObject *);\nPyObject 
*Semaphore_Z(Semaphore *, PyObject *, PyObject *);\nPyObject *Semaphore_remove(Semaphore *);\n\n/* Object attributes (read-write & read-only) */\nPyObject *sem_get_value(Semaphore *);\nint sem_set_value(Semaphore *self, PyObject *py_value);\n\nPyObject *sem_get_block(Semaphore *);\nint sem_set_block(Semaphore *self, PyObject *py_value);\n\nPyObject *sem_get_mode(Semaphore *);\nint sem_set_mode(Semaphore *, PyObject *);\n\nPyObject *sem_get_undo(Semaphore *);\nint sem_set_undo(Semaphore *self, PyObject *py_value);\n\nPyObject *sem_get_uid(Semaphore *);\nint sem_set_uid(Semaphore *, PyObject *);\n\nPyObject *sem_get_gid(Semaphore *);\nint sem_set_gid(Semaphore *, PyObject *);\n\nPyObject *sem_get_key(Semaphore *);\nPyObject *sem_get_c_uid(Semaphore *);\nPyObject *sem_get_c_gid(Semaphore *);\nPyObject *sem_get_last_pid(Semaphore *);\nPyObject *sem_get_waiting_for_nonzero(Semaphore *);\nPyObject *sem_get_waiting_for_zero(Semaphore *);\nPyObject *sem_get_o_time(Semaphore *);\n\nPyObject *sem_str(Semaphore *);\nPyObject *sem_repr(Semaphore *);\n\n/* Utility functions */\nPyObject *sem_remove(int);\n" } ]
25
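The sem_and_shm demos above coordinate two processes by pairing a System V semaphore with a shared memory segment, and the buffer_protocol ReadMe notes that `sysv_ipc.SharedMemory` supports zero-copy views. Below is a minimal sketch of both ideas together, assuming only that the `sysv_ipc` package is installed; the key 4242 is an arbitrary illustration, not a value taken from the demos.

```python
# Sketch of the pattern in the demos above: a System V semaphore guarding
# a shared memory segment. Assumes `pip install sysv_ipc`; the key 4242
# is an arbitrary choice for illustration.
import sysv_ipc

KEY = 4242

# IPC_CREX = create the object, and fail if the key is already in use.
sem = sysv_ipc.Semaphore(KEY, sysv_ipc.IPC_CREX, initial_value=1)
mem = sysv_ipc.SharedMemory(KEY, sysv_ipc.IPC_CREX, size=sysv_ipc.PAGE_SIZE)

try:
    sem.acquire()            # take the lock before touching the memory
    mem.write(b"hello")      # write() accepts bytes, starting at offset 0
    sem.release()

    # SharedMemory implements the buffer protocol, so this view is zero-copy.
    view = memoryview(mem)
    print(bytes(view[:5]))   # b'hello'
    del view                 # drop the view before detaching (see caveat)
finally:
    mem.detach()
    mem.remove()             # mark the segment for destruction
    sem.remove()
```

Dropping the `memoryview` before detaching matters for exactly the reason the caveat above gives: once the view exists, `sysv_ipc` is no longer involved in reads and writes through it.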
desewenkdk/pythonProject
https://github.com/desewenkdk/pythonProject
23af8aabb9330f16e69f7af77841e3a95b95983f
3dc88219e2574e98aae12b930035040659549630
d67f3a5e1a74320a79cd71eb4e63d3dad6e706e9
refs/heads/master
2023-03-28T14:30:23.064544
2021-03-31T14:43:56
2021-03-31T14:43:56
353,381,024
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 5, "blob_id": "3c9668c0dd2d1d0b17e23b7a7d2acad7f28635c3", "content_id": "c8cdfc578485138048634bfa7fe0ed92c6de9c89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 29, "license_type": "no_license", "max_line_length": 9, "num_lines": 5, "path": "/readme.md", "repo_name": "desewenkdk/pythonProject", "src_encoding": "UTF-8", "text": "# README\n\n## test2\n\nREADME.md" }, { "alpha_fraction": 0.6746203899383545, "alphanum_fraction": 0.6832971572875977, "avg_line_length": 31.928571701049805, "blob_id": "6d448e639319afff1398ea9ec23e10dfe0212bfe", "content_id": "07e707667101059591d10a2a70fa4c4e6b429b33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 98, "num_lines": 28, "path": "/main.py", "repo_name": "desewenkdk/pythonProject", "src_encoding": "UTF-8", "text": "# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\ndef print_hi(name, text):\n # Use a breakpoint in the code line below to debug your script.\n print(f'Hello, {name}') # Press Ctrl+F8 to toggle the breakpoint.\n print(f'Hello, {text}')\n print('Hello, World 3')\n\n print(\"Hello world 4\")\n print(\"Hello World 5\")\n\n #Updates in local master branch\n print(\"Hell World Updates on Local Master Branch \")\n\n\n #updates on main branch of GitHub\n print(\"Hello World Updates on Github\")\n \n #updates on main branch of GitHub\n print(\"Hell World Updates 2 on Github Only \")\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print_hi('WORLD', 'WORLD2')\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n" } ]
2
marlyn-x86/TilemapTown
https://github.com/marlyn-x86/TilemapTown
b4cf77c22c03f869d0908acc7ea3f79c69f6bb31
f1e6a1755c8239c3a2f4c86828f316524b6b40e7
2283955c06251006f8ba6ea9c7dc1529a89a22a5
refs/heads/master
2022-02-23T12:03:12.059214
2018-12-22T01:35:47
2018-12-22T01:35:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5868919491767883, "alphanum_fraction": 0.6002028584480286, "avg_line_length": 35.51129150390625, "blob_id": "63422c54265a190bac0e7fead8f7dd53348a4957", "content_id": "e2484235d2333c9a56bda992947b8e09719d2197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40418, "license_type": "no_license", "max_line_length": 337, "num_lines": 1107, "path": "/pyserver/tilemaptown_server/buildmap.py", "repo_name": "marlyn-x86/TilemapTown", "src_encoding": "UTF-8", "text": "# Tilemap Town\n# Copyright (C) 2017-2018 NovaSquirrel\n#\n# This program is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport json, asyncio, random, datetime\nfrom .buildglobal import *\n\nDirX = [ 1, 1, 0, -1, -1, -1, 0, 1]\nDirY = [ 0, 1, 1, 1, 0, -1, -1, -1]\n\n# Filtering chat text\ndef escapeTags(text):\n\treturn text.replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\n\ndef imageURLIsOkay(url):\n\tfor w in Config[\"Images\"][\"URLWhitelist\"]:\n\t\tif url.startswith(w):\n\t\t\treturn True\n\treturn False\n\ndef tileIsOkay(tile):\n\t# convert to a dictionary to check first if necessary\n\tif type(tile) == str and len(tile) and tile[0] == '{':\n\t\ttile = json.loads(tile)\n\n\t# Strings refer to tiles in tilesets and are\n\t# definitely OK as long as they're not excessively long.\n\tif type(tile) == str:\n\t\tif len(tile) <= 32:\n\t\t\treturn (True, None)\n\t\telse:\n\t\t\treturn (False, 'Identifier too long')\n\t# If it's not a string it must be a dictionary\n\tif type(tile) != dict:\n\t\treturn (False, 'Invalid type')\n\n\tif \"pic\" not in tile or len(tile[\"pic\"]) != 3:\n\t\treturn (False, 'No/invalid picture')\n\n\treturn (True, None)\n\nclass Map(object):\n\tdef __init__(self,width=100,height=100):\n\t\t# map stuff\n\t\tself.default_turf = \"grass\"\n\t\tself.start_pos = [5, 5]\n\t\tself.name = \"Map\"\n\t\tself.desc = \"\"\n\t\tself.id = 0\n\t\tself.flags = 0\n\t\tself.users = set()\n\n\t\tself.tags = {}\n\n\t\t# permissions\n\t\tself.owner = -1\n\t\tself.allow = 0\n\t\tself.deny = 0\n\t\tself.guest_deny = 0\n\n\t\t# map scripting\n\t\tself.has_script = False\n\t\t#loop = asyncio.get_event_loop()\n\t\t#self.script_queue = asyncio.Queue(loop=loop)\n\n\t\tself.blank_map(width, height)\n\n\tdef blank_map(self, width, height):\n\t\t\"\"\" Make a blank map of a given size \"\"\"\n\t\tself.width = width\n\t\tself.height = height\n\n\t\t# construct the map\n\t\tself.turfs = []\n\t\tself.objs = []\n\t\tfor x in range(0, width):\n\t\t\tself.turfs.append([None] * height)\n\t\t\tself.objs.append([None] * height)\n\n\tdef set_permission(self, uid, perm, value):\n\t\tif uid == None:\n\t\t\treturn\n\t\t# Start blank\n\t\tallow = 0\n\t\tdeny = 0\n\n\t\t# Get current value\n\t\tc = Database.cursor()\n\t\tc.execute('SELECT allow, deny FROM Map_Permission WHERE mid=? 
AND uid=?', (self.id, uid,))\n\t\tresult = c.fetchone()\n\t\tif result != None:\n\t\t\tallow = result[0]\n\t\t\tdeny = result[1]\n\n\t\t# Alter the permissions\n\t\tif value == True:\n\t\t\tallow |= perm\n\t\t\tdeny &= ~perm\n\t\telif value == False:\n\t\t\tallow &= ~perm\n\t\t\tdeny |= perm\n\t\telif value == None:\n\t\t\tallow &= ~perm\n\t\t\tdeny &= ~perm\n\n\t\t# Delete if permissions were removed\n\t\tif not (allow | deny):\n\t\t\tc.execute('DELETE FROM Map_Permission WHERE mid=? AND uid=?', (self.id, uid,))\n\t\t\treturn\n\n\t\t# Update or insert depending on needs\n\t\tif result != None:\n\t\t\tc.execute('UPDATE Map_Permission SET allow=?, deny=? WHERE mid=? AND uid=?', (allow, deny, self.id, uid,))\n\t\telse:\n\t\t\tc.execute(\"INSERT INTO Map_Permission (mid, uid, allow, deny) VALUES (?, ?, ?, ?)\", (self.id, uid, allow, deny,))\n\n\tdef has_permission(self, user, perm, default):\n\t\thas = default\n\t\tif self.allow & perm:\n\t\t\thas = True\n\t\tif self.deny & perm:\n\t\t\thas = False\n\n\t\t# If guest, apply guest_deny\n\t\tif user.db_id == None:\n\t\t\tif self.guest_deny & perm:\n\t\t\t\thas = False\n\t\t\treturn has\n\n\t\t# Search Map_Permission table\n\t\tc = Database.cursor()\n\t\tc.execute('SELECT allow, deny FROM Map_Permission WHERE mid=? AND uid=?', (self.id, user.db_id,))\n\t\tresult = c.fetchone()\n\t\tif result == None:\n\t\t\treturn has\n\t\t# Override the defaults\n\t\tif result[0] & perm:\n\t\t\thas = True\n\t\tif result[1] & perm:\n\t\t\thas = False\n\n\t\treturn has\n\n\tdef set_tag(self, name, value):\n\t\tself.tags[name] = value\n\n\tdef get_tag(self, name, default=None):\n\t\tif name in self.tags:\n\t\t\treturn self.tags[name]\n\t\treturn default\n\n\tdef load(self, mapId):\n\t\t\"\"\" Load a map from a file \"\"\"\n\t\tself.id = mapId\n\n\t\tc = Database.cursor()\n\n\t\tc.execute('SELECT name, desc, owner, flags, start_x, start_y, width, height, default_turf, allow, deny, guest_deny, tags, data FROM Map WHERE mid=?', (mapId,))\n\t\tresult = c.fetchone()\n\t\tif result == None:\n\t\t\treturn False\n\n\t\tself.name = result[0]\n\t\tself.desc = result[1]\n\t\tself.owner = result[2]\n\t\tself.flags = result[3]\n\t\tself.start_pos = [result[4], result[5]]\n\t\tself.width = result[6]\n\t\tself.height = result[7]\n\t\tself.default_turf = result[8]\n\t\tself.allow = result[9]\n\t\tself.deny = result[10]\n\t\tself.guest_deny = result[11]\n\t\tself.tags = json.loads(result[12])\n\n\t\t# Parse map data\n\t\ts = json.loads(result[13])\n\t\tself.blank_map(s[\"pos\"][2]+1, s[\"pos\"][3]+1)\n\t\tfor t in s[\"turf\"]:\n\t\t\tself.turfs[t[0]][t[1]] = t[2]\n\t\tfor o in s[\"obj\"]:\n\t\t\tself.objs[o[0]][o[1]] = o[2]\n\t\tmap = False\n\t\treturn True\n\n\tdef save(self):\n\t\t\"\"\" Save the map to a file \"\"\"\n\n\t\tc = Database.cursor()\n\n\t\t# Create map if it doesn't already exist\n\t\tc.execute('SELECT mid FROM Map WHERE mid=?', (self.id,))\n\t\tresult = c.fetchone()\n\t\tif result == None:\n\t\t\tc.execute(\"INSERT INTO Map (regtime, mid) VALUES (?, ?)\", (datetime.datetime.now(), self.id,))\n\n\t\t# Update the map\n\t\tvalues = (self.name, self.desc, self.owner, self.flags, self.start_pos[0], self.start_pos[1], self.width, self.height, self.default_turf, self.allow, self.deny, self.guest_deny, json.dumps(self.tags), json.dumps(self.map_section(0, 0, self.width-1, self.height-1)), self.id)\n\t\tc.execute(\"UPDATE Map SET name=?, desc=?, owner=?, flags=?, start_x=?, start_y=?, width=?, height=?, default_turf=?, allow=?, deny=?, guest_deny=?, tags=?, data=? 
WHERE mid=?\", values)\n\t\tDatabase.commit()\n\n\tdef map_section(self, x1, y1, x2, y2):\n\t\t\"\"\" Returns a section of map as a list of turfs and objects \"\"\"\n\t\t# clamp down the numbers\n\t\tx1 = min(self.width, max(0, x1))\n\t\ty1 = min(self.height, max(0, y1))\n\t\tx2 = min(self.width, max(0, x2))\n\t\ty2 = min(self.height, max(0, y2))\n\n\t\t# scan the map\n\t\tturfs = []\n\t\tobjs = []\n\t\tfor x in range(x1, x2+1):\n\t\t\tfor y in range(y1, y2+1):\n\t\t\t\tif self.turfs[x][y] != None:\n\t\t\t\t\tturfs.append([x, y, self.turfs[x][y]])\n\t\t\t\tif self.objs[x][y] != None:\n\t\t\t\t\tobjs.append([x, y, self.objs[x][y]])\n\t\treturn {'pos': [x1, y1, x2, y2], 'default': self.default_turf, 'turf': turfs, 'obj': objs}\n\n\tdef map_info(self, all_info=False):\n\t\t\"\"\" MAI message data \"\"\"\n\t\tout = {'name': self.name, 'id': self.id, 'owner': self.owner, 'default': self.default_turf, 'size': [self.width, self.height], 'public': self.flags & mapflag['public'] != 0, 'private': self.deny & permission['entry'] != 0, 'build_enabled': self.allow & permission['build'] != 0, 'full_sandbox': self.allow & permission['sandbox'] != 0}\n\t\tif all_info:\n\t\t\tout['start_pos'] = self.start_pos\n\t\treturn out\n\n\tdef broadcast(self, commandType, commandParams, remote_category=None, remote_only=False):\n\t\t\"\"\" Send a message to everyone on the map \"\"\"\n\t\tif not remote_only:\n\t\t\tfor client in self.users:\n\t\t\t\tclient.send(commandType, commandParams)\n\n\t\t\"\"\" Also send it to any registered listeners \"\"\"\n\t\tif remote_category != None and self.id in BotWatch[remote_category]:\n\t\t\tcommandParams['remote_map'] = self.id\n\t\t\tfor client in BotWatch[remote_category][self.id]:\n\t\t\t\tif (client.map_id != self.id) or remote_only: # don't send twice to people on the map\n\t\t\t\t\tclient.send(commandType, commandParams)\n\n\tdef who(self):\n\t\t\"\"\" WHO message data \"\"\"\n\t\tplayers = dict()\n\t\tfor client in self.users:\n\t\t\tplayers[str(client.id)] = client.who()\n\t\treturn players\n\n\tdef receive_command(self, client, command, arg):\n\t\t\"\"\" Add a command from the client to a queue, or just execute it \"\"\"\n\t\tself.execute_command(client, command, arg)\n\n\tdef execute_command(self, client, command, arg):\n\t\t\"\"\" Actually run a command from the client after being processed \"\"\"\n\t\tglobal ServerShutdown\n\t\tclient.idle_timer = 0\n\n\t\t# todo: use a dictionary instead of if/else chain\n\t\tif command == \"MOV\":\n\t\t\tself.broadcast(\"MOV\", {'id': client.id, 'from': arg[\"from\"], 'to': arg[\"to\"]}, remote_category=botwatch_type['move'])\n\t\t\tclient.moveTo(arg[\"to\"][0], arg[\"to\"][1])\n\t\telif command == \"CMD\":\n\t\t\t# separate into command and arguments\n\t\t\ttext = arg[\"text\"]\n\t\t\tspace = text.find(\" \")\n\t\t\tcommand2 = text.lower()\n\t\t\targ2 = \"\"\n\t\t\tif space >= 0:\n\t\t\t\tcommand2 = text[0:space].lower()\n\t\t\t\targ2 = text[space+1:]\n\n\t\t\tif command2 == \"nick\":\n\t\t\t\tif len(arg2) > 0 and not arg2.isspace():\n\t\t\t\t\tself.broadcast(\"MSG\", {'text': \"\\\"\"+client.name+\"\\\" is now known as \\\"\"+escapeTags(arg2)+\"\\\"\"})\n\t\t\t\t\tclient.name = escapeTags(arg2)\n\t\t\t\t\tself.broadcast(\"WHO\", {'add': client.who()}, remote_category=botwatch_type['entry']) # update client view\n\t\t\telif command2 == \"client_settings\":\n\t\t\t\tself.client_settings = arg2\n\t\t\telif command2 == \"tell\" or command2 == \"msg\" or command2 == \"p\":\n\t\t\t\tspace2 = arg2.find(\" \")\n\t\t\t\tif space2 >= 
0:\n\t\t\t\t\tusername = arg2[0:space2].lower()\n\t\t\t\t\tprivtext = arg2[space2+1:]\n\t\t\t\t\tif privtext.isspace():\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Tell them what?'})\n\t\t\t\t\telse:\n\t\t\t\t\t\tu = findClientByUsername(username)\n\t\t\t\t\t\tif u:\n\t\t\t\t\t\t\tif not client.inBanList(u.ignore_list, 'message %s' % u.name):\n\t\t\t\t\t\t\t\tclient.send(\"PRI\", {'text': privtext, 'name':u.name, 'username': u.usernameOrId(), 'receive': False})\n\t\t\t\t\t\t\t\tu.send(\"PRI\", {'text': privtext, 'name':client.name, 'username': client.usernameOrId(), 'receive': True})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tclient.failedToFind(username)\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Private message who?'})\n\n\t\t\t# carrying\n\t\t\telif command2 == \"carry\":\n\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\tif u == None:\n\t\t\t\t\tclient.failedToFind(arg2)\n\t\t\t\t\treturn\n\t\t\t\tmy_username = client.usernameOrId()\n\t\t\t\tif my_username in u.requests:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You\\'ve already sent them a request'})\n\t\t\t\t\tu.requests[my_username][0] = 600 #renew\n\t\t\t\telif not client.inBanList(u.ignore_list, 'message %s' % u.name):\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You requested to carry '+arg2})\n\t\t\t\t\tu.send(\"MSG\", {'text': client.nameAndUsername()+' wants to carry you', 'buttons': ['Accept', 'tpaccept '+my_username, 'Decline', 'tpdeny '+my_username]})\n\t\t\t\t\tu.requests[my_username] = [600, 'carry']\n\t\t\telif command2 == \"hopoff\":\n\t\t\t\tclient.dismount()\n\t\t\telif command2 == \"dropoff\":\n\t\t\t\tu = findClientByUsername(arg2, inside=client.passengers)\n\t\t\t\tif u:\n\t\t\t\t\tu.dismount()\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You aren\\'t carrying %s' % arg2})\n\t\t\telif command2 == \"carrywho\":\n\t\t\t\tif len(client.passengers):\n\t\t\t\t\tnames = ''\n\t\t\t\t\tfor u in client.passengers:\n\t\t\t\t\t\tif len(names) > 0:\n\t\t\t\t\t\t\tnames += ', '\n\t\t\t\t\t\tnames += '%s (%s)' % (u.name, u.usernameOrId())\n\t\t\t\t\tclient.send(\"MSG\", {'text': \"You are carrying %s\" % names})\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"MSG\", {'text': \"You aren\\'t carrying anything\"})\n\t\t\telif command2 == \"ridewho\":\n\t\t\t\tif client.vehicle:\n\t\t\t\t\tclient.send(\"MSG\", {'text': \"You are riding %s\" % client.vehicle.nameAndUsername()})\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"MSG\", {'text': \"You aren\\'t riding anything\"})\n\t\t\telif command2 == \"rideend\":\n\t\t\t\ttemp = set(client.passengers)\n\t\t\t\tfor u in temp:\n\t\t\t\t\tu.dismount()\n\n\t\t\telif command2 == \"tpa\":\n\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\tif u == None:\n\t\t\t\t\tclient.failedToFind(arg2)\n\t\t\t\t\treturn\n\t\t\t\tmy_username = client.usernameOrId()\n\t\t\t\tif my_username in u.requests:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You\\'ve already sent them a request'})\n\t\t\t\t\tu.requests[my_username][0] = 600 #renew\n\t\t\t\telif not client.inBanList(u.ignore_list, 'message %s' % u.name):\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You requested a teleport to '+arg2})\n\t\t\t\t\tu.send(\"MSG\", {'text': client.nameAndUsername()+' wants to teleport to you', 'buttons': ['Accept', 'tpaccept '+my_username, 'Decline', 'tpdeny '+my_username]})\n\t\t\t\t\tu.requests[my_username] = [600, 'tpa']\n\n\t\t\telif command2 == \"tpahere\":\n\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\tif u == None:\n\t\t\t\t\tclient.failedToFind(arg2)\n\t\t\t\t\treturn\n\t\t\t\tmy_username = client.usernameOrId()\n\t\t\t\tif 
my_username in u.requests:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You\\'ve already sent them a request'})\n\t\t\t\t\tu.requests[my_username][0] = 600 #renew\n\t\t\t\telif not client.inBanList(u.ignore_list, 'message %s' % u.name):\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You requested that '+arg2+' teleport to you'})\n\t\t\t\t\tu.send(\"MSG\", {'text': client.nameAndUsername()+' wants you to teleport to them', 'buttons': ['Accept', 'tpaccept '+my_username, 'Decline', 'tpdeny '+my_username]})\n\t\t\t\t\tu.requests[my_username] = [600, 'tpahere']\n\n\t\t\telif command2 == \"tpaccept\" or command2 == \"hopon\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\tif u == None:\n\t\t\t\t\tclient.failedToFind(arg2)\n\t\t\t\t\treturn\n\t\t\t\tif arg2 not in client.requests:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'No pending request from '+arg2})\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You accepted a teleport request from '+arg2})\n\t\t\t\t\tu.send(\"MSG\", {'text': u.nameAndUsername()+\" accepted your request\"})\n\t\t\t\t\trequest = client.requests[arg2]\n\t\t\t\t\tif request[1] == 'tpa':\n\t\t\t\t\t\tu.switch_map(u.map_id, new_pos=[client.x, client.y])\n\t\t\t\t\telif request[1] == 'tpahere':\n\t\t\t\t\t\tclient.switch_map(u.map_id, new_pos=[u.x, u.y])\n\t\t\t\t\telif request[1] == 'carry':\n\t\t\t\t\t\tclient.ride(u)\n\t\t\t\t\tdel client.requests[arg2]\n\n\t\t\telif command2 == \"tpdeny\" or command2 == \"tpdecline\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\tif u == None:\n\t\t\t\t\tclient.failedToFind(arg2)\n\t\t\t\t\treturn\n\t\t\t\tif arg2 not in client.requests:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'No pending request from '+arg2})\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You rejected a teleport request from '+arg2})\n\t\t\t\t\tu.send(\"MSG\", {'text': u.nameAndUsername()+\" rejected your request\"})\n\t\t\t\t\tdel client.requests[arg2]\n\n\t\t\telif command2 == \"tpcancel\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\tif u == None:\n\t\t\t\t\tclient.failedToFind(arg2)\n\t\t\t\t\treturn\n\t\t\t\tmy_username = client.usernameOrId()\n\t\t\t\tif my_username in u.requests:\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Canceled request to '+arg2})\n\t\t\t\t\tdel u.requests[my_username]\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'No request to cancel'})\n\n\t\t\telif command2 == \"time\":\n\t\t\t\t\tclient.send(\"MSG\", {'text': datetime.datetime.today().strftime(\"Now it's %m/%d/%Y, %I:%M %p\")})\n\n\t\t\telif command2 == \"away\":\n\t\t\t\tif len(arg2) < 1:\n\t\t\t\t\tclient.away = False\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You are no longer marked as away'})\n\t\t\t\telse:\n\t\t\t\t\tclient.away = arg2\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'You are now marked as away (\"%s\")' % arg2})\n\n\t\t\telif command2 == \"roll\":\n\t\t\t\tparam = arg2.split('d')\n\t\t\t\tif len(param) != 2:\n\t\t\t\t\tparam = arg2.split(' ')\n\t\t\t\tif len(param) != 2 or (not param[0].isnumeric()) or (not param[1].isnumeric()):\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Syntax: /roll dice sides'})\n\t\t\t\telse:\n\t\t\t\t\tdice = int(param[0])\n\t\t\t\t\tsides = int(param[1])\n\t\t\t\t\tsum = 0\n\t\t\t\t\tif dice < 1 or dice > 1000:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Bad number of dice'})\n\t\t\t\t\t\treturn\n\t\t\t\t\tif sides < 1 or sides > 1000000000:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Bad number of sides'})\n\t\t\t\t\t\treturn\n\t\t\t\t\tfor 
i in range(dice):\n\t\t\t\t\t\tsum += random.randint(1, sides)\n\t\t\t\t\tself.broadcast(\"MSG\", {'text': client.name+\" rolled %dd%d and got %d\"%(dice, sides, sum)})\n\n\t\t\telif command2 == \"mapid\":\n\t\t\t\tclient.send(\"MSG\", {'text': 'Map ID is %d' % self.id})\n\n\t\t\telif command2 == \"newmap\":\n\t\t\t\tif client.username:\n\t\t\t\t\t# Definitely change this to find the new map ID a better way, like making SQLite decide it\n\t\t\t\t\tnew_id = 1\n\t\t\t\t\twhile mapIdExists(new_id):\n\t\t\t\t\t\tnew_id += 1\n\t\t\t\t\t\tif new_id > Config[\"Server\"][\"MaxDBMaps\"] and Config[\"Server\"][\"MaxDBMaps\"] > 0:\n\t\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'There are too many maps'})\n\t\t\t\t\t\t\treturn\n\t\t\t\t\ttry:\n\t\t\t\t\t\tclient.switch_map(int(new_id))\n\t\t\t\t\t\tclient.map.owner = client.db_id\n\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Welcome to your new map (id %d)' % new_id})\n\t\t\t\t\texcept:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Couldn\\'t switch to the new map'})\n\t\t\t\t\t\traise\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You must be registered to make a new map.'})\n\n\t\t\t# maybe combine the list add/remove/list commands together?\n\t\t\telif command2 == \"ignore\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tclient.ignore_list.add(arg2)\n\t\t\t\tclient.send(\"MSG\", {'text': '\\\"%s\\\" added to ignore list' % arg2})\n\t\t\telif command2 == \"unignore\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tif arg2 in client.ignore_list:\n\t\t\t\t\tclient.ignore_list.remove(arg2)\n\t\t\t\tclient.send(\"MSG\", {'text': '\\\"%s\\\" removed from ignore list' % arg2})\n\t\t\telif command2 == \"ignorelist\":\n\t\t\t\tclient.send(\"MSG\", {'text': 'Ignore list: '+str(client.ignore_list)})\n\n\t\t\telif command2 == \"watch\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tclient.watch_list.add(arg2)\n\t\t\t\tclient.send(\"MSG\", {'text': '\\\"%s\\\" added to watch list' % arg2})\n\t\t\telif command2 == \"unwatch\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tif arg2 in client.watch_list:\n\t\t\t\t\tclient.watch_list.remove(arg2)\n\t\t\t\tclient.send(\"MSG\", {'text': '\\\"%s\\\" removed from watch list' % arg2})\n\t\t\telif command2 == \"watchlist\":\n\t\t\t\tclient.send(\"MSG\", {'text': 'Watch list: '+str(client.watch_list)})\n\n\t\t\telif command2 in [\"grant\", \"deny\", \"revoke\"]:\n\t\t\t\tif client.mustBeOwner(True):\n\t\t\t\t\t# Check syntax\n\t\t\t\t\tparam = arg2.lower().split(' ')\n\t\t\t\t\tif len(param) < 2:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Must specify a permission and a username'})\n\t\t\t\t\t\treturn\n\t\t\t\t\t# Has to be a valid permission\n\t\t\t\t\tif param[0] not in permission:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': '\"%s\" Not a valid permission' % param[0]})\n\t\t\t\t\t\treturn\n\t\t\t\t\tpermission_value = permission[param[0]]\n\n\t\t\t\t\t# Special usernames for map defaults\n\t\t\t\t\tif param[1] == '!default':\n\t\t\t\t\t\tif command2 == \"grant\":\n\t\t\t\t\t\t\tself.allow |= permission_value\n\t\t\t\t\t\t\tself.deny &= ~permission_value\n\t\t\t\t\t\telif command2 == \"deny\":\n\t\t\t\t\t\t\tself.allow &= ~permission_value\n\t\t\t\t\t\t\tself.deny |= permission_value\n\t\t\t\t\t\telif command2 == \"revoke\":\n\t\t\t\t\t\t\tself.allow &= ~permission_value\n\t\t\t\t\t\t\tself.deny &= ~permission_value\n\t\t\t\t\t\tself.broadcast(\"MSG\", {'text': \"%s sets the default \\\"%s\\\" permission to [b]%s[/b]\" % (client.nameAndUsername(), param[0], command2)})\n\t\t\t\t\t\treturn\n\n\t\t\t\t\tif param[1] == 
'!guest':\n\t\t\t\t\t\tif command2 == \"deny\":\n\t\t\t\t\t\t\tself.guest_deny |= permission_value\n\t\t\t\t\t\telif command2 == \"revoke\":\n\t\t\t\t\t\t\tself.guest_deny &= ~permission_value\n\t\t\t\t\t\tself.broadcast(\"MSG\", {'text': \"%s sets the guest \\\"%s\\\" permission to [b]%s[/b]\" % (client.nameAndUsername(), param[0], command2)})\n\t\t\t\t\t\treturn\n\n\t\t\t\t\t# Has to be a user that exists\n\t\t\t\t\tuid = findDBIdByUsername(param[1])\n\t\t\t\t\tif uid == None:\n\t\t\t\t\t\tclient.failedToFind(param[1])\n\t\t\t\t\t\treturn\n\n\t\t\t\t\t# Finally we know it's valid\n\t\t\t\t\tvalue = None\n\t\t\t\t\tif command2 == \"grant\":\n\t\t\t\t\t\tvalue = True\n\t\t\t\t\tif command2 == \"deny\":\n\t\t\t\t\t\tvalue = False\n\t\t\t\t\tself.set_permission(uid, permission_value, value)\n\t\t\t\t\tself.broadcast(\"MSG\", {'text': \"%s sets %s's \\\"%s\\\" permission to [b]%s[/b]\" % (client.nameAndUsername(), param[1], param[0], command2)})\n\n\t\t\telif command2 == \"permlist\":\n\t\t\t\tc = Database.cursor()\n\t\t\t\tperms = \"Defaults: \"\n\n\t\t\t\t# List map default permissions\n\t\t\t\tfor k,v in permission.items():\n\t\t\t\t\tif (self.allow & v) == v:\n\t\t\t\t\t\tperms += \"+\"+k+\" \"\n\t\t\t\t\tif (self.deny & v) == v:\n\t\t\t\t\t\tperms += \"-\"+k+\" \"\n\t\t\t\t\tif (self.guest_deny & v) == v:\n\t\t\t\t\t\tperms += \"-\"+k+\"(guest) \"\n\n\t\t\t\tperms += \"[ul]\"\n\t\t\t\tfor row in c.execute('SELECT username, allow, deny FROM Map_Permission mp, User u WHERE mp.mid=? AND mp.uid=u.uid', (self.id,)):\n\t\t\t\t\tperms += \"[li][b]\"+row[0] + \"[/b]: \"\n\t\t\t\t\tfor k,v in permission.items():\n\t\t\t\t\t\tif (row[1] & v) == v: # allow\n\t\t\t\t\t\t\tperms += \"+\"+k+\" \"\n\t\t\t\t\t\tif (row[2] & v) == v: #deny\n\t\t\t\t\t\t\tperms += \"-\"+k+\" \"\n\t\t\t\t\tperms += \"[/li]\"\n\t\t\t\tperms += \"[/ul]\"\n\t\t\t\tclient.send(\"MSG\", {'text': perms})\n\n\t\t\telif command2 == \"mymaps\":\n\t\t\t\tif client.db_id == None:\n\t\t\t\t\treturn\n\t\t\t\tc = Database.cursor()\n\t\t\t\tmaps = \"My maps: [ul]\"\n\t\t\t\tfor row in c.execute('SELECT m.mid, m.name FROM Map m WHERE m.owner=?', (client.db_id,)):\n\t\t\t\t\tmaps += \"[li][b]%s[/b] [command]map %d[/command][/li]\" % (row[1], row[0])\n\t\t\t\tmaps += \"[/ul]\"\n\t\t\t\tclient.send(\"MSG\", {'text': maps})\n\n\t\t\telif command2 == \"publicmaps\":\n\t\t\t\tc = Database.cursor()\n\t\t\t\tmaps = \"Public maps: [ul]\"\n\t\t\t\tfor row in c.execute('SELECT m.mid, m.name, u.username FROM Map m, User u WHERE m.owner=u.uid and (m.flags&1)!=0'):\n\t\t\t\t\tmaps += \"[li][b]%s[/b] (%s) [command]map %d[/command][/li]\" % (row[1], row[2], row[0])\n\t\t\t\tmaps += \"[/ul]\"\n\t\t\t\tclient.send(\"MSG\", {'text': maps})\n\n\t\t\telif command2 == \"mapname\":\n\t\t\t\tif client.mustBeOwner(False):\n\t\t\t\t\tself.name = arg2\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Map name set to \\\"%s\\\"' % self.name})\n\t\t\telif command2 == \"mapdesc\":\n\t\t\t\tif client.mustBeOwner(False):\n\t\t\t\t\tself.desc = arg2\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Map description set to \\\"%s\\\"' % self.desc})\n\t\t\telif command2 == \"mapowner\":\n\t\t\t\tif client.mustBeOwner(False):\n\t\t\t\t\tnewowner = findDBIdByUsername(arg2)\n\t\t\t\t\tif newowner:\n\t\t\t\t\t\tself.owner = newowner\n\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Map owner set to \\\"%s\\\"' % self.owner})\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Nonexistent account'})\n\n\t\t\telif command2 == \"mapprivacy\":\n\t\t\t\tif 
client.mustBeOwner(False):\n\t\t\t\t\tif arg2 == \"public\":\n\t\t\t\t\t\tself.deny &= ~permission['entry']\n\t\t\t\t\t\tself.flags |= mapflag['public']\n\t\t\t\t\telif arg2 == \"private\":\n\t\t\t\t\t\tself.deny |= permission['entry']\n\t\t\t\t\t\tself.flags &= ~mapflag['public']\n\t\t\t\t\telif arg2 == \"unlisted\":\n\t\t\t\t\t\tself.deny &= ~permission['entry']\n\t\t\t\t\t\tself.flags &= ~mapflag['public']\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Map privacy must be public, private, or unlisted'})\n\t\t\telif command2 == \"mapprotect\":\n\t\t\t\tif client.mustBeOwner(False):\n\t\t\t\t\tif arg2 == \"off\":\n\t\t\t\t\t\tself.allow |= permission['sandbox']\n\t\t\t\t\telif arg2 == \"on\":\n\t\t\t\t\t\tself.allow &= ~permission['sandbox']\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Map building must be on or off'})\n\t\t\telif command2 == \"mapbuild\":\n\t\t\t\tif client.mustBeOwner(True):\n\t\t\t\t\tif arg2 == \"on\":\n\t\t\t\t\t\tself.allow |= permission['build']\n\t\t\t\t\telif arg2 == \"off\":\n\t\t\t\t\t\tself.allow &= ~permission['build']\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Map building must be on or off'})\n\t\t\telif command2 == \"defaultfloor\":\n\t\t\t\tif client.mustBeOwner(False):\n\t\t\t\t\tself.default_turf = arg2\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Map floor changed to %s' % arg2})\n\t\t\telif command2 == \"mapspawn\":\n\t\t\t\tif client.mustBeOwner(False):\n\t\t\t\t\tself.start_pos = [client.x, client.y]\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Map start changed to %d,%d' % (client.x, client.y)})\n\n\t\t\telif command2 == \"listeners\":\n\t\t\t\tout = ''\n\t\t\t\tfor i in botwatch_type.keys():\n\t\t\t\t\tc = botwatch_type[i]\n\t\t\t\t\tif self.id in BotWatch[c]:\n\t\t\t\t\t\tfor u in BotWatch[c][self.id]:\n\t\t\t\t\t\t\tout += '%s (%s), ' % (u.username, i)\n\t\t\t\tclient.send(\"MSG\", {'text': 'Listeners here: ' + out})\n\n\t\t\telif command2 == \"listen\":\n\t\t\t\tif client.db_id == None:\n\t\t\t\t\treturn\n\t\t\t\tparams = arg2.split()\n\t\t\t\tcategories = set(params[0].split(','))\n\t\t\t\tmaps = set([int(x) for x in params[1].split(',')])\n\t\t\t\tfor c in categories:\n\t\t\t\t\t# find category number from name\n\t\t\t\t\tif c not in botwatch_type:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid listen category: %s' % c})\n\t\t\t\t\t\treturn\n\t\t\t\t\tcategory = botwatch_type[c]\n\n\t\t\t\t\tfor m in maps:\n\t\t\t\t\t\tcursor = Database.cursor()\n\t\t\t\t\t\tcursor.execute('SELECT allow FROM Map_Permission WHERE mid=? 
AND uid=?', (m, client.db_id,))\n\t\t\t\t\t\tresult = cursor.fetchone()\n\t\t\t\t\t\tif (result == None) or (result[0] & permission['map_bot'] == 0):\n\t\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Don\\'t have permission to listen on map: %d' % m})\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tif m not in BotWatch[category]:\n\t\t\t\t\t\t\tBotWatch[category][m] = set()\n\t\t\t\t\t\tBotWatch[category][m].add(client)\n\t\t\t\t\t\tclient.listening_maps.add((category, m))\n\n\t\t\t\t\t\t# Send initial data\n\t\t\t\t\t\tif c == 'build':\n\t\t\t\t\t\t\tmap = getMapById(m)\n\t\t\t\t\t\t\tdata = map.map_info()\n\t\t\t\t\t\t\tdata['remote_map'] = m\n\t\t\t\t\t\t\tclient.send(\"MAI\", data)\n\n\t\t\t\t\t\t\tdata = map.map_section(0, 0, map.width-1, map.height-1)\n\t\t\t\t\t\t\tdata['remote_map'] = m\n\t\t\t\t\t\t\tclient.send(\"MAP\", data)\n\t\t\t\t\t\telif c == 'entry':\n\t\t\t\t\t\t\tclient.send(\"WHO\", {'list': getMapById(m).who(), 'remote_map': m})\n\n\t\t\t\tclient.send(\"MSG\", {'text': 'Listening on maps now: ' + str(client.listening_maps)})\n\n\t\t\telif command2 == \"unlisten\":\n\t\t\t\tif client.db_id == None:\n\t\t\t\t\treturn\n\t\t\t\tparams = arg2.split()\n\t\t\t\tcategories = set(params[0].split(','))\n\t\t\t\tmaps = [int(x) for x in params[1].split(',')]\n\t\t\t\tfor c in categories:\n\t\t\t\t\t# find category number from name\n\t\t\t\t\tif c not in botwatch_type:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid listen category: \"%s\"' % c})\n\t\t\t\t\t\treturn\n\t\t\t\t\tcategory = botwatch_type[c]\n\n\t\t\t\t\tfor m in maps:\n\t\t\t\t\t\tif (m in BotWatch[category]) and (client in BotWatch[category][m]):\n\t\t\t\t\t\t\tBotWatch[category][m].remove(client)\n\t\t\t\t\t\t\tif not len(BotWatch[category][m]):\n\t\t\t\t\t\t\t\tdel BotWatch[category][m]\n\t\t\t\t\t\tif (category, m) in client.listening_maps:\n\t\t\t\t\t\t\tclient.listening_maps.remove((category, m))\n\t\t\t\tclient.send(\"MSG\", {'text': 'Stopped listening on maps: ' + str(client.listening_maps)})\n\n\t\t\telif command2 == \"kick\" or command2 == \"kickban\":\n\t\t\t\targ2 = arg2.lower()\n\t\t\t\tif client.mustBeOwner(True):\n\t\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\t\tif u != None:\n\t\t\t\t\t\tif u.map_id == client.map_id:\n\t\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Kicked '+u.nameAndUsername()})\n\t\t\t\t\t\t\tu.send(\"MSG\", {'text': 'Kicked by '+client.nameAndUsername()})\n\t\t\t\t\t\t\tu.send_home()\n\t\t\t\t\t\t\tif command2 == \"kickban\":\n\t\t\t\t\t\t\t\tself.set_permission(findDBIdByUsername(arg2), permission['entry'], False)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'User not on this map'})\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'User not found'})\n\n\t\t\telif command2 == \"goback\":\n\t\t\t\tif len(client.tp_history) > 0:\n\t\t\t\t\tpos = client.tp_history.pop()\n\t\t\t\t\tclient.switch_map(pos[0], new_pos=[pos[1], pos[2]], update_history=False)\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Nothing in teleport history'})\n\n\t\t\telif command2 == \"sethome\":\n\t\t\t\tclient.home = [client.map_id, client.x, client.y]\n\t\t\t\tclient.send(\"MSG\", {'text': 'Home set'})\n\t\t\telif command2 == \"home\":\n\t\t\t\tif client.home == None:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You don\\'t have a home set'})\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Teleported to your home'})\n\t\t\t\t\tclient.send_home()\n\t\t\telif command2 == \"map\":\n\t\t\t\ttry:\n\t\t\t\t\tif mapIdExists(int(arg2)):\n\t\t\t\t\t\tif 
client.switch_map(int(arg2)):\n\t\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Teleported to map %s' % arg2})\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Map %s doesn\\'t exist' % arg2})\n\t\t\t\texcept:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Couldn\\'t go to map %s' % arg2})\n\t\t\telif command2 == \"saveme\":\n\t\t\t\tif client.username == None:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You are not logged in'})\n\t\t\t\telse:\n\t\t\t\t\tclient.save()\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Account saved'})\n\t\t\telif command2 == \"changepass\":\n\t\t\t\tif client.username == None:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'You are not logged in'})\n\t\t\t\telif len(arg2):\n\t\t\t\t\tclient.changepass(arg2)\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Password changed'})\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'No password given'})\n\t\t\telif command2 == \"register\":\n\t\t\t\tif client.username != None:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Register fail, you already registered'})\n\t\t\t\telse:\n\t\t\t\t\tparams = arg2.split()\n\t\t\t\t\tif len(params) != 2:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Syntax is: /register username password'})\n\t\t\t\t\telse:\n\t\t\t\t\t\tif client.register(filterUsername(params[0]), params[1]):\n\t\t\t\t\t\t\tself.broadcast(\"MSG\", {'text': client.name+\" has now registered\"})\n\t\t\t\t\t\t\tself.broadcast(\"WHO\", {'add': client.who()}) # update client view, probably just for the username\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Register fail, account already exists'})\n\t\t\telif command2 == \"login\":\n\t\t\t\tparams = arg2.split()\n\t\t\t\tif len(params) != 2:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Syntax is: /login username password'})\n\t\t\t\telse:\n\t\t\t\t\tclient.login(filterUsername(params[0]), params[1])\n\t\t\telif command2 == \"userpic\":\n\t\t\t\targ2 = arg2.split(' ')\n\t\t\t\tsuccess = False\n\n\t\t\t\tif len(arg2) == 1:\n\t\t\t\t\tdefaults = {'bunny': [0, 2, 25], 'cat': [0, 2, 26], 'hamster': [0, 8, 25], 'fire': [0, 4,26]}\n\t\t\t\t\tif arg2[0] in defaults:\n\t\t\t\t\t\tclient.pic = defaults[arg2[0]];\n\t\t\t\t\t\tsuccess = True\n\t\t\t\t\t# temporary thing to allow custom avatars\n\t\t\t\t\telse:\n\t\t\t\t\t\tif arg2[0].startswith(\"http\"):\n\t\t\t\t\t\t\tif imageURLIsOkay(arg2[0]):\n\t\t\t\t\t\t\t\tclient.pic = [arg2[0], 0, 0];\n\t\t\t\t\t\t\t\tsuccess = True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'URL doesn\\'t match any whitelisted sites'})\n\t\t\t\t\t\t\t\treturn\n\t\t\t\telif len(arg2) == 2:\n\t\t\t\t\tif arg2[0].isnumeric() and arg2[1].isnumeric():\n\t\t\t\t\t\tclient.pic = [0, int(arg2[0]), int(arg2[1])]\n\t\t\t\t\t\tsuccess = True\n\t\t\t\tif success:\n\t\t\t\t\tself.broadcast(\"WHO\", {'add': client.who()}) # update client view\n\t\t\t\telse:\n\t\t\t\t\tclient.send(\"ERR\", {'text': 'Syntax is: /userpic sheet x y'})\n\n\t\t\telif command2 == \"gwho\":\n\t\t\t\tnames = ''\n\t\t\t\tfor u in AllClients:\n\t\t\t\t\tif len(names) > 0:\n\t\t\t\t\t\tnames += ', '\n\t\t\t\t\tnames += u.nameAndUsername()\n\t\t\t\tclient.send(\"MSG\", {'text': 'List of users connected: '+names})\n\t\t\telif command2 == \"who\":\n\t\t\t\tnames = ''\n\t\t\t\tfor u in self.users:\n\t\t\t\t\tif len(names) > 0:\n\t\t\t\t\t\tnames += ', '\n\t\t\t\t\tnames += u.nameAndUsername()\n\t\t\t\tclient.send(\"MSG\", {'text': 'List of users here: '+names})\n\n\t\t\telif command2 == \"whereare\" or command2 == \"wa\":\n\t\t\t\tnames = 'Whereare: [ul]'\n\t\t\t\tfor m in 
AllMaps:\n\t\t\t\t\tif m.flags & mapflag['public'] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnames += '[li][b]%s[/b] (%d): ' % (m.name, len(m.users))\n\t\t\t\t\tfor u in m.users:\n\t\t\t\t\t\tnames += u.nameAndUsername()+', '\n\t\t\t\t\tnames = names.rstrip(', ') + ' [command]map %d[/command][/li]' % m.id\n\t\t\t\tnames += '[/ul]'\n\n\t\t\t\tclient.send(\"MSG\", {'text': names})\n\n\t\t\telif command2 == \"savemap\":\n\t\t\t\tself.save()\n\t\t\t\tself.broadcast(\"MSG\", {'text': client.name+\" saved the map\"})\n\n\t\t\t# Server admin commands\n\t\t\telif command2 == \"broadcast\":\n\t\t\t\tif client.mustBeServerAdmin() and len(arg2) > 0:\n\t\t\t\t\tbroadcastToAll(\"Admin broadcast: \"+arg2)\n\t\t\telif command2 == \"kill\":\n\t\t\t\tif client.mustBeServerAdmin():\n\t\t\t\t\tu = findClientByUsername(arg2)\n\t\t\t\t\tif u != None:\n\t\t\t\t\t\tclient.send(\"MSG\", {'text': 'Killed '+u.nameAndUsername()})\n\t\t\t\t\t\tu.send(\"MSG\", {'text': 'Killed by '+client.nameAndUsername()})\n\t\t\t\t\t\tu.disconnect()\n\t\t\telif command2 == \"shutdown\":\n\t\t\t\tif client.mustBeServerAdmin():\n\t\t\t\t\tif arg2 == \"cancel\":\n\t\t\t\t\t\tServerShutdown[0] = -1\n\t\t\t\t\t\tbroadcastToAll(\"Server shutdown canceled\")\n\t\t\t\t\telif arg2.isnumeric():\n\t\t\t\t\t\tServerShutdown[0] = int(arg2)\n\t\t\t\t\t\tbroadcastToAll(\"Server shutdown in %d seconds! (started by %s)\" % (ServerShutdown[0], client.name))\n\t\t\telse:\n\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid command?'})\n\n\t\telif command == \"BAG\":\n\t\t\tif client.db_id != None:\n\t\t\t\tc = Database.cursor()\n\t\t\t\tif \"create\" in arg:\n\t\t\t\t\t# restrict type variable\n\t\t\t\t\tif arg['create']['type'] < 0 or arg['create']['type'] > 6:\n\t\t\t\t\t\targ['create']['type'] = 0\n\t\t\t\t\tc.execute(\"INSERT INTO Asset_Info (creator, owner, name, type, regtime, flags) VALUES (?, ?, ?, ?, ?, ?)\", (client.db_id, client.db_id, arg['create']['name'], arg['create']['type'], datetime.datetime.now(), 0))\n\t\t\t\t\tc.execute('SELECT last_insert_rowid()')\n\t\t\t\t\tclient.send(\"BAG\", {'update': {'id': c.fetchone()[0], 'name': arg['create']['name'], 'type': arg['create']['type']}})\n\n\t\t\t\telif \"clone\" in arg:\n\t\t\t\t\tc.execute('SELECT name, desc, type, flags, creator, folder, data FROM Asset_Info WHERE owner=? AND aid=?', (client.db_id, arg['clone']))\n\t\t\t\t\trow = c.fetchone()\n\t\t\t\t\tif row == None:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid item ID'})\n\t\t\t\t\t\treturn\n\n\t\t\t\t\tc.execute(\"INSERT INTO Asset_Info (name, desc, type, flags, creator, folder, data, owner, regtime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", \\\n (row[0], row[1], row[2], row[3], row[4], row[5], row[6], client.db_id, datetime.datetime.now()))\n\t\t\t\t\tc.execute('SELECT last_insert_rowid()')\n\t\t\t\t\tclient.send(\"BAG\", {'update': {'id': c.fetchone()[0], 'name': row[0], 'desc': row[1], 'type': row[2], 'flags': row[3], 'folder': row[5], 'data': row[6]}})\n\n\t\t\t\telif \"update\" in arg:\n\t\t\t\t\t# get the initial data\n\t\t\t\t\tc.execute('SELECT name, desc, flags, folder, data, type FROM Asset_Info WHERE owner=? 
AND aid=?', (client.db_id, arg['update']['id']))\n\t\t\t\t\tresult = c.fetchone()\n\t\t\t\t\tif result == None:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid item ID'})\n\t\t\t\t\t\treturn\n\t\t\t\t\tout = {'name': result[0], 'desc': result[1], 'flags': result[2], 'folder': result[3], 'data': result[4]}\n\t\t\t\t\tasset_type = result[5]\n\t\t\t\t\tif asset_type == 2 and \"data\" in arg['update'] and not imageURLIsOkay(arg['update']['data']):\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Image asset URL doesn\\'t match any whitelisted sites'})\n\t\t\t\t\t\treturn\n\t\t\t\t\tif asset_type == 3 and \"data\" in arg['update']:\n\t\t\t\t\t\ttile_test = tileIsOkay(arg['update']['data'])\n\t\t\t\t\t\tif not tile_test[0]:\n\t\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Tile [tt]%s[/tt] rejected (%s)' % (arg['update']['data'], tile_test[1])})\n\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t# overwrite any specified columns\n\t\t\t\t\tfor key, value in arg['update'].items():\n\t\t\t\t\t\tout[key] = value\n\t\t\t\t\t\tif type(out[key]) == dict:\n\t\t\t\t\t\t\tout[key] = json.dumps(out[key])\n\t\t\t\t\tc.execute('UPDATE Asset_Info SET name=?, desc=?, flags=?, folder=?, data=? WHERE owner=? AND aid=?', (out['name'], out['desc'], out['flags'], out['folder'], out['data'], client.db_id, arg['update']['id']))\n\n\t\t\t\t\t# send back confirmation\n\t\t\t\t\tclient.send(\"BAG\", {'update': arg['update']})\n\n\t\t\t\telif \"delete\" in arg:\n\t\t\t\t\t# move deleted contents of a deleted folder outside the folder\n\t\t\t\t\tc.execute('SELECT folder FROM Asset_Info WHERE owner=? AND aid=?', (client.db_id, arg['delete']))\n\t\t\t\t\tresult = c.fetchone()\n\t\t\t\t\tif result == None:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid item ID'})\n\t\t\t\t\t\treturn\n\t\t\t\t\t# probably better to handle this with a foreign key constraint and cascade?\n\t\t\t\t\t# it's NOT updated client-side but it shouldn't matter\n\t\t\t\t\tc.execute('UPDATE Asset_Info SET folder=? WHERE owner=? AND folder=?', (result[0], client.db_id, arg['delete']))\n\n\t\t\t\t\t# actually delete\n\t\t\t\t\tc.execute('DELETE FROM Asset_Info WHERE owner=? AND aid=?', (client.db_id, arg['delete']))\n\t\t\t\t\tclient.send(\"BAG\", {'remove': arg['delete']})\n\t\t\telse:\n\t\t\t\tclient.send(\"ERR\", {'text': 'Guests don\\'t have an inventory currently. Use [tt]/register username password[/tt]'})\n\n\t\telif command == \"EML\": #mail\n\t\t\tif client.db_id != None:\n\t\t\t\tc = Database.cursor()\n\t\t\t\tif \"send\" in arg:\n\t\t\t\t\t# todo: definitely needs some limits in place to prevent abuse!\n\n\t\t\t\t\t# get a list of all the people to mail\n\t\t\t\t\trecipient_id = set([findDBIdByUsername(x) for x in arg['send']['to']])\n\t\t\t\t\trecipient_string = ','.join([str(x) for x in recipient_id])\n\n\t\t\t\t\tif any([x == None for x in recipient_id]):\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Couldn\\'t find one or more users you wanted to mail'})\n\t\t\t\t\t\treturn\n\n\t\t\t\t\t# let the client know who sent it, since the 'send' argument will get passed along directly\n\t\t\t\t\targ['send']['from'] = client.username\n\n\t\t\t\t\t# send everyone their mail\n\t\t\t\t\tfor id in recipient_id:\n\t\t\t\t\t\tif id == None:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tc.execute(\"INSERT INTO Mail (uid, sender, recipients, subject, contents, time, flags) VALUES (?, ?, ?, ?, ?, ?, ?)\", (id, client.db_id, recipient_string, arg['send']['subject'], arg['send']['contents'], datetime.datetime.now(), 0))\n\n\t\t\t\t\t\t# is that person online? 
tell them!\n\t\t\t\t\t\tfind = findClientByDBId(id)\n\t\t\t\t\t\tif find:\n\t\t\t\t\t\t\targ['send']['id'] = c.execute('SELECT last_insert_rowid()').fetchone()[0]\n\t\t\t\t\t\t\tfind.send(\"EML\", {'receive': arg['send']})\n\n\t\t\t\t\tclient.send(\"EML\", {'sent': {'subject': arg['send']['subject']}}) #acknowledge\n\t\t\t\t\tclient.send(\"MSG\", {'text': 'Sent mail to %d users' % len(recipient_id)})\n\n\t\t\t\telif \"read\" in arg:\n\t\t\t\t\tc.execute('UPDATE Mail SET flags=1 WHERE uid=? AND id=?', (client.db_id, arg['read']))\n\t\t\t\telif \"delete\" in arg:\n\t\t\t\t\tc.execute('DELETE FROM Mail WHERE uid=? AND id=?', (client.db_id, arg['delete']))\n\n\t\t\telse:\n\t\t\t\tclient.send(\"ERR\", {'text': 'Guests don\\'t have mail. Use [tt]/register username password[/tt]'})\n\n\t\telif command == \"MSG\":\n\t\t\ttext = arg[\"text\"]\n\t\t\tself.broadcast(\"MSG\", {'name': client.name, 'username': client.usernameOrId(), 'text': escapeTags(text)}, remote_category=botwatch_type['chat'])\n\n\t\telif command == \"TSD\":\n\t\t\tc = Database.cursor()\n\t\t\tc.execute('SELECT data FROM Asset_Info WHERE type=4 AND aid=?', (arg['id'],))\n\t\t\tresult = c.fetchone()\n\t\t\tif result == None:\n\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid item ID'})\n\t\t\telse:\n\t\t\t\tclient.send(\"TSD\", {'id': arg['id'], 'data': result[0]})\n\t\telif command == \"IMG\":\n\t\t\tc = Database.cursor()\n\t\t\tc.execute('SELECT data FROM Asset_Info WHERE type=2 AND aid=?', (arg['id'],))\n\t\t\tresult = c.fetchone()\n\t\t\tif result == None:\n\t\t\t\tclient.send(\"ERR\", {'text': 'Invalid item ID'})\n\t\t\telse:\n\t\t\t\tclient.send(\"IMG\", {'id': arg['id'], 'url': result[0]})\n\n\t\telif command == \"MAI\":\n\t\t\tsend_all_info = client.mustBeOwner(True, giveError=False)\n\t\t\tclient.send(\"MAI\", self.map_info(all_info=send_all_info))\n\t\telif command == \"DEL\":\n\t\t\tx1 = arg[\"pos\"][0]\n\t\t\ty1 = arg[\"pos\"][1]\n\t\t\tx2 = arg[\"pos\"][2]\n\t\t\ty2 = arg[\"pos\"][3]\n\t\t\tif self.has_permission(client, permission['build'], True) or client.mustBeOwner(True, giveError=False):\n\t\t\t\tfor x in range(x1, x2+1):\n\t\t\t\t\tfor y in range(y1, y2+1):\n\t\t\t\t\t\tif arg[\"turf\"]:\n\t\t\t\t\t\t\tself.turfs[x][y] = None\n\t\t\t\t\t\tif arg[\"obj\"]:\n\t\t\t\t\t\t\tself.objs[x][y] = None\n\t\t\t\tself.broadcast(\"MAP\", self.map_section(x1, y1, x2, y2))\n\n\t\t\t\t# make username available to listeners\n\t\t\t\targ['username'] = client.usernameOrId()\n\t\t\t\tself.broadcast(\"DEL\", arg, remote_only=True, remote_category=botwatch_type['build'])\n\t\t\telse:\n\t\t\t\tclient.send(\"MAP\", self.map_section(x1, y1, x2, y2))\n\t\t\t\tclient.send(\"ERR\", {'text': 'Building is disabled on this map'})\n\t\telif command == \"PUT\":\n\t\t\tx = arg[\"pos\"][0]\n\t\t\ty = arg[\"pos\"][1]\n\t\t\tif self.has_permission(client, permission['build'], True) or client.mustBeOwner(True, giveError=False):\n\t\t\t\t# verify that the tiles you're attempting to put down are actually good\n\t\t\t\tif arg[\"obj\"]: #object\n\t\t\t\t\ttile_test = [tileIsOkay(x) for x in arg[\"atom\"]]\n\t\t\t\t\tif all(x[0] for x in tile_test): # all tiles pass the test\n\t\t\t\t\t\tself.objs[x][y] = arg[\"atom\"]\n\t\t\t\t\t\tself.broadcast(\"MAP\", self.map_section(x, y, x, y))\n\t\t\t\t\telse:\n\t\t\t\t\t\t# todo: give a reason?\n\t\t\t\t\t\tclient.send(\"MAP\", self.map_section(x, y, x, y))\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Placed objects rejected'})\n\t\t\t\telse: #turf\n\t\t\t\t\ttile_test = tileIsOkay(arg[\"atom\"])\n\t\t\t\t\tif 
tile_test[0]:\n\t\t\t\t\t\tself.turfs[x][y] = arg[\"atom\"]\n\t\t\t\t\t\tself.broadcast(\"MAP\", self.map_section(x, y, x, y))\n\n\t\t\t\t\t\t# make username available to listeners\n\t\t\t\t\t\targ['username'] = client.usernameOrId()\n\t\t\t\t\t\tself.broadcast(\"PUT\", arg, remote_only=True, remote_category=botwatch_type['build'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tclient.send(\"MAP\", self.map_section(x, y, x, y))\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Tile [tt]%s[/tt] rejected (%s)' % (arg[\"atom\"], tile_test[1])})\n\t\t\telse:\n\t\t\t\tclient.send(\"MAP\", self.map_section(x, y, x, y))\n\t\t\t\tclient.send(\"ERR\", {'text': 'Building is disabled on this map'})\n\t\telif command == \"BLK\":\n\t\t\tif self.has_permission(client, permission['bulk_build'], False) or client.mustBeOwner(True, giveError=False):\n\t\t\t\t# verify the tiles\n\t\t\t\tfor turf in arg[\"turf\"]:\n\t\t\t\t\tif not tileIsOkay(turf[2])[0]:\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Bad turf in bulk build'})\n\t\t\t\t\t\treturn\n\t\t\t\tfor obj in arg[\"obj\"]:\n\t\t\t\t\ttile_test = [tileIsOkay(x) for x in obj[2]]\n\t\t\t\t\tif any(not x[0] for x in tile_test): # any tiles don't pass the test\n\t\t\t\t\t\tclient.send(\"ERR\", {'text': 'Bad obj in bulk build'})\n\t\t\t\t\t\treturn\n\t\t\t\t# make username available to other clients\n\t\t\t\targ['username'] = client.usernameOrId()\n\n\t\t\t\t# place the tiles\n\t\t\t\tfor turf in arg[\"turf\"]:\n\t\t\t\t\tx = turf[0]\n\t\t\t\t\ty = turf[1]\n\t\t\t\t\ta = turf[2]\n\t\t\t\t\twidth = 1\n\t\t\t\t\theight = 1\n\t\t\t\t\tif len(turf) == 5:\n\t\t\t\t\t\twidth = turf[3]\n\t\t\t\t\t\theight = turf[4]\n\t\t\t\t\tfor w in range(0, width):\n\t\t\t\t\t\tfor h in range(0, height):\n\t\t\t\t\t\t\tself.turfs[x+w][y+h] = a\n\t\t\t\t# place the object lists\n\t\t\t\tfor obj in arg[\"obj\"]:\n\t\t\t\t\tx = obj[0]\n\t\t\t\t\ty = obj[1]\n\t\t\t\t\ta = obj[2]\n\t\t\t\t\twidth = 1\n\t\t\t\t\theight = 1\n\t\t\t\t\tif len(obj) == 5:\n\t\t\t\t\t\twidth = obj[3]\n\t\t\t\t\t\theight = obj[4]\n\t\t\t\t\tfor w in range(0, width):\n\t\t\t\t\t\tfor h in range(0, height):\n\t\t\t\t\t\t\tself.objs[x+w][y+h] = a\n\t\t\t\tself.broadcast(\"BLK\", arg, remote_category=botwatch_type['build'])\n\t\t\telse:\n\t\t\t\tclient.send(\"ERR\", {'text': 'Bulk building is disabled on this map'})\n\n\tdef clean_up(self):\n\t\t\"\"\" Clean up everything before a map unload \"\"\"\n\t\tpass\n" } ]
1
budurli/spider
https://github.com/budurli/spider
9384cdfb5a4b72f3e4571b557e5092db9e97120f
69bef0c8351cd888345770ad0c51f48046e23118
57f580267fb77133eccc1a54b1856954dba2bb9e
refs/heads/master
2016-08-05T10:59:49.486807
2013-05-13T14:40:47
2013-05-13T14:40:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7520661354064941, "alphanum_fraction": 0.7603305578231812, "avg_line_length": 23.200000762939453, "blob_id": "5e7a183db26141ae5d140636306eb7fee7c8532c", "content_id": "efe488d17e9075f2002a05e4318700dd9f1ca585", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/spider/__init__.py", "repo_name": "budurli/spider", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom base import BaseSpider\nfrom crawler import Crawler, Parser\nfrom mirror import MirrorSpider\n" }, { "alpha_fraction": 0.6424999833106995, "alphanum_fraction": 0.643750011920929, "avg_line_length": 25.66666603088379, "blob_id": "d46dbc320fb106d6d0ee3cc96585857603b78369", "content_id": "8aa8add92bf282ed36738575b8bd626539d914a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 78, "num_lines": 30, "path": "/spider/mirror.py", "repo_name": "budurli/spider", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\n\nfrom crawler import Crawler\n\n\nhere = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)\n\nPROJECT_ROOT = here('..')\n\nroot = lambda *x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)\n\n\nclass MirrorSpider(Crawler):\n    \"\"\"\n    A \"mirror\" of the site. Copies the entire available structure into the specified folder\n    \"\"\"\n\n    def __init__(self, base_url, directory_name=None):\n        super(MirrorSpider, self).__init__(base_url)\n        self.directory_name = directory_name or root('mirror')\n\n    def create_tree(self, path):\n        os.makedirs(self.directory_name + path)\n\n    def handle_response(self, response):\n        self.crawled.add(response.url)\n        if u'text/html' in response.headers.get('content-type'):\n            print response.url\n" }, { "alpha_fraction": 0.5764241218566895, "alphanum_fraction": 0.5809362530708313, "avg_line_length": 25.863636016845703, "blob_id": "a58c05d8e8e351bddfca14a70cc94f8a0ba6b634", "content_id": "5fbf185b7c4d7594d339cbefcfc017cfff039520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 80, "num_lines": 66, "path": "/spider/crawler.py", "repo_name": "budurli/spider", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport re\n\nfrom gevent.pool import Pool\nimport requests\n\nfrom spider.base import BaseSpider\n\n\nclass Crawler(BaseSpider):\n    name = 'Crawler'\n    already_was_in = set()\n    crawled = set()\n    max_requests = 30\n\n    def __init__(self, base_url):\n        super(Crawler, self).__init__(base_url)\n\n    def start(self):\n        self.crawl(self.base_url)\n\n    def handle_response(self, response):\n        self.crawled.add(response.url)\n        print u'%s - %s ' % (self.name, response.url)\n\n    def check_urls(self, urls):\n        def fetch(url):\n            response = requests.request('GET', url, timeout=5.0)\n            if 200 == response.status_code and response.url not in self.crawled:\n                self.handle_response(response)\n\n        pool = Pool(self.max_requests)\n\n        for url in urls:\n            pool.spawn(fetch, url)\n        pool.join()\n\n    def crawl(self, list_of_urls):\n\n        if isinstance(list_of_urls, str):\n            list_of_urls = [list_of_urls]\n\n        for url in list_of_urls:\n            self.crawled.add(url)\n            new_urls = set(self.get_page_tree(url)) - self.crawled\n            self.check_urls(new_urls)\n            self.crawl(new_urls)\n\n    def add_list_to_sitemap(self, list_of_urls):\n        self.site_tree |= set(list_of_urls)\n\n\nclass Parser(Crawler):\n    name = 'Parser'\n\n    def __init__(self, base_url, urls_in_regulars):\n        super(Parser, self).__init__(base_url)\n        self.urls_to_parse = \"(\" + \")|(\".join(urls_in_regulars) + \")\"\n\n    def handle_response(self, response):\n        self.crawled.add(response.url)\n        if re.match(self.urls_to_parse, response.url):\n            self.parse(response)\n\n    def parse(self, response):\n        print u'%s - %s ' % (self.name, response.url)\n" }, { "alpha_fraction": 0.6090534925460815, "alphanum_fraction": 0.6378600597381592, "avg_line_length": 18.769229888916016, "blob_id": "a0e0593d1c43bc0a9abcbbe0515427df0d19d6ac", "content_id": "3224d8c4542791e3701cd53939dcbf0c7d86e987", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/test.py", "repo_name": "budurli/spider", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom spider import Parser, Crawler\n\nurls = [\n    'http://uslugi24.ru/catalogue/(.*?)'\n]\n\na = Parser(base_url='http://uslugi24.ru', urls_in_regulars=urls)\nb = Crawler('http://uslugi24.ru/catalogue')\n\na.start()\nb.start()" }, { "alpha_fraction": 0.5320033431053162, "alphanum_fraction": 0.5403158664703369, "avg_line_length": 22.153846740722656, "blob_id": "e0e5f10eb3c6c38e3a59847cc00229a1732a47ad", "content_id": "9e50ceeb6858ab4dce9f8116e0b8b225e4b39dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1203, "license_type": "no_license", "max_line_length": 60, "num_lines": 52, "path": "/spider/base.py", "repo_name": "budurli/spider", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport re\n\nimport gevent.monkey\n\ngevent.monkey.patch_socket()\n\nimport requests\n\nhref_pattern = re.compile(r'href=\"(.*?)\"')\n\n\nclass BaseSpider(object):\n    name = 'BaseSpider'\n    site_tree = set()\n    allowed_domains = set()\n\n    def __init__(self, base_url):\n        self.base_url = base_url\n        self.url_alive(self.base_url)\n        self.allowed_domains.add(base_url)\n        self.site_tree.add(base_url)\n\n    def url_alive(self, url):\n        success = requests.get(url)\n        return success.status_code in [200, 301, 302]\n\n    def get_page_tree(self, url):\n        result = set()\n\n        if self.base_url not in url:\n            this_url = self.base_url + url\n        else:\n            this_url = url\n\n        try:\n            page = requests.get(this_url)\n\n            for href in href_pattern.findall(page.text):\n\n                if href.startswith('/'):\n                    href = self.base_url + href\n\n                for domain in list(self.allowed_domains):\n                    if domain in href:\n                        result.add(href)\n\n        except Exception as err:\n            print u'Error %s with %s' % (str(err), this_url)\n\n        return list(result)" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 11.75, "blob_id": "f83c545c2d8b89786a2851e875837e952d986dbe", "content_id": "712b66568527f25eb7f9856255a0b40006df5d4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": "/README.md", "repo_name": "budurli/spider", "src_encoding": "UTF-8", "text": "spider\n======\n\nSomething like internet crawler\n" } ]
6
stevenrjanssens/drizzlepac
https://github.com/stevenrjanssens/drizzlepac
a7da6637f288d7adc133adc5eb31bd10f13ceb78
57294a8d5d4745185646b0015a37d11996a059fc
48eca0ad120e5cf5da3f95c96d4b34e471e210aa
refs/heads/master
2021-01-25T08:07:16.555366
2017-06-05T19:07:55
2017-06-05T19:07:55
93,677,691
0
0
null
2017-06-07T20:41:04
2017-05-26T17:07:39
2017-06-07T17:52:37
null
[ { "alpha_fraction": 0.6736183166503906, "alphanum_fraction": 0.6823737025260925, "avg_line_length": 56.91549301147461, "blob_id": "e1c5e8c24fbbe465a01b0404f53ad4eee6627475", "content_id": "870ff9ff0546c6e58a2ced6d9868bd53689dbc48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 16455, "license_type": "no_license", "max_line_length": 469, "num_lines": 284, "path": "/lib/drizzlepac/htmlhelp/drizcr.html", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n \n <title>Step 7: Cosmic-ray identification &#8212; DrizzlePac 2.1.16 (05-June-2017) documentation</title>\n \n <link rel=\"stylesheet\" href=\"_static/stsci_sphinx.css\" type=\"text/css\" />\n <link rel=\"stylesheet\" href=\"_static/pygments.css\" type=\"text/css\" />\n \n <script type=\"text/javascript\">\n var DOCUMENTATION_OPTIONS = {\n URL_ROOT: './',\n VERSION: '2.1.16 (05-June-2017)',\n COLLAPSE_INDEX: false,\n FILE_SUFFIX: '.html',\n HAS_SOURCE: true,\n SOURCELINK_SUFFIX: '.txt'\n };\n </script>\n <script type=\"text/javascript\" src=\"_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"search.html\" />\n <link rel=\"next\" title=\"Utilities\" href=\"util.html\" />\n <link rel=\"prev\" title=\"Step 6: Blotting the Median Image\" href=\"ablot.html\" /> \n </head>\n <body role=\"document\">\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"genindex.html\" title=\"General Index\"\n accesskey=\"I\">index</a></li>\n <li class=\"right\" >\n <a href=\"py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"right\" >\n <a href=\"util.html\" title=\"Utilities\"\n accesskey=\"N\">next</a> |</li>\n <li class=\"right\" >\n <a href=\"ablot.html\" title=\"Step 6: Blotting the Median Image\"\n accesskey=\"P\">previous</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li> \n </ul>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n <p class=\"logo\"><a href=\"index.html\">\n <img class=\"logo\" src=\"_static/stsci_logo.png\" alt=\"Logo\"/>\n </a></p>\n <h4>Previous topic</h4>\n <p class=\"topless\"><a href=\"ablot.html\"\n title=\"previous chapter\">Step 6: Blotting the Median Image</a></p>\n <h4>Next topic</h4>\n <p class=\"topless\"><a href=\"util.html\"\n title=\"next chapter\">Utilities</a></p>\n <div role=\"note\" aria-label=\"source link\">\n <h3>This Page</h3>\n <ul class=\"this-page-menu\">\n <li><a href=\"_sources/drizcr.rst.txt\"\n rel=\"nofollow\">Show Source</a></li>\n </ul>\n </div>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3>Quick search</h3>\n <form class=\"search\" action=\"search.html\" method=\"get\">\n <div><input type=\"text\" name=\"q\" /></div>\n <div><input type=\"submit\" value=\"Go\" /></div>\n <input type=\"hidden\" 
name=\"check_keywords\" value=\"yes\" />\n      <input type=\"hidden\" name=\"area\" value=\"default\" />\n    </form>\n</div>\n<script type=\"text/javascript\">$('#searchbox').show(0);</script>\n        </div>\n      </div>\n\n    <div class=\"document\">\n      <div class=\"documentwrapper\">\n        <div class=\"bodywrapper\">\n          <div class=\"body\" role=\"main\">\n            \n  <div class=\"section\" id=\"step-7-cosmic-ray-identification\">\n<span id=\"drizcr\"></span><h1>Step 7: Cosmic-ray identification<a class=\"headerlink\" href=\"#step-7-cosmic-ray-identification\" title=\"Permalink to this headline\">¶</a></h1>\n<p>The cosmic rays and bad pixels are now identified by comparing the input images with the associated blotted, median-cleaned images created in the previous step.</p>\n<span class=\"target\" id=\"module-drizzlepac.drizCR\"></span><p>Mask blemishes in dithered data by comparison of an image with a model\nimage and the derivative of the model image.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Authors:</th><td class=\"field-body\">Warren Hack</td>\n</tr>\n<tr class=\"field-even field\"><th class=\"field-name\">License:</th><td class=\"field-body\"><a class=\"reference external\" href=\"http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE\">http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE</a></td>\n</tr>\n</tbody>\n</table>\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.createCorrFile\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">createCorrFile</code><span class=\"sig-paren\">(</span><em>outfile</em>, <em>arrlist</em>, <em>template</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#createCorrFile\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.createCorrFile\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Create a _cor file with the same format as the original input image</p>\n<p>The DQ array will be replaced with the mask array used to create the _cor\nfile.</p>\n</dd></dl>\n\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.drizCR\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">drizCR</code><span class=\"sig-paren\">(</span><em>input=None</em>, <em>configObj=None</em>, <em>editpars=False</em>, <em>**inputDict</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#drizCR\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.drizCR\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>The blotted median images, now transformed back into the original\nreference frame, get compared to the original input images to detect any\nspurious pixels (which may include pixels impacted by cosmic rays) in\neach input. 
Those spurious pixels then get flagged as &#8216;bad&#8217; in the output\ncosmic ray mask files, which get used as input for the final combination\nso that they do not show up in the final product.\nThe identified bad pixels get flagged by updating the input mask files.\nOptionally, copies of the original images with the bad pixels removed\ncan be created through the use of the <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">driz_cr_corr</span></code> parameter.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Parameters:</th><td class=\"field-body\"><p class=\"first\"><strong id=\"input\">input</strong> : str or list of str (Default = None)</p>\n<blockquote>\n<div><p>A python list of blotted median image filenames, or just a single\nfilename.</p>\n</div></blockquote>\n<p><strong id=\"configObj\">configObj</strong> : configObject (Default = None)</p>\n<blockquote>\n<div><p>An instance of <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">configObject</span></code> which overrides default parameter settings.</p>\n</div></blockquote>\n<p><strong id=\"editpars\">editpars</strong> : bool (Default = False)</p>\n<blockquote>\n<div><p>A parameter that allows user to edit input parameters by hand in the GUI.\nTrue to use the GUI to edit parameters.</p>\n</div></blockquote>\n<p><strong id=\"inputDict\">inputDict</strong> : dict, optional</p>\n<blockquote>\n<div><p>An optional list of parameters specified by the user, which can also\nbe used to override the defaults.</p>\n</div></blockquote>\n</td>\n</tr>\n<tr class=\"field-even field\"><th class=\"field-name\" colspan=\"2\">Other Parameters:</th></tr>\n<tr class=\"field-even field\"><td>&nbsp;</td><td class=\"field-body\"><p class=\"first\"><strong>driz_cr</strong> : bool (Default = False)</p>\n<blockquote>\n<div><p>Perform cosmic-ray detection? If set to <code class=\"docutils literal\"><span class=\"pre\">True</span></code>, cosmic-rays will be\ndetected and used to create cosmic-ray masks based on the algorithms\nfrom <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">deriv</span></code> and <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">driz_cr</span></code>.</p>\n</div></blockquote>\n<p><strong>driz_cr_corr</strong> : bool (Default = False)</p>\n<blockquote>\n<div><p>Create a cosmic-ray cleaned input image? 
If set to <a class=\"reference external\" href=\"https://docs.python.org/2/library/constants.html#True\" title=\"(in Python v2.7)\"><code class=\"xref py py-obj docutils literal\"><span class=\"pre\">True</span></code></a>, a cosmic-ray\ncleaned <code class=\"docutils literal\"><span class=\"pre\">_cor</span></code> image will be generated directly from the input image,\nand a corresponding <code class=\"docutils literal\"><span class=\"pre\">_crmask</span></code> file will be written to document detected\npixels affected by cosmic-rays.</p>\n</div></blockquote>\n<p><strong>driz_cr_snr</strong> : list of floats (Default = &#8216;3.5 3.0&#8217;)</p>\n<blockquote>\n<div><p>The values for this parameter specify the signal-to-noise ratios for\nthe <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">driz_cr</span></code> task to be used in detecting cosmic rays.\nSee the help file for <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">driz_cr</span></code> for further discussion of this parameter.</p>\n</div></blockquote>\n<p><strong>driz_cr_grow</strong> : int (Default = 1)</p>\n<blockquote>\n<div><p>The radius, in pixels, around each detected cosmic-ray,\nin which more stringent detection criteria for additional cosmic\nrays will be used.</p>\n</div></blockquote>\n<p><strong>driz_cr_ctegrow</strong> : int (Default = 0)</p>\n<blockquote>\n<div><p>Length, in pixels, of the CTE tail that should be masked in\nthe drizzled output.</p>\n</div></blockquote>\n<p><strong>driz_cr_scale</strong> : str (Default = &#8216;1.2 0.7&#8217;)</p>\n<blockquote class=\"last\">\n<div><p>Scaling factor applied to the derivative in <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">driz_cr</span></code> when detecting\ncosmic-rays. See the help file for <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">driz_cr</span></code> for further discussion\nof this parameter.</p>\n</div></blockquote>\n</td>\n</tr>\n</tbody>\n</table>\n<p class=\"rubric\">Notes</p>\n<p>These tasks are designed to work together seamlessly when run in the\nfull <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">AstroDrizzle</span></code> interface. More advanced users may wish to create\nspecialized scripts for their own datasets, making use of only a subset\nof the predefined AstroDrizzle tasks, or add additional processing,\nwhich may be useful for their particular data. In these cases, individual\naccess to the tasks is important.</p>\n<p>Something to keep in mind is that the full <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">AstroDrizzle</span></code> interface will\nmake backup copies of your original files and place them in\nthe <code class=\"docutils literal\"><span class=\"pre\">OrIg/</span></code> directory of your current working directory. 
If you are\nworking with the stand-alone interfaces, it is assumed that the user\nhas already taken care of backing up their original datafiles as the\ninput file will be directly altered.</p>\n<p class=\"rubric\">Examples</p>\n<p>Basic example of how to call drizCR yourself from a Python command line using the default parameters for the task.</p>\n<div class=\"highlight-default\"><div class=\"highlight\"><pre><span></span><span class=\"gp\">&gt;&gt;&gt; </span><span class=\"kn\">from</span> <span class=\"nn\">drizzlepac</span> <span class=\"k\">import</span> <span class=\"n\">drizCR</span>\n<span class=\"gp\">&gt;&gt;&gt; </span><span class=\"n\">drizCR</span><span class=\"o\">.</span><span class=\"n\">drizCR</span><span class=\"p\">(</span><span class=\"s1\">&#39;*flt.fits&#39;</span><span class=\"p\">)</span>\n</pre></div>\n</div>\n</dd></dl>\n\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.getHelpAsString\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">getHelpAsString</code><span class=\"sig-paren\">(</span><em>docstring=False</em>, <em>show_ver=True</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#getHelpAsString\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.getHelpAsString\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>return useful help from a file in the script directory called\n__taskname__.help</p>\n</dd></dl>\n\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.help\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">help</code><span class=\"sig-paren\">(</span><em>file=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#help\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.help\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Print out syntax help for running astrodrizzle</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Parameters:</th><td class=\"field-body\"><p class=\"first\"><strong id=\"file\">file</strong> : str (Default = None)</p>\n<blockquote class=\"last\">\n<div><p>If given, write out help to the filename specified by this parameter.\nAny previously existing file with this name will be deleted before\nwriting out the help.</p>\n</div></blockquote>\n</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.run\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">run</code><span class=\"sig-paren\">(</span><em>configObj</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#run\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.run\" title=\"Permalink to this definition\">¶</a></dt>\n<dd></dd></dl>\n\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.rundrizCR\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">rundrizCR</code><span class=\"sig-paren\">(</span><em>imgObjList</em>, <em>configObj</em>, <em>procSteps=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#rundrizCR\"><span 
class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.rundrizCR\" title=\"Permalink to this definition\">¶</a></dt>\n<dd></dd></dl>\n\n<dl class=\"function\">\n<dt id=\"drizzlepac.drizCR.setDefaults\">\n<code class=\"descclassname\">drizzlepac.drizCR.</code><code class=\"descname\">setDefaults</code><span class=\"sig-paren\">(</span><em>configObj={}</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/drizCR.html#setDefaults\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.drizCR.setDefaults\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return a dictionary of the default parameters\nwhich have also been updated with the user overrides.</p>\n</dd></dl>\n\n</div>\n\n\n          </div>\n        </div>\n      </div>\n      <div class=\"clearer\"></div>\n    </div>\n    <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n      <h3>Navigation</h3>\n      <ul>\n        <li class=\"right\" style=\"margin-right: 10px\">\n          <a href=\"genindex.html\" title=\"General Index\"\n             >index</a></li>\n        <li class=\"right\" >\n          <a href=\"py-modindex.html\" title=\"Python Module Index\"\n             >modules</a> |</li>\n        <li class=\"right\" >\n          <a href=\"util.html\" title=\"Utilities\"\n             >next</a> |</li>\n        <li class=\"right\" >\n          <a href=\"ablot.html\" title=\"Step 6: Blotting the Median Image\"\n             >previous</a> |</li>\n        <li class=\"nav-item nav-item-0\"><a href=\"index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li> \n      </ul>\n    </div>\n    <div class=\"footer\" role=\"contentinfo\">\n        &#169; Copyright 2017, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara.\n      Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.5.1.\n    </div>\n  </body>\n</html>" }, { "alpha_fraction": 0.5919459462165833, "alphanum_fraction": 0.6019637584686279, "avg_line_length": 151.6876983642578, "blob_id": "7571c39334fbd0a132f6cd91b74014425385f253", "content_id": "cd8fbf0e65f82538ac610cbde97875f10d64cffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 196050, "license_type": "no_license", "max_line_length": 1246, "num_lines": 1284, "path": "/lib/drizzlepac/htmlhelp/_modules/drizzlepac/tweakutils.html", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n  <head>\n    <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n    \n    <title>drizzlepac.tweakutils &#8212; DrizzlePac 2.1.16 (05-June-2017) documentation</title>\n    \n    <link rel=\"stylesheet\" href=\"../../_static/stsci_sphinx.css\" type=\"text/css\" />\n    <link rel=\"stylesheet\" href=\"../../_static/pygments.css\" type=\"text/css\" />\n    \n    <script type=\"text/javascript\">\n      var DOCUMENTATION_OPTIONS = {\n        URL_ROOT: '../../',\n        VERSION: '2.1.16 (05-June-2017)',\n        COLLAPSE_INDEX: false,\n        FILE_SUFFIX: '.html',\n        HAS_SOURCE: true,\n        SOURCELINK_SUFFIX: '.txt'\n      };\n    </script>\n    <script type=\"text/javascript\" src=\"../../_static/jquery.js\"></script>\n    <script type=\"text/javascript\" src=\"../../_static/underscore.js\"></script>\n    <script type=\"text/javascript\" src=\"../../_static/doctools.js\"></script>\n    <link rel=\"index\" title=\"Index\" href=\"../../genindex.html\" />\n    <link rel=\"search\" title=\"Search\" href=\"../../search.html\" /> \n 
</head>\n <body role=\"document\">\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"../../genindex.html\" title=\"General Index\"\n accesskey=\"I\">index</a></li>\n <li class=\"right\" >\n <a href=\"../../py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"../../index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"../index.html\" accesskey=\"U\">Module code</a> &#187;</li> \n </ul>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n <p class=\"logo\"><a href=\"../../index.html\">\n <img class=\"logo\" src=\"../../_static/stsci_logo.png\" alt=\"Logo\"/>\n </a></p>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3>Quick search</h3>\n <form class=\"search\" action=\"../../search.html\" method=\"get\">\n <div><input type=\"text\" name=\"q\" /></div>\n <div><input type=\"submit\" value=\"Go\" /></div>\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n </form>\n</div>\n<script type=\"text/javascript\">$('#searchbox').show(0);</script>\n </div>\n </div>\n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n <div class=\"body\" role=\"main\">\n \n <h1>Source code for drizzlepac.tweakutils</h1><div class=\"highlight\"><pre>\n<span></span><span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\">:Authors: Warren Hack</span>\n\n<span class=\"sd\">:License: `&lt;http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE&gt;`_</span>\n\n<span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"kn\">from</span> <span class=\"nn\">__future__</span> <span class=\"k\">import</span> <span class=\"n\">absolute_import</span><span class=\"p\">,</span> <span class=\"n\">division</span><span class=\"p\">,</span> <span class=\"n\">print_function</span>\n<span class=\"kn\">import</span> <span class=\"nn\">string</span><span class=\"o\">,</span><span class=\"nn\">os</span>\n\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"kn\">import</span> <span class=\"nn\">stsci.ndimage</span> <span class=\"k\">as</span> <span class=\"nn\">ndimage</span>\n\n<span class=\"kn\">from</span> <span class=\"nn\">stsci.tools</span> <span class=\"k\">import</span> <span class=\"n\">asnutil</span><span class=\"p\">,</span> <span class=\"n\">irafglob</span><span class=\"p\">,</span> <span class=\"n\">parseinput</span><span class=\"p\">,</span> <span class=\"n\">fileutil</span>\n<span class=\"kn\">from</span> <span class=\"nn\">astropy.io</span> <span class=\"k\">import</span> <span class=\"n\">fits</span>\n<span class=\"kn\">import</span> <span class=\"nn\">astropy.coordinates</span> <span class=\"k\">as</span> <span class=\"nn\">coords</span>\n<span class=\"kn\">import</span> <span class=\"nn\">astropy.units</span> <span class=\"k\">as</span> <span class=\"nn\">u</span>\n\n<span class=\"kn\">import</span> <span class=\"nn\">stsci.imagestats</span> <span class=\"k\">as</span> <span class=\"nn\">imagestats</span>\n\n<span class=\"kn\">from</span> <span class=\"nn\">.</span> <span class=\"k\">import</span> <span class=\"n\">findobj</span>\n<span class=\"kn\">from</span> 
<span class=\"nn\">.</span> <span class=\"k\">import</span> <span class=\"n\">cdriz</span>\n\n<span class=\"k\">def</span> <span class=\"nf\">parse_input</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">,</span> <span class=\"n\">prodonly</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">,</span> <span class=\"n\">sort_wildcards</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">):</span>\n <span class=\"n\">catlist</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">,</span> <span class=\"nb\">list</span><span class=\"p\">)</span> <span class=\"ow\">and</span> <span class=\"p\">(</span><span class=\"s1\">&#39;_asn&#39;</span> <span class=\"ow\">in</span> <span class=\"nb\">input</span> <span class=\"ow\">or</span> <span class=\"s1\">&#39;_asc&#39;</span> <span class=\"ow\">in</span> <span class=\"nb\">input</span><span class=\"p\">):</span>\n <span class=\"c1\"># Input is an association table</span>\n <span class=\"c1\"># Get the input files</span>\n <span class=\"n\">oldasndict</span> <span class=\"o\">=</span> <span class=\"n\">asnutil</span><span class=\"o\">.</span><span class=\"n\">readASNTable</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">,</span> <span class=\"n\">prodonly</span><span class=\"o\">=</span><span class=\"n\">prodonly</span><span class=\"p\">)</span>\n <span class=\"n\">filelist</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">buildRootname</span><span class=\"p\">(</span><span class=\"n\">fname</span><span class=\"p\">)</span> <span class=\"k\">for</span> <span class=\"n\">fname</span> <span class=\"ow\">in</span> <span class=\"n\">oldasndict</span><span class=\"p\">[</span><span class=\"s1\">&#39;order&#39;</span><span class=\"p\">]]</span>\n\n <span class=\"k\">elif</span> <span class=\"ow\">not</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">,</span> <span class=\"nb\">list</span><span class=\"p\">)</span> <span class=\"ow\">and</span> <span class=\"nb\">input</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;@&#39;</span><span class=\"p\">:</span>\n <span class=\"c1\"># input is an @ file</span>\n <span class=\"n\">f</span> <span class=\"o\">=</span> <span class=\"nb\">open</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:])</span>\n <span class=\"c1\"># Read the first line in order to determine whether</span>\n <span class=\"c1\"># catalog files have been specified in a second column...</span>\n <span class=\"n\">line</span> <span class=\"o\">=</span> <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">readline</span><span class=\"p\">()</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"c1\"># Parse the @-file with irafglob to extract the input filename</span>\n <span class=\"n\">filelist</span> <span class=\"o\">=</span> <span class=\"n\">irafglob</span><span class=\"o\">.</span><span class=\"n\">irafglob</span><span 
class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">,</span> <span class=\"n\">atfile</span><span class=\"o\">=</span><span class=\"n\">atfile_sci</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"n\">line</span><span class=\"p\">)</span>\n <span class=\"c1\"># If there are additional columns for catalog files...</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">split</span><span class=\"p\">())</span> <span class=\"o\">&gt;</span> <span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"c1\"># ...parse out the names of the catalog files as well</span>\n <span class=\"n\">catlist</span><span class=\"p\">,</span><span class=\"n\">catdict</span> <span class=\"o\">=</span> <span class=\"n\">parse_atfile_cat</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">)</span>\n <span class=\"k\">elif</span> <span class=\"p\">(</span><span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">,</span> <span class=\"nb\">list</span><span class=\"p\">)):</span>\n <span class=\"c1\"># input a python list</span>\n <span class=\"n\">filelist</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">fn</span> <span class=\"ow\">in</span> <span class=\"nb\">input</span><span class=\"p\">:</span>\n <span class=\"n\">flist</span><span class=\"p\">,</span> <span class=\"n\">output</span> <span class=\"o\">=</span> <span class=\"n\">parse_input</span><span class=\"p\">(</span><span class=\"n\">fn</span><span class=\"p\">,</span> <span class=\"n\">prodonly</span><span class=\"o\">=</span><span class=\"n\">prodonly</span><span class=\"p\">)</span>\n <span class=\"c1\"># if wild-cards are given, sort for uniform usage:</span>\n <span class=\"k\">if</span> <span class=\"n\">fn</span><span class=\"o\">.</span><span class=\"n\">find</span><span class=\"p\">(</span><span class=\"s1\">&#39;*&#39;</span><span class=\"p\">)</span> <span class=\"o\">&gt;</span> <span class=\"o\">-</span><span class=\"mi\">1</span> <span class=\"ow\">and</span> <span class=\"n\">sort_wildcards</span><span class=\"p\">:</span>\n <span class=\"n\">flist</span><span class=\"o\">.</span><span class=\"n\">sort</span><span class=\"p\">()</span>\n <span class=\"n\">filelist</span> <span class=\"o\">+=</span> <span class=\"n\">flist</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\"># input is either a string or something unrecognizable, so give it a try:</span>\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"n\">filelist</span><span class=\"p\">,</span> <span class=\"n\">output</span> <span class=\"o\">=</span> <span class=\"n\">parseinput</span><span class=\"o\">.</span><span class=\"n\">parseinput</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">)</span>\n <span class=\"c1\"># if wild-cards are given, sort for uniform usage:</span>\n <span class=\"k\">if</span> <span class=\"nb\">input</span><span class=\"o\">.</span><span class=\"n\">find</span><span class=\"p\">(</span><span class=\"s1\">&#39;*&#39;</span><span class=\"p\">)</span> <span class=\"o\">&gt;</span> <span class=\"o\">-</span><span class=\"mi\">1</span> <span class=\"ow\">and</span> <span class=\"n\">sort_wildcards</span><span class=\"p\">:</span>\n <span 
class=\"n\">filelist</span><span class=\"o\">.</span><span class=\"n\">sort</span><span class=\"p\">()</span>\n <span class=\"k\">except</span> <span class=\"ne\">IOError</span><span class=\"p\">:</span> <span class=\"k\">raise</span>\n\n <span class=\"k\">return</span> <span class=\"n\">filelist</span><span class=\"p\">,</span><span class=\"n\">catlist</span>\n\n<span class=\"k\">def</span> <span class=\"nf\">atfile_sci</span><span class=\"p\">(</span><span class=\"n\">line</span><span class=\"p\">):</span>\n <span class=\"k\">if</span> <span class=\"n\">line</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">]:</span>\n <span class=\"n\">lspl</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;&#39;</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">lspl</span> <span class=\"o\">=</span> <span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">split</span><span class=\"p\">()[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"k\">return</span> <span class=\"n\">lspl</span>\n\n<div class=\"viewcode-block\" id=\"parse_atfile_cat\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.parse_atfile_cat\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">parse_atfile_cat</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Return the list of catalog filenames specified as part of the input @-file</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"c1\"># input is an @ file</span>\n <span class=\"n\">f</span> <span class=\"o\">=</span> <span class=\"nb\">open</span><span class=\"p\">(</span><span class=\"nb\">input</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:])</span>\n <span class=\"n\">catlist</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"n\">catdict</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"k\">for</span> <span class=\"n\">line</span> <span class=\"ow\">in</span> <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">readlines</span><span class=\"p\">():</span>\n <span class=\"k\">if</span> <span class=\"n\">line</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;#&#39;</span> <span class=\"ow\">or</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">strip</span><span class=\"p\">())</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span>\n <span class=\"k\">continue</span>\n <span class=\"n\">lspl</span> <span class=\"o\">=</span> <span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">split</span><span class=\"p\">()</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">lspl</span><span class=\"p\">)</span> <span class=\"o\">&gt;</span> <span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">catdict</span><span class=\"p\">[</span><span class=\"n\">lspl</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]]</span> <span class=\"o\">=</span> <span class=\"n\">lspl</span><span 
class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:]</span>\n <span class=\"n\">catlist</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">lspl</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:])</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">catdict</span><span class=\"p\">[</span><span class=\"n\">lspl</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]]</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"n\">catlist</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"kc\">None</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">catlist</span><span class=\"p\">,</span><span class=\"n\">catdict</span></div>\n\n<span class=\"c1\">#</span>\n<span class=\"c1\"># functions to help work with configobj input</span>\n<span class=\"c1\">#</span>\n<span class=\"k\">def</span> <span class=\"nf\">get_configobj_root</span><span class=\"p\">(</span><span class=\"n\">configobj</span><span class=\"p\">):</span>\n <span class=\"n\">kwargs</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"k\">for</span> <span class=\"n\">key</span> <span class=\"ow\">in</span> <span class=\"n\">configobj</span><span class=\"p\">:</span>\n <span class=\"c1\"># Only copy in those entries which start with lower case letters</span>\n <span class=\"c1\"># since sections are all upper-case for this task</span>\n <span class=\"k\">if</span> <span class=\"n\">key</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">islower</span><span class=\"p\">():</span> <span class=\"n\">kwargs</span><span class=\"p\">[</span><span class=\"n\">key</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">configobj</span><span class=\"p\">[</span><span class=\"n\">key</span><span class=\"p\">]</span>\n <span class=\"k\">return</span> <span class=\"n\">kwargs</span>\n\n\n<span class=\"k\">def</span> <span class=\"nf\">ndfind</span><span class=\"p\">(</span><span class=\"n\">array</span><span class=\"p\">,</span> <span class=\"n\">hmin</span><span class=\"p\">,</span> <span class=\"n\">fwhm</span><span class=\"p\">,</span> <span class=\"n\">skymode</span><span class=\"p\">,</span>\n <span class=\"n\">sharplim</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"mf\">0.2</span><span class=\"p\">,</span><span class=\"mf\">1.0</span><span class=\"p\">],</span> <span class=\"n\">roundlim</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"n\">minpix</span><span class=\"o\">=</span><span class=\"mi\">5</span><span class=\"p\">,</span>\n <span class=\"n\">peakmin</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">peakmax</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">fluxmin</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">fluxmax</span><span class=\"o\">=</span><span class=\"kc\">None</span><span 
class=\"p\">,</span>\n <span class=\"n\">nsigma</span><span class=\"o\">=</span><span class=\"mf\">1.5</span><span class=\"p\">,</span> <span class=\"n\">ratio</span><span class=\"o\">=</span><span class=\"mf\">1.0</span><span class=\"p\">,</span> <span class=\"n\">theta</span><span class=\"o\">=</span><span class=\"mf\">0.0</span><span class=\"p\">,</span>\n <span class=\"n\">mask</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">use_sharp_round</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">):</span>\n <span class=\"n\">star_list</span><span class=\"p\">,</span><span class=\"n\">fluxes</span><span class=\"o\">=</span> <span class=\"n\">findobj</span><span class=\"o\">.</span><span class=\"n\">findstars</span><span class=\"p\">(</span><span class=\"n\">array</span><span class=\"p\">,</span> <span class=\"n\">fwhm</span><span class=\"p\">,</span> <span class=\"n\">hmin</span><span class=\"p\">,</span> <span class=\"n\">skymode</span><span class=\"p\">,</span>\n <span class=\"n\">peakmin</span><span class=\"o\">=</span><span class=\"n\">peakmin</span><span class=\"p\">,</span> <span class=\"n\">peakmax</span><span class=\"o\">=</span><span class=\"n\">peakmax</span><span class=\"p\">,</span>\n <span class=\"n\">fluxmin</span><span class=\"o\">=</span><span class=\"n\">fluxmin</span><span class=\"p\">,</span> <span class=\"n\">fluxmax</span><span class=\"o\">=</span><span class=\"n\">fluxmax</span><span class=\"p\">,</span>\n <span class=\"n\">ratio</span><span class=\"o\">=</span><span class=\"n\">ratio</span><span class=\"p\">,</span> <span class=\"n\">nsigma</span><span class=\"o\">=</span><span class=\"n\">nsigma</span><span class=\"p\">,</span> <span class=\"n\">theta</span><span class=\"o\">=</span><span class=\"n\">theta</span><span class=\"p\">,</span>\n <span class=\"n\">use_sharp_round</span><span class=\"o\">=</span><span class=\"n\">use_sharp_round</span><span class=\"p\">,</span>\n <span class=\"n\">mask</span><span class=\"o\">=</span><span class=\"n\">mask</span><span class=\"p\">,</span>\n <span class=\"n\">sharplo</span><span class=\"o\">=</span><span class=\"n\">sharplim</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">sharphi</span><span class=\"o\">=</span><span class=\"n\">sharplim</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">],</span>\n <span class=\"n\">roundlo</span><span class=\"o\">=</span><span class=\"n\">roundlim</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">roundhi</span><span class=\"o\">=</span><span class=\"n\">roundlim</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">])</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">star_list</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;No valid sources found...&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"nb\">tuple</span><span class=\"p\">([[]</span> <span class=\"k\">for</span> <span class=\"n\">i</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">7</span> <span class=\"k\">if</span> <span class=\"n\">use_sharp_round</span> <span 
class=\"k\">else</span> <span class=\"mi\">4</span><span class=\"p\">)])</span>\n <span class=\"n\">star_arr</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">(</span><span class=\"n\">star_list</span><span class=\"p\">)</span>\n <span class=\"n\">fluxes</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">(</span><span class=\"n\">fluxes</span><span class=\"p\">,</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">float32</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">use_sharp_round</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"p\">(</span><span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"n\">fluxes</span><span class=\"p\">,</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arange</span><span class=\"p\">(</span><span class=\"n\">star_arr</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]),</span>\n <span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">3</span><span class=\"p\">],</span> <span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">4</span><span class=\"p\">])</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"p\">(</span><span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">star_arr</span><span class=\"p\">[:,</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"n\">fluxes</span><span class=\"p\">,</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arange</span><span class=\"p\">(</span><span class=\"n\">star_arr</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]),</span> <span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"kc\">None</span><span class=\"p\">)</span>\n\n\n<span class=\"c1\"># Object finding algorithm based on NDIMAGE routines</span>\n<div class=\"viewcode-block\" id=\"ndfind_old\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.ndfind_old\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">ndfind_old</span><span class=\"p\">(</span><span class=\"n\">array</span><span class=\"p\">,</span><span class=\"n\">hmin</span><span class=\"p\">,</span><span class=\"n\">fwhm</span><span class=\"p\">,</span><span class=\"n\">sharplim</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"mf\">0.2</span><span class=\"p\">,</span><span class=\"mf\">1.0</span><span class=\"p\">],</span><span class=\"n\">roundlim</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">],</span><span class=\"n\">minpix</span><span 
class=\"o\">=</span><span class=\"mi\">5</span><span class=\"p\">,</span><span class=\"n\">datamax</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Source finding algorithm based on NDIMAGE routines</span>\n\n<span class=\"sd\"> This function provides a simple replacement for the DAOFIND task.</span>\n\n<span class=\"sd\"> Parameters</span>\n<span class=\"sd\"> ----------</span>\n<span class=\"sd\"> array : arr</span>\n<span class=\"sd\"> Input image as numpy array</span>\n<span class=\"sd\"> hmin : float</span>\n<span class=\"sd\"> Limit for source detection in pixel values</span>\n<span class=\"sd\"> fwhm : float</span>\n<span class=\"sd\"> Full-width half-maximum of the PSF in the image</span>\n<span class=\"sd\"> minpix : int</span>\n<span class=\"sd\"> Minimum number of pixels for any valid source</span>\n<span class=\"sd\"> sharplim : tuple</span>\n<span class=\"sd\"> [Not used at this time]</span>\n<span class=\"sd\"> roundlim : tuple</span>\n<span class=\"sd\"> [Not used at this time]</span>\n<span class=\"sd\"> datamax : float</span>\n<span class=\"sd\"> Maximum good pixel value found in any detected source</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> x : arr</span>\n<span class=\"sd\"> Array of detected source X positions (in array coordinates, 0-based)</span>\n<span class=\"sd\"> y : arr</span>\n<span class=\"sd\"> Array of detected source Y positions (in array coordinates, 0-based)</span>\n<span class=\"sd\"> flux : arr</span>\n<span class=\"sd\"> Array of detected source fluxes in pixel values</span>\n<span class=\"sd\"> id : arr</span>\n<span class=\"sd\"> Array of detected source ID numbers</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"c1\">#cimg,c1 = idlgauss_convolve(array,sigma)</span>\n <span class=\"c1\">#cimg = np.abs(ndimage.gaussian_laplace(array,fwhm))</span>\n <span class=\"n\">cimg</span> <span class=\"o\">=</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"o\">*</span><span class=\"n\">ndimage</span><span class=\"o\">.</span><span class=\"n\">gaussian_laplace</span><span class=\"p\">(</span><span class=\"n\">array</span><span class=\"p\">,</span><span class=\"n\">fwhm</span><span class=\"p\">)</span>\n <span class=\"n\">cimg</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">clip</span><span class=\"p\">(</span><span class=\"n\">cimg</span><span class=\"p\">,</span><span class=\"mi\">0</span><span class=\"p\">,</span><span class=\"n\">cimg</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">())</span>\n <span class=\"c1\">#cimg = ndimage.gaussian_filter(array,fwhm)</span>\n\n <span class=\"n\">climit</span> <span class=\"o\">=</span> <span class=\"n\">hmin</span> <span class=\"o\">/</span> <span class=\"n\">fwhm</span>\n <span class=\"n\">cmask</span> <span class=\"o\">=</span> <span class=\"n\">cimg</span> <span class=\"o\">&gt;=</span> <span class=\"n\">climit</span>\n <span class=\"n\">gwidth</span> <span class=\"o\">=</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">fwhm</span><span class=\"o\">+</span><span class=\"mf\">0.5</span><span class=\"p\">)</span>\n <span class=\"n\">gradius</span> <span class=\"o\">=</span> <span class=\"n\">gwidth</span><span class=\"o\">//</span><span class=\"mi\">2</span>\n\n <span 
class=\"c1\">#cmask = cimg &gt;= hmin</span>\n <span class=\"c1\"># find and label sources</span>\n <span class=\"n\">ckern</span> <span class=\"o\">=</span> <span class=\"n\">ndimage</span><span class=\"o\">.</span><span class=\"n\">generate_binary_structure</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">)</span>\n <span class=\"n\">clabeled</span><span class=\"p\">,</span><span class=\"n\">cnum</span> <span class=\"o\">=</span> <span class=\"n\">ndimage</span><span class=\"o\">.</span><span class=\"n\">label</span><span class=\"p\">(</span><span class=\"n\">cmask</span><span class=\"p\">,</span><span class=\"n\">structure</span><span class=\"o\">=</span><span class=\"n\">ckern</span><span class=\"p\">)</span>\n <span class=\"n\">cobjs</span> <span class=\"o\">=</span> <span class=\"n\">ndimage</span><span class=\"o\">.</span><span class=\"n\">find_objects</span><span class=\"p\">(</span><span class=\"n\">clabeled</span><span class=\"p\">)</span>\n <span class=\"n\">xpos</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"n\">ypos</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"n\">flux</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">s</span> <span class=\"ow\">in</span> <span class=\"n\">cobjs</span><span class=\"p\">:</span>\n <span class=\"n\">nmask</span> <span class=\"o\">=</span> <span class=\"n\">cmask</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">()</span>\n <span class=\"k\">if</span> <span class=\"n\">nmask</span> <span class=\"o\">&gt;=</span> <span class=\"n\">minpix</span><span class=\"p\">:</span> <span class=\"c1\"># eliminate spurious detections</span>\n <span class=\"n\">imgsect</span> <span class=\"o\">=</span> <span class=\"n\">array</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">]</span><span class=\"o\">*</span><span class=\"n\">cmask</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">datamax</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span> <span class=\"ow\">and</span> <span class=\"p\">(</span><span class=\"n\">imgsect</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">()</span> <span class=\"o\">&gt;</span> <span class=\"n\">datamax</span><span class=\"p\">):</span>\n <span class=\"k\">continue</span> <span class=\"c1\"># skip any source with pixel value &gt; datamax</span>\n <span class=\"n\">cimgsect</span> <span class=\"o\">=</span> <span class=\"n\">cimg</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">]</span><span class=\"o\">*</span><span class=\"n\">cmask</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">]</span>\n\n <span class=\"n\">maxposind</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">where</span><span class=\"p\">(</span><span class=\"n\">cimgsect</span><span class=\"o\">==</span><span class=\"n\">cimgsect</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">())</span>\n <span class=\"n\">maxpos</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">maxposind</span><span class=\"p\">[</span><span 
class=\"mi\">0</span><span class=\"p\">][</span><span class=\"mi\">0</span><span class=\"p\">],</span><span class=\"n\">maxposind</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">][</span><span class=\"mi\">0</span><span class=\"p\">])</span>\n <span class=\"n\">yr0</span> <span class=\"o\">=</span> <span class=\"n\">maxpos</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">-</span><span class=\"n\">gradius</span>\n <span class=\"n\">yr1</span> <span class=\"o\">=</span> <span class=\"n\">maxpos</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">gradius</span>\n <span class=\"k\">if</span> <span class=\"n\">yr0</span> <span class=\"o\">&lt;</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"n\">yr0</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"k\">if</span> <span class=\"n\">yr1</span> <span class=\"o\">&gt;</span> <span class=\"n\">cimgsect</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]:</span> <span class=\"n\">yr1</span> <span class=\"o\">=</span> <span class=\"n\">cimgsect</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"n\">xr0</span> <span class=\"o\">=</span> <span class=\"n\">maxpos</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">-</span> <span class=\"n\">gradius</span>\n <span class=\"n\">xr1</span> <span class=\"o\">=</span> <span class=\"n\">maxpos</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">+</span> <span class=\"n\">gradius</span>\n <span class=\"k\">if</span> <span class=\"n\">xr0</span> <span class=\"o\">&lt;</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"n\">xr0</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"k\">if</span> <span class=\"n\">xr1</span> <span class=\"o\">&gt;</span> <span class=\"n\">cimgsect</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]:</span> <span class=\"n\">xr1</span> <span class=\"o\">=</span> <span class=\"n\">cimgsect</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n\n <span class=\"n\">yx</span> <span class=\"o\">=</span> <span class=\"n\">ndimage</span><span class=\"o\">.</span><span class=\"n\">center_of_mass</span><span class=\"p\">(</span><span class=\"n\">cimgsect</span><span class=\"p\">[</span><span class=\"n\">yr0</span><span class=\"p\">:</span><span class=\"n\">yr1</span><span class=\"p\">,</span><span class=\"n\">xr0</span><span class=\"p\">:</span><span class=\"n\">xr1</span><span class=\"p\">])</span>\n <span class=\"c1\"># convert position to chip position in (0-based) X,Y</span>\n <span class=\"n\">xpos</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">yx</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">s</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">.</span><span 
class=\"n\">start</span><span class=\"o\">+</span><span class=\"n\">yr0</span><span class=\"p\">)</span>\n <span class=\"n\">ypos</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">yx</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">s</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">start</span><span class=\"o\">+</span><span class=\"n\">xr0</span><span class=\"p\">)</span>\n <span class=\"n\">flux</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">array</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">]</span><span class=\"o\">*</span><span class=\"n\">cmask</span><span class=\"p\">[</span><span class=\"n\">s</span><span class=\"p\">])</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">())</span>\n <span class=\"c1\"># Still need to implement sharpness and roundness limits</span>\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">(</span><span class=\"n\">xpos</span><span class=\"p\">),</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">(</span><span class=\"n\">ypos</span><span class=\"p\">),</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">(</span><span class=\"n\">flux</span><span class=\"p\">),</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arange</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">xpos</span><span class=\"p\">))</span></div>\n\n<div class=\"viewcode-block\" id=\"isfloat\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.isfloat\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">isfloat</span><span class=\"p\">(</span><span class=\"n\">value</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Return True if all characters are part of a floating point value</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"n\">x</span> <span class=\"o\">=</span> <span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">value</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"kc\">True</span>\n <span class=\"k\">except</span> <span class=\"ne\">ValueError</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"kc\">False</span></div>\n\n<div class=\"viewcode-block\" id=\"parse_skypos\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.parse_skypos\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">parse_skypos</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">,</span><span class=\"n\">dec</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Function to parse RA and Dec input values and turn them into decimal degrees</span>\n\n<span class=\"sd\"> Input formats could be:</span>\n<span class=\"sd\"> [&quot;nn&quot;,&quot;nn&quot;,&quot;nn.nn&quot;]</span>\n<span class=\"sd\"> &quot;nn nn nn.nnn&quot;</span>\n<span class=\"sd\"> &quot;nn:nn:nn.nn&quot;</span>\n<span 
class=\"sd\"> &quot;nnH nnM nn.nnS&quot; or &quot;nnD nnM nn.nnS&quot;</span>\n<span class=\"sd\"> nn.nnnnnnnn</span>\n<span class=\"sd\"> &quot;nn.nnnnnnn&quot;</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">rval</span> <span class=\"o\">=</span> <span class=\"n\">make_val_float</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">)</span>\n <span class=\"n\">dval</span> <span class=\"o\">=</span> <span class=\"n\">make_val_float</span><span class=\"p\">(</span><span class=\"n\">dec</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">rval</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">rval</span><span class=\"p\">,</span><span class=\"n\">dval</span> <span class=\"o\">=</span> <span class=\"n\">radec_hmstodd</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">,</span><span class=\"n\">dec</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"n\">rval</span><span class=\"p\">,</span><span class=\"n\">dval</span></div>\n\n<span class=\"k\">def</span> <span class=\"nf\">make_val_float</span><span class=\"p\">(</span><span class=\"n\">val</span><span class=\"p\">):</span>\n <span class=\"k\">if</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">val</span><span class=\"p\">,</span><span class=\"nb\">float</span><span class=\"p\">):</span>\n <span class=\"n\">rval</span> <span class=\"o\">=</span> <span class=\"n\">val</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"n\">rval</span> <span class=\"o\">=</span> <span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">val</span><span class=\"p\">)</span>\n <span class=\"k\">except</span> <span class=\"ne\">ValueError</span><span class=\"p\">:</span>\n <span class=\"n\">rval</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">return</span> <span class=\"n\">rval</span>\n\n<div class=\"viewcode-block\" id=\"radec_hmstodd\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.radec_hmstodd\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">radec_hmstodd</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">,</span><span class=\"n\">dec</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Function to convert HMS values into decimal degrees.</span>\n\n<span class=\"sd\"> This function relies on the astropy.coordinates package to perform the</span>\n<span class=\"sd\"> conversion to decimal degrees.</span>\n\n<span class=\"sd\"> Parameters</span>\n<span class=\"sd\"> ----------</span>\n<span class=\"sd\"> ra : list or array</span>\n<span class=\"sd\"> List or array of input RA positions</span>\n<span class=\"sd\"> dec : list or array</span>\n<span class=\"sd\"> List or array of input Dec positions</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> pos : arr</span>\n<span class=\"sd\"> Array of RA,Dec positions in decimal degrees</span>\n\n<span class=\"sd\"> Notes</span>\n<span class=\"sd\"> -----</span>\n<span class=\"sd\"> This function supports any specification of RA and Dec as HMS or DMS;</span>\n<span class=\"sd\"> specifically, the formats::</span>\n\n<span class=\"sd\"> [&quot;nn&quot;,&quot;nn&quot;,&quot;nn.nn&quot;]</span>\n<span class=\"sd\"> 
&quot;nn nn nn.nnn&quot;</span>\n<span class=\"sd\"> &quot;nn:nn:nn.nn&quot;</span>\n<span class=\"sd\"> &quot;nnH nnM nn.nnS&quot; or &quot;nnD nnM nn.nnS&quot;</span>\n\n<span class=\"sd\"> See Also</span>\n<span class=\"sd\"> --------</span>\n<span class=\"sd\"> astropy.coordinates</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">hmstrans</span> <span class=\"o\">=</span> <span class=\"n\">string</span><span class=\"o\">.</span><span class=\"n\">maketrans</span><span class=\"p\">(</span><span class=\"n\">string</span><span class=\"o\">.</span><span class=\"n\">ascii_letters</span><span class=\"p\">,</span><span class=\"s1\">&#39; &#39;</span><span class=\"o\">*</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">string</span><span class=\"o\">.</span><span class=\"n\">ascii_letters</span><span class=\"p\">))</span>\n\n <span class=\"k\">if</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">,</span><span class=\"nb\">list</span><span class=\"p\">):</span>\n <span class=\"n\">rastr</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;:&#39;</span><span class=\"o\">.</span><span class=\"n\">join</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">)</span>\n <span class=\"k\">elif</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">ra</span><span class=\"p\">,</span><span class=\"nb\">float</span><span class=\"p\">):</span>\n <span class=\"n\">rastr</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"n\">pos_ra</span> <span class=\"o\">=</span> <span class=\"n\">ra</span>\n <span class=\"k\">elif</span> <span class=\"n\">ra</span><span class=\"o\">.</span><span class=\"n\">find</span><span class=\"p\">(</span><span class=\"s1\">&#39;:&#39;</span><span class=\"p\">)</span> <span class=\"o\">&lt;</span> <span class=\"mi\">0</span><span class=\"p\">:</span>\n <span class=\"c1\"># convert any non-numeric characters to spaces (we already know the units)</span>\n <span class=\"n\">rastr</span> <span class=\"o\">=</span> <span class=\"n\">ra</span><span class=\"o\">.</span><span class=\"n\">translate</span><span class=\"p\">(</span><span class=\"n\">hmstrans</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">strip</span><span class=\"p\">()</span>\n <span class=\"n\">rastr</span> <span class=\"o\">=</span> <span class=\"n\">rastr</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">)</span>\n <span class=\"c1\"># convert &#39;nn nn nn.nn&#39; to final &#39;nn:nn:nn.nn&#39; string</span>\n <span class=\"n\">rastr</span> <span class=\"o\">=</span> <span class=\"n\">rastr</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;:&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">rastr</span> <span class=\"o\">=</span> <span class=\"n\">ra</span>\n\n <span class=\"k\">if</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">dec</span><span class=\"p\">,</span><span class=\"nb\">list</span><span class=\"p\">):</span>\n <span class=\"n\">decstr</span> <span class=\"o\">=</span> <span 
class=\"s1\">&#39;:&#39;</span><span class=\"o\">.</span><span class=\"n\">join</span><span class=\"p\">(</span><span class=\"n\">dec</span><span class=\"p\">)</span>\n <span class=\"k\">elif</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">dec</span><span class=\"p\">,</span><span class=\"nb\">float</span><span class=\"p\">):</span>\n <span class=\"n\">decstr</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"n\">pos_dec</span> <span class=\"o\">=</span> <span class=\"n\">dec</span>\n <span class=\"k\">elif</span> <span class=\"n\">dec</span><span class=\"o\">.</span><span class=\"n\">find</span><span class=\"p\">(</span><span class=\"s1\">&#39;:&#39;</span><span class=\"p\">)</span> <span class=\"o\">&lt;</span> <span class=\"mi\">0</span><span class=\"p\">:</span>\n <span class=\"n\">decstr</span> <span class=\"o\">=</span> <span class=\"n\">dec</span><span class=\"o\">.</span><span class=\"n\">translate</span><span class=\"p\">(</span><span class=\"n\">hmstrans</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">strip</span><span class=\"p\">()</span>\n <span class=\"n\">decstr</span> <span class=\"o\">=</span> <span class=\"n\">decstr</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">)</span>\n <span class=\"n\">decstr</span> <span class=\"o\">=</span> <span class=\"n\">decstr</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;:&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">decstr</span> <span class=\"o\">=</span> <span class=\"n\">dec</span>\n\n <span class=\"k\">if</span> <span class=\"n\">rastr</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">pos</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">pos_ra</span><span class=\"p\">,</span><span class=\"n\">pos_dec</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\">#pos = coords.Position(rastr+&#39; &#39;+decstr,units=&#39;hours&#39;)</span>\n <span class=\"c1\">#return pos.dd()</span>\n <span class=\"n\">pos_coord</span> <span class=\"o\">=</span> <span class=\"n\">coords</span><span class=\"o\">.</span><span class=\"n\">SkyCoord</span><span class=\"p\">(</span><span class=\"n\">rastr</span><span class=\"o\">+</span><span class=\"s1\">&#39; &#39;</span><span class=\"o\">+</span><span class=\"n\">decstr</span><span class=\"p\">,</span><span class=\"n\">unit</span><span class=\"o\">=</span><span class=\"p\">(</span><span class=\"n\">u</span><span class=\"o\">.</span><span class=\"n\">hourangle</span><span class=\"p\">,</span><span class=\"n\">u</span><span class=\"o\">.</span><span class=\"n\">deg</span><span class=\"p\">))</span>\n <span class=\"n\">pos</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">pos_coord</span><span class=\"o\">.</span><span class=\"n\">ra</span><span class=\"o\">.</span><span class=\"n\">deg</span><span class=\"p\">,</span><span class=\"n\">pos_coord</span><span class=\"o\">.</span><span class=\"n\">dec</span><span class=\"o\">.</span><span class=\"n\">deg</span><span class=\"p\">)</span>\n <span 
class=\"k\">return</span> <span class=\"n\">pos</span></div>\n\n\n<div class=\"viewcode-block\" id=\"parse_exclusions\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.parse_exclusions\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">parse_exclusions</span><span class=\"p\">(</span><span class=\"n\">exclusions</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Read in exclusion definitions from file named by &#39;exclusions&#39;</span>\n<span class=\"sd\"> and return a list of positions and distances</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">fname</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">osfn</span><span class=\"p\">(</span><span class=\"n\">exclusions</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"o\">.</span><span class=\"n\">exists</span><span class=\"p\">(</span><span class=\"n\">fname</span><span class=\"p\">):</span>\n <span class=\"n\">fobj</span> <span class=\"o\">=</span> <span class=\"nb\">open</span><span class=\"p\">(</span><span class=\"n\">fname</span><span class=\"p\">)</span>\n <span class=\"n\">flines</span> <span class=\"o\">=</span> <span class=\"n\">fobj</span><span class=\"o\">.</span><span class=\"n\">readlines</span><span class=\"p\">()</span>\n <span class=\"n\">fobj</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;No valid exclusions file &quot;&#39;</span><span class=\"p\">,</span><span class=\"n\">fname</span><span class=\"p\">,</span><span class=\"s1\">&#39;&quot; could be found!&#39;</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;Skipping application of exclusions files to source catalogs.&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"kc\">None</span>\n\n <span class=\"c1\"># Parse out lines which can be interpreted as positions and distances</span>\n <span class=\"n\">exclusion_list</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"n\">units</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">for</span> <span class=\"n\">line</span> <span class=\"ow\">in</span> <span class=\"n\">flines</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">line</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;#&#39;</span> <span class=\"ow\">or</span> <span class=\"s1\">&#39;global&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">line</span><span class=\"p\">[:</span><span class=\"mi\">6</span><span class=\"p\">]:</span>\n <span class=\"k\">continue</span>\n <span class=\"c1\"># Only interpret the part of the line prior to the comment</span>\n <span class=\"c1\"># if a comment has been attached to the line</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;#&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">line</span><span class=\"p\">:</span>\n <span class=\"n\">line</span> <span class=\"o\">=</span> <span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">split</span><span class=\"p\">(</span><span 
class=\"s1\">&#39;#&#39;</span><span class=\"p\">)[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">rstrip</span><span class=\"p\">()</span>\n\n <span class=\"k\">if</span> <span class=\"n\">units</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">units</span><span class=\"o\">=</span><span class=\"s1\">&#39;pixels&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">line</span><span class=\"p\">[:</span><span class=\"mi\">3</span><span class=\"p\">]</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"s1\">&#39;fk4&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;fk5&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;sky&#39;</span><span class=\"p\">]:</span>\n <span class=\"n\">units</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;sky&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">line</span><span class=\"p\">[:</span><span class=\"mi\">5</span><span class=\"p\">]</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"s1\">&#39;image&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;physi&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;pixel&#39;</span><span class=\"p\">]:</span>\n <span class=\"n\">units</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;pixels&#39;</span>\n <span class=\"k\">continue</span>\n\n <span class=\"k\">if</span> <span class=\"s1\">&#39;circle(&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">line</span><span class=\"p\">:</span>\n <span class=\"n\">nline</span> <span class=\"o\">=</span> <span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39;circle(&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">nline</span> <span class=\"o\">=</span> <span class=\"n\">nline</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39;)&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">nline</span> <span class=\"o\">=</span> <span class=\"n\">nline</span><span class=\"o\">.</span><span class=\"n\">replace</span><span class=\"p\">(</span><span class=\"s1\">&#39;&quot;&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">vals</span> <span class=\"o\">=</span> <span class=\"n\">nline</span><span class=\"o\">.</span><span class=\"n\">split</span><span class=\"p\">(</span><span class=\"s1\">&#39;,&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;:&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]:</span>\n <span class=\"n\">posval</span> <span class=\"o\">=</span> <span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"s1\">&#39; &#39;</span><span class=\"o\">+</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">posval</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"nb\">float</span><span 
class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]),</span><span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]))</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\"># Try to interpret unformatted line</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;,&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">line</span><span class=\"p\">:</span>\n <span class=\"n\">split_tok</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;,&#39;</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">split_tok</span><span class=\"o\">=</span><span class=\"s1\">&#39; &#39;</span>\n <span class=\"n\">vals</span> <span class=\"o\">=</span> <span class=\"n\">line</span><span class=\"o\">.</span><span class=\"n\">split</span><span class=\"p\">(</span><span class=\"n\">split_tok</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"mi\">3</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;:&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]:</span>\n <span class=\"n\">posval</span> <span class=\"o\">=</span> <span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"s1\">&#39; &#39;</span><span class=\"o\">+</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">posval</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]),</span><span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]))</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">continue</span>\n <span class=\"n\">exclusion_list</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">({</span><span class=\"s1\">&#39;pos&#39;</span><span class=\"p\">:</span><span class=\"n\">posval</span><span class=\"p\">,</span><span class=\"s1\">&#39;distance&#39;</span><span class=\"p\">:</span><span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">]),</span>\n <span class=\"s1\">&#39;units&#39;</span><span class=\"p\">:</span><span class=\"n\">units</span><span class=\"p\">})</span>\n <span class=\"k\">return</span> <span class=\"n\">exclusion_list</span></div>\n\n<div class=\"viewcode-block\" id=\"parse_colname\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.parse_colname\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">parse_colname</span><span class=\"p\">(</span><span class=\"n\">colname</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Common function to interpret input column names provided by the 
def parse_colname(colname):
    """ Common function to interpret input column names provided by the user.

    This function translates column specification provided by the user
    into a column number.

    Notes
    -----
    This function will understand the following inputs::

        '1,2,3' or 'c1,c2,c3' or ['c1','c2','c3']
        '1-3' or 'c1-c3'
        '1:3' or 'c1:c3'
        '1 2 3' or 'c1 c2 c3'
        '1' or 'c1'
        1

    Parameters
    ----------
    colname :
        Column name or names to be interpreted

    Returns
    -------
    cols : list
        The return value will be a list of strings.

    """
    if isinstance(colname, list):
        cname = ''
        for c in colname:
            cname += str(c) + ','
        cname = cname.rstrip(',')
    elif isinstance(colname, int) or colname.isdigit():
        cname = str(colname)
    else:
        cname = colname

    if 'c' in cname[0]: cname = cname.replace('c', '')

    ctok = None
    cols = None
    if '-' in cname:
        ctok = '-'
    if ':' in cname:
        ctok = ':'
    if ctok is not None:
        cnums = cname.split(ctok)
        c = list(range(int(cnums[0]), int(cnums[1]) + 1))
        cols = []
        for i in c:
            cols.append(str(i))

    if cols is None:
        ctok = ' '
        if ',' in cname:
            ctok = ','
        cols = cname.split(ctok)

    return cols
class=\"sd\"> cols : string or list of strings</span>\n<span class=\"sd\"> Columns to be read into arrays</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> outarr : array</span>\n<span class=\"sd\"> Numpy array or arrays of columns from the table</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"k\">if</span> <span class=\"n\">infile</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">,</span><span class=\"s2\">&quot;None&quot;</span><span class=\"p\">,</span><span class=\"s2\">&quot;INDEF&quot;</span><span class=\"p\">]:</span>\n <span class=\"k\">return</span> <span class=\"kc\">None</span>\n <span class=\"k\">if</span> <span class=\"n\">infile</span><span class=\"o\">.</span><span class=\"n\">endswith</span><span class=\"p\">(</span><span class=\"s1\">&#39;.fits&#39;</span><span class=\"p\">):</span>\n <span class=\"n\">outarr</span> <span class=\"o\">=</span> <span class=\"n\">read_FITS_cols</span><span class=\"p\">(</span><span class=\"n\">infile</span><span class=\"p\">,</span><span class=\"n\">cols</span><span class=\"o\">=</span><span class=\"n\">cols</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">outarr</span> <span class=\"o\">=</span> <span class=\"n\">read_ASCII_cols</span><span class=\"p\">(</span><span class=\"n\">infile</span><span class=\"p\">,</span><span class=\"n\">cols</span><span class=\"o\">=</span><span class=\"n\">cols</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"n\">outarr</span></div>\n\n<div class=\"viewcode-block\" id=\"read_FITS_cols\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.read_FITS_cols\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">read_FITS_cols</span><span class=\"p\">(</span><span class=\"n\">infile</span><span class=\"p\">,</span><span class=\"n\">cols</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Read columns from FITS table</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">ftab</span> <span class=\"o\">=</span> <span class=\"n\">fits</span><span class=\"o\">.</span><span class=\"n\">open</span><span class=\"p\">(</span><span class=\"n\">infile</span><span class=\"p\">,</span> <span class=\"n\">memmap</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n <span class=\"n\">extnum</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"n\">extfound</span> <span class=\"o\">=</span> <span class=\"kc\">False</span>\n <span class=\"k\">for</span> <span class=\"n\">extn</span> <span class=\"ow\">in</span> <span class=\"n\">ftab</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;tfields&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">extn</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">:</span>\n <span class=\"n\">extfound</span> <span class=\"o\">=</span> <span class=\"kc\">True</span>\n <span class=\"k\">break</span>\n <span class=\"n\">extnum</span> <span class=\"o\">+=</span> <span class=\"mi\">1</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">extfound</span><span 
class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;ERROR: No catalog table found in &#39;</span><span class=\"p\">,</span><span class=\"n\">infile</span><span class=\"p\">)</span>\n <span class=\"n\">ftab</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span>\n <span class=\"c1\"># Now, read columns from the table in this extension</span>\n <span class=\"c1\"># if no column names were provided by user, simply read in all columns from table</span>\n <span class=\"k\">if</span> <span class=\"n\">cols</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"s1\">&#39; &#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;INDEF&#39;</span><span class=\"p\">]:</span>\n <span class=\"n\">cols</span> <span class=\"o\">=</span> <span class=\"n\">ftab</span><span class=\"p\">[</span><span class=\"n\">extnum</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">names</span>\n <span class=\"c1\"># Define the output</span>\n <span class=\"n\">outarr</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">c</span> <span class=\"ow\">in</span> <span class=\"n\">cols</span><span class=\"p\">:</span>\n <span class=\"n\">outarr</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">ftab</span><span class=\"p\">[</span><span class=\"n\">extnum</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">field</span><span class=\"p\">(</span><span class=\"n\">c</span><span class=\"p\">))</span>\n\n <span class=\"n\">ftab</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">outarr</span></div>\n\n<div class=\"viewcode-block\" id=\"read_ASCII_cols\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.read_ASCII_cols\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">read_ASCII_cols</span><span class=\"p\">(</span><span class=\"n\">infile</span><span class=\"p\">,</span><span class=\"n\">cols</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">2</span><span class=\"p\">,</span><span class=\"mi\">3</span><span class=\"p\">]):</span>\n <span class=\"sd\">&quot;&quot;&quot; Interpret input ASCII file to return arrays for specified columns.</span>\n\n<span class=\"sd\"> Notes</span>\n<span class=\"sd\"> -----</span>\n<span class=\"sd\"> The specification of the columns should be expected to have lists for</span>\n<span class=\"sd\"> each &#39;column&#39;, with all columns in each list combined into a single entry.</span>\n<span class=\"sd\"> For example::</span>\n\n<span class=\"sd\"> cols = [&#39;1,2,3&#39;,&#39;4,5,6&#39;,7]</span>\n\n<span class=\"sd\"> where &#39;1,2,3&#39; represent the X/RA values, &#39;4,5,6&#39; represent the Y/Dec values</span>\n<span class=\"sd\"> and 7 represents the flux value for a total of 3 requested columns of data</span>\n<span class=\"sd\"> to be 
def read_ASCII_cols(infile, cols=[1, 2, 3]):
    """ Interpret input ASCII file to return arrays for specified columns.

    Notes
    -----
    The specification of the columns should be expected to have lists for
    each 'column', with all columns in each list combined into a single entry.
    For example::

        cols = ['1,2,3','4,5,6',7]

    where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec values
    and 7 represents the flux value for a total of 3 requested columns of data
    to be returned.

    Returns
    -------
    outarr : list of arrays
        The return value will be a list of numpy arrays, one for each 'column'.
    """
    # build dictionary representing format of each row
    # Format of dictionary: {'colname':col_number,...}
    # This provides the mapping between column name and column number
    coldict = {}
    fin = open(infile, 'r')
    flines = fin.readlines()
    fin.close()

    for l in flines:  # interpret each line from catalog file
        if l[0].lstrip() == '#' or l.lstrip() == '':
            continue
        else:
            # convert first row of data into column definitions using indices
            numcols = len(l.split())
            colnames = range(1, numcols + 1)
            for name in colnames:
                coldict[str(name)] = name - 1
            break
    numcols = len(cols)
    outarr = []
    for col in range(numcols):
        outarr.append([])
    convert_radec = False

    # Now, map specified columns to columns in file and populate output arrays
    # Open catalog file
    fin = open(infile, 'r')
    for l in fin.readlines():  # interpret each line from catalog file
        if l[0] == '#' or l.lstrip() == '':
            continue
        l = l.strip()
        # skip blank lines, comment lines, or lines with
        # fewer columns than requested by user
        if len(l) == 0 or len(l.split()) < numcols or (
                len(l) > 0 and (l[0] == '#' or "INDEF" in l)):
            continue
        lspl = l.split()

        # For each 'column' requested by user, pull data from row
        for c, i in zip(cols, list(range(numcols))):
            cnames = parse_colname(c)
            if len(cnames) > 1:
                # interpret multi-column specification as one value
                outval = ''
                for cn in cnames:
                    cnum = coldict[cn]
                    cval = lspl[cnum]
                    outval += cval + ' '
                outarr[i].append(outval)
                convert_radec = True
            else:
                # pull single value from row for this column
                cnum = coldict[cnames[0]]
                if isfloat(lspl[cnum]):
                    cval = float(lspl[cnum])
                else:
                    cval = lspl[cnum]
                    # Check for multi-column values given as "nn:nn:nn.s"
                    if ':' in cval:
                        cval = cval.replace(':', ' ')
                        convert_radec = True
                outarr[i].append(cval)

    fin.close()
    # convert multi-column RA/Dec specifications
    if convert_radec:
        outra = []
        outdec = []
        for ra, dec in zip(outarr[0], outarr[1]):
            radd, decdd = radec_hmstodd(ra, dec)
            outra.append(radd)
            outdec.append(decdd)
        outarr[0] = outra
        outarr[1] = outdec

    # convert all lists to numpy arrays
    for c in range(len(outarr)):
        outarr[c] = np.array(outarr[c])
    return outarr
<span class=\"ow\">in</span> <span class=\"n\">image_list</span><span class=\"p\">:</span>\n <span class=\"n\">row</span> <span class=\"o\">=</span> <span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">get_shiftfile_row</span><span class=\"p\">()</span>\n <span class=\"k\">if</span> <span class=\"n\">row</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">rows</span> <span class=\"o\">+=</span> <span class=\"n\">row</span>\n <span class=\"n\">nrows</span> <span class=\"o\">+=</span> <span class=\"mi\">1</span>\n <span class=\"k\">if</span> <span class=\"n\">nrows</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"c1\"># If there are no fits to report, do not write out a file</span>\n <span class=\"k\">return</span>\n\n <span class=\"c1\"># write out reference WCS now</span>\n <span class=\"k\">if</span> <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"o\">.</span><span class=\"n\">exists</span><span class=\"p\">(</span><span class=\"n\">outwcs</span><span class=\"p\">):</span>\n <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">remove</span><span class=\"p\">(</span><span class=\"n\">outwcs</span><span class=\"p\">)</span>\n <span class=\"n\">p</span> <span class=\"o\">=</span> <span class=\"n\">fits</span><span class=\"o\">.</span><span class=\"n\">HDUList</span><span class=\"p\">()</span>\n <span class=\"n\">p</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">fits</span><span class=\"o\">.</span><span class=\"n\">PrimaryHDU</span><span class=\"p\">())</span>\n <span class=\"n\">p</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">createWcsHDU</span><span class=\"p\">(</span><span class=\"n\">image_list</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">refWCS</span><span class=\"p\">))</span>\n <span class=\"n\">p</span><span class=\"o\">.</span><span class=\"n\">writeto</span><span class=\"p\">(</span><span class=\"n\">outwcs</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># Write out shiftfile to go with reference WCS</span>\n <span class=\"k\">if</span> <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"o\">.</span><span class=\"n\">exists</span><span class=\"p\">(</span><span class=\"n\">filename</span><span class=\"p\">):</span>\n <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">remove</span><span class=\"p\">(</span><span class=\"n\">filename</span><span class=\"p\">)</span>\n <span class=\"n\">f</span> <span class=\"o\">=</span> <span class=\"nb\">open</span><span class=\"p\">(</span><span class=\"n\">filename</span><span class=\"p\">,</span><span class=\"s1\">&#39;w&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">write</span><span class=\"p\">(</span><span class=\"s1\">&#39;# frame: output</span><span class=\"se\">\\n</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">write</span><span class=\"p\">(</span><span class=\"s1\">&#39;# refimage: </span><span class=\"si\">%s</span><span class=\"s1\">[wcs]</span><span class=\"se\">\\n</span><span 
class=\"s1\">&#39;</span><span class=\"o\">%</span><span class=\"n\">outwcs</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">write</span><span class=\"p\">(</span><span class=\"s1\">&#39;# form: delta</span><span class=\"se\">\\n</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">write</span><span class=\"p\">(</span><span class=\"s1\">&#39;# units: pixels</span><span class=\"se\">\\n</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">write</span><span class=\"p\">(</span><span class=\"n\">rows</span><span class=\"p\">)</span>\n <span class=\"n\">f</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;Writing out shiftfile :&#39;</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"p\">)</span></div>\n\n<div class=\"viewcode-block\" id=\"createWcsHDU\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.createWcsHDU\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">createWcsHDU</span><span class=\"p\">(</span><span class=\"n\">wcs</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Generate a WCS header object that can be used to populate a reference WCS HDU.</span>\n\n<span class=\"sd\"> For most applications, stwcs.wcsutil.HSTWCS.wcs2header()</span>\n<span class=\"sd\"> will work just as well.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">header</span> <span class=\"o\">=</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">to_header</span><span class=\"p\">()</span>\n\n <span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;EXTNAME&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;WCS&#39;</span>\n <span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;EXTVER&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n\n <span class=\"c1\"># Now, update original image size information</span>\n <span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;NPIX1&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">_naxis1</span><span class=\"p\">,</span> <span class=\"s2\">&quot;Length of array axis 1&quot;</span><span class=\"p\">)</span>\n <span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;NPIX2&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">_naxis2</span><span class=\"p\">,</span> <span class=\"s2\">&quot;Length of array axis 2&quot;</span><span class=\"p\">)</span>\n <span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;PIXVALUE&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"mf\">0.0</span><span class=\"p\">,</span> <span class=\"s2\">&quot;values of pixels in array&quot;</span><span class=\"p\">)</span>\n\n <span class=\"k\">if</span> <span class=\"nb\">hasattr</span><span class=\"p\">(</span><span class=\"n\">wcs</span><span 
class=\"p\">,</span> <span class=\"s1\">&#39;orientat&#39;</span><span class=\"p\">):</span>\n <span class=\"n\">orientat</span> <span class=\"o\">=</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">orientat</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\"># find orientat from CD or PC matrix</span>\n <span class=\"k\">if</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">has_cd</span><span class=\"p\">():</span>\n <span class=\"n\">cd12</span> <span class=\"o\">=</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">cd</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">][</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"n\">cd22</span> <span class=\"o\">=</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">cd</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">][</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"k\">elif</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">has_pc</span><span class=\"p\">():</span>\n <span class=\"n\">cd12</span> <span class=\"o\">=</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">cdelt</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">*</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">pc</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">][</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"n\">cd22</span> <span class=\"o\">=</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">cdelt</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">*</span> <span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">pc</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">][</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span><span class=\"p\">(</span><span class=\"s2\">&quot;Invalid WCS: WCS does not contain neither &quot;</span>\n <span class=\"s2\">&quot;a CD nor a PC matrix.&quot;</span><span class=\"p\">)</span>\n <span class=\"n\">orientat</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">rad2deg</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arctan2</span><span class=\"p\">(</span><span class=\"n\">cd12</span><span class=\"p\">,</span><span class=\"n\">cd22</span><span class=\"p\">))</span>\n <span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;ORIENTAT&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">orientat</span><span class=\"p\">,</span> <span 
class=\"s2\">&quot;position angle of &quot;</span>\n <span class=\"s2\">&quot;image y axis (deg. e of n)&quot;</span><span class=\"p\">)</span>\n\n <span class=\"k\">return</span> <span class=\"n\">fits</span><span class=\"o\">.</span><span class=\"n\">ImageHDU</span><span class=\"p\">(</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">header</span><span class=\"p\">)</span></div>\n\n<span class=\"c1\">#</span>\n<span class=\"c1\"># Code used for testing source finding algorithms</span>\n<span class=\"c1\">#</span>\n<span class=\"k\">def</span> <span class=\"nf\">idlgauss_convolve</span><span class=\"p\">(</span><span class=\"n\">image</span><span class=\"p\">,</span><span class=\"n\">fwhm</span><span class=\"p\">):</span>\n <span class=\"n\">sigmatofwhm</span> <span class=\"o\">=</span> <span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sqrt</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">log</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"p\">))</span>\n <span class=\"n\">radius</span> <span class=\"o\">=</span> <span class=\"mf\">1.5</span> <span class=\"o\">*</span> <span class=\"n\">fwhm</span> <span class=\"o\">/</span> <span class=\"n\">sigmatofwhm</span> <span class=\"c1\"># Radius is 1.5 sigma</span>\n <span class=\"k\">if</span> <span class=\"n\">radius</span> <span class=\"o\">&lt;</span> <span class=\"mf\">1.0</span><span class=\"p\">:</span>\n <span class=\"n\">radius</span> <span class=\"o\">=</span> <span class=\"mf\">1.0</span>\n <span class=\"n\">fwhm</span> <span class=\"o\">=</span> <span class=\"n\">sigmatofwhm</span><span class=\"o\">/</span><span class=\"mf\">1.5</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span> <span class=\"s2\">&quot;WARNING!!! 
#
# Code used for testing source finding algorithms
#
def idlgauss_convolve(image, fwhm):
    sigmatofwhm = 2*np.sqrt(2*np.log(2))
    radius = 1.5 * fwhm / sigmatofwhm  # Radius is 1.5 sigma
    if radius < 1.0:
        radius = 1.0
        fwhm = sigmatofwhm/1.5
        print("WARNING!!! Radius of convolution box smaller than one.")
        print("Setting the 'fwhm' to minimum value, %f." % fwhm)
    sigsq = (fwhm/sigmatofwhm)**2  # sigma squared
    nhalf = int(radius)  # Center of the kernel
    nbox = 2*nhalf + 1  # Number of pixels inside of convolution box
    middle = nhalf  # Index of central pixel

    kern_y, kern_x = np.ix_(np.arange(nbox), np.arange(nbox))  # x,y coordinates of the kernel
    g = (kern_x - nhalf)**2 + (kern_y - nhalf)**2  # Compute the square of the distance to the center
    mask = g <= radius**2  # We make a mask to select the inner circle of radius "radius"
    nmask = mask.sum()  # The number of pixels in the mask within the inner circle.
    g = np.exp(-0.5*g/sigsq)  # We make the 2D gaussian profile

    ###
    # Convolving the image with a kernel representing a gaussian
    # (which is assumed to be the psf)
    ###
    c = g*mask  # For the kernel, values further than "radius" are equal to zero
    c[mask] = (c[mask] - c[mask].mean())/(c[mask].var() * nmask)  # We normalize the gaussian kernel

    c1 = g[nhalf]  # c1 will be used to test the roundness
    sumc1 = c1.mean()
    sumc1sq = (c1**2).sum() - sumc1
    c1 = (c1 - c1.mean())/((c1**2).sum() - c1.mean())

    h = ndimage.convolve(image, c, mode='constant', cval=0.0)  # Convolve image with kernel "c"
    h[:nhalf, :] = 0  # Set the sides to zero in order to avoid border effects
    h[-nhalf:, :] = 0
    h[:, :nhalf] = 0
    h[:, -nhalf:] = 0

    return h, c1
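def _idlgauss_convolve_example():
    """Hedged example sketch, not part of the original module.

    Convolves a synthetic noise image with the DAOFIND-style kernel built
    above; local maxima in `h` mark point-source candidates. Assumes `np`
    and `ndimage` are imported at module level, as the code above requires.
    """
    noise_image = np.random.normal(size=(64, 64))  # hypothetical test image
    h, c1 = idlgauss_convolve(noise_image, fwhm=2.5)
    return h, c1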
class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"n\">h</span><span class=\"p\">[:,:</span><span class=\"n\">nhalf</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"n\">h</span><span class=\"p\">[:,</span><span class=\"o\">-</span><span class=\"n\">nhalf</span><span class=\"p\">:]</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n\n <span class=\"k\">return</span> <span class=\"n\">h</span><span class=\"p\">,</span><span class=\"n\">c1</span>\n\n<div class=\"viewcode-block\" id=\"gauss_array\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.gauss_array\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">gauss_array</span><span class=\"p\">(</span><span class=\"n\">nx</span><span class=\"p\">,</span> <span class=\"n\">ny</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">fwhm</span><span class=\"o\">=</span><span class=\"mf\">1.0</span><span class=\"p\">,</span> <span class=\"n\">sigma_x</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">sigma_y</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">zero_norm</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Computes the 2D Gaussian with size nx*ny.</span>\n\n<span class=\"sd\"> Parameters</span>\n<span class=\"sd\"> ----------</span>\n<span class=\"sd\"> nx : int</span>\n<span class=\"sd\"> ny : int [Default: None]</span>\n<span class=\"sd\"> Size of output array for the generated Gaussian. If ny == None,</span>\n<span class=\"sd\"> output will be an array nx X nx pixels.</span>\n\n<span class=\"sd\"> fwhm : float [Default: 1.0]</span>\n<span class=\"sd\"> Full-width, half-maximum of the Gaussian to be generated</span>\n\n<span class=\"sd\"> sigma_x : float [Default: None]</span>\n<span class=\"sd\"> sigma_y : float [Default: None]</span>\n<span class=\"sd\"> Sigma_x and sigma_y are the stddev of the Gaussian functions.</span>\n\n<span class=\"sd\"> zero_norm : bool [Default: False]</span>\n<span class=\"sd\"> The kernel will be normalized to a sum of 1 when True.</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> gauss_arr : array</span>\n<span class=\"sd\"> A numpy array with the generated gaussian function</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"k\">if</span> <span class=\"n\">ny</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">ny</span> <span class=\"o\">=</span> <span class=\"n\">nx</span>\n\n <span class=\"k\">if</span> <span class=\"n\">sigma_x</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">fwhm</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;A value for either &quot;fwhm&quot; or &quot;sigma_x&quot; needs to be specified!&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\"># Convert input FWHM into sigma</span>\n <span class=\"n\">sigma_x</span> <span class=\"o\">=</span> <span 
class=\"n\">fwhm</span><span class=\"o\">/</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sqrt</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">log</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"p\">)))</span>\n <span class=\"k\">if</span> <span class=\"n\">sigma_y</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">sigma_y</span> <span class=\"o\">=</span> <span class=\"n\">sigma_x</span>\n\n <span class=\"n\">xradius</span> <span class=\"o\">=</span> <span class=\"n\">nx</span><span class=\"o\">//</span><span class=\"mi\">2</span>\n <span class=\"n\">yradius</span> <span class=\"o\">=</span> <span class=\"n\">ny</span><span class=\"o\">//</span><span class=\"mi\">2</span>\n\n <span class=\"c1\"># Create grids of distance from center in X and Y</span>\n <span class=\"n\">xarr</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">abs</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arange</span><span class=\"p\">(</span><span class=\"o\">-</span><span class=\"n\">xradius</span><span class=\"p\">,</span><span class=\"n\">xradius</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">))</span>\n <span class=\"n\">yarr</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">abs</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arange</span><span class=\"p\">(</span><span class=\"o\">-</span><span class=\"n\">yradius</span><span class=\"p\">,</span><span class=\"n\">yradius</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">))</span>\n <span class=\"n\">hnx</span> <span class=\"o\">=</span> <span class=\"n\">gauss</span><span class=\"p\">(</span><span class=\"n\">xarr</span><span class=\"p\">,</span><span class=\"n\">sigma_x</span><span class=\"p\">)</span>\n <span class=\"n\">hny</span> <span class=\"o\">=</span> <span class=\"n\">gauss</span><span class=\"p\">(</span><span class=\"n\">yarr</span><span class=\"p\">,</span><span class=\"n\">sigma_y</span><span class=\"p\">)</span>\n <span class=\"n\">hny</span> <span class=\"o\">=</span> <span class=\"n\">hny</span><span class=\"o\">.</span><span class=\"n\">reshape</span><span class=\"p\">((</span><span class=\"n\">ny</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">))</span>\n <span class=\"n\">h</span> <span class=\"o\">=</span> <span class=\"n\">hnx</span><span class=\"o\">*</span><span class=\"n\">hny</span>\n\n <span class=\"c1\"># Normalize gaussian kernel to a sum of 1</span>\n <span class=\"n\">h</span> <span class=\"o\">=</span> <span class=\"n\">h</span> <span class=\"o\">/</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">abs</span><span class=\"p\">(</span><span class=\"n\">h</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">()</span>\n <span class=\"k\">if</span> <span class=\"n\">zero_norm</span><span class=\"p\">:</span>\n <span class=\"n\">h</span> <span class=\"o\">-=</span> <span class=\"n\">h</span><span class=\"o\">.</span><span class=\"n\">mean</span><span 
class=\"p\">()</span>\n\n <span class=\"k\">return</span> <span class=\"n\">h</span></div>\n\n<div class=\"viewcode-block\" id=\"gauss\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.gauss\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">gauss</span><span class=\"p\">(</span><span class=\"n\">x</span><span class=\"p\">,</span><span class=\"n\">sigma</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Compute 1-D value of gaussian at position x relative to center.&quot;&quot;&quot;</span>\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">exp</span><span class=\"p\">(</span><span class=\"o\">-</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">power</span><span class=\"p\">(</span><span class=\"n\">x</span><span class=\"p\">,</span><span class=\"mi\">2</span><span class=\"p\">)</span><span class=\"o\">/</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">power</span><span class=\"p\">(</span><span class=\"n\">sigma</span><span class=\"p\">,</span><span class=\"mi\">2</span><span class=\"p\">)))</span> <span class=\"o\">/</span> <span class=\"p\">(</span><span class=\"n\">sigma</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sqrt</span><span class=\"p\">(</span><span class=\"mi\">2</span><span class=\"o\">*</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">pi</span><span class=\"p\">))</span></div>\n\n\n<span class=\"c1\">#### Plotting Utilities for drizzlepac</span>\n<div class=\"viewcode-block\" id=\"make_vector_plot\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.make_vector_plot\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">make_vector_plot</span><span class=\"p\">(</span><span class=\"n\">coordfile</span><span class=\"p\">,</span><span class=\"n\">columns</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">2</span><span class=\"p\">,</span><span class=\"mi\">3</span><span class=\"p\">,</span><span class=\"mi\">4</span><span class=\"p\">],</span><span class=\"n\">data</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"n\">figure_id</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span>\n <span class=\"n\">title</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">axes</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">every</span><span class=\"o\">=</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"n\">labelsize</span><span class=\"o\">=</span><span class=\"mi\">8</span><span class=\"p\">,</span> <span class=\"n\">ylimit</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span>\n <span class=\"n\">limit</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">xlower</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">ylower</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">output</span><span class=\"o\">=</span><span 
class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">headl</span><span class=\"o\">=</span><span class=\"mi\">4</span><span class=\"p\">,</span><span class=\"n\">headw</span><span class=\"o\">=</span><span class=\"mi\">3</span><span class=\"p\">,</span>\n <span class=\"n\">xsh</span><span class=\"o\">=</span><span class=\"mf\">0.0</span><span class=\"p\">,</span><span class=\"n\">ysh</span><span class=\"o\">=</span><span class=\"mf\">0.0</span><span class=\"p\">,</span><span class=\"n\">fit</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"n\">scale</span><span class=\"o\">=</span><span class=\"mf\">1.0</span><span class=\"p\">,</span><span class=\"n\">vector</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">,</span><span class=\"n\">textscale</span><span class=\"o\">=</span><span class=\"mi\">5</span><span class=\"p\">,</span>\n <span class=\"n\">append</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">,</span><span class=\"n\">linfit</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">,</span><span class=\"n\">rms</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">,</span> <span class=\"n\">plotname</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Convert a XYXYMATCH file into a vector plot or set of residuals plots.</span>\n\n<span class=\"sd\"> This function provides a single interface for generating either a vector</span>\n<span class=\"sd\"> plot of residuals or a set of 4 plots showing residuals. The data being</span>\n<span class=\"sd\"> plotted can also be adjusted for a linear fit on-the-fly.</span>\n\n<span class=\"sd\"> Parameters</span>\n<span class=\"sd\"> ----------</span>\n<span class=\"sd\"> coordfile : string</span>\n<span class=\"sd\"> Name of file with matched sets of coordinates. 
This input file can</span>\n<span class=\"sd\"> be a file compatible for use with IRAF&#39;s geomap.</span>\n<span class=\"sd\"> columns : list [Default: [0,1,2,3]]</span>\n<span class=\"sd\"> Column numbers for the X,Y positions from each image</span>\n<span class=\"sd\"> data : list of arrays</span>\n<span class=\"sd\"> If specified, this can be used to input matched data directly</span>\n<span class=\"sd\"> title : string</span>\n<span class=\"sd\"> Title to be used for the generated plot</span>\n<span class=\"sd\"> axes : list</span>\n<span class=\"sd\"> List of X and Y min/max values to customize the plot axes</span>\n<span class=\"sd\"> every : int [Default: 1]</span>\n<span class=\"sd\"> Slice value for the data to be plotted</span>\n<span class=\"sd\"> limit : float</span>\n<span class=\"sd\"> Radial offset limit for selecting which sources are included in the plot</span>\n<span class=\"sd\"> labelsize : int [Default: 8] or str</span>\n<span class=\"sd\"> Font size to use for tick labels, either in font points or as a string</span>\n<span class=\"sd\"> understood by tick_params().</span>\n<span class=\"sd\"> ylimit : float</span>\n<span class=\"sd\"> Limit to use for Y range of plots.</span>\n<span class=\"sd\"> xlower : float</span>\n<span class=\"sd\"> ylower : float</span>\n<span class=\"sd\"> Limit in X and/or Y offset for selecting which sources are included in the plot</span>\n<span class=\"sd\"> output : string</span>\n<span class=\"sd\"> Filename of output file for generated plot</span>\n<span class=\"sd\"> headl : int [Default: 4]</span>\n<span class=\"sd\"> Length of arrow head to be used in vector plot</span>\n<span class=\"sd\"> headw : int [Default: 3]</span>\n<span class=\"sd\"> Width of arrow head to be used in vector plot</span>\n<span class=\"sd\"> xsh : float</span>\n<span class=\"sd\"> ysh : float</span>\n<span class=\"sd\"> Shift in X and Y from linear fit to be applied to source positions</span>\n<span class=\"sd\"> from the first image</span>\n<span class=\"sd\"> scale : float</span>\n<span class=\"sd\"> Scale from linear fit to be applied to source positions from the</span>\n<span class=\"sd\"> first image</span>\n<span class=\"sd\"> fit : array</span>\n<span class=\"sd\"> Array of linear coefficients for rotation (and scale?) in X and Y from</span>\n<span class=\"sd\"> a linear fit to be applied to source positions from the first image</span>\n<span class=\"sd\"> vector : bool [Default: True]</span>\n<span class=\"sd\"> Specifies whether or not to generate a vector plot. 
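    # Illustrative call (not from the original docs; the file names are
    # hypothetical): plot a vector field of residuals from a 4-column match
    # file and save it to disk:
    #
    #   make_vector_plot('sources.match', columns=[1, 2, 3, 4],
    #                    title='tweakreg residuals', vector=True,
    #                    plotname='residuals_vector.png')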
    from matplotlib import pyplot as plt

    if data is None:
        data = readcols(coordfile, cols=columns)

    xy1x = data[0]
    xy1y = data[1]
    xy2x = data[2]
    xy2y = data[3]

    numpts = xy1x.shape[0]
    if fit is not None:
        xy1x, xy1y = apply_db_fit(data, fit, xsh=xsh, ysh=ysh)
        fitstr = '-Fit applied'
        dx = xy2x - xy1x
        dy = xy2y - xy1y
    else:
        dx = xy2x - xy1x - xsh
        dy = xy2y - xy1y - ysh
    # apply scaling factor to deltas
    dx *= scale
    dy *= scale

    print('Total # points: ', len(dx))
    if limit is not None:
        indx = (np.sqrt(dx**2 + dy**2) <= limit)
        dx = dx[indx].copy()
        dy = dy[indx].copy()
        xy1x = xy1x[indx].copy()
        xy1y = xy1y[indx].copy()
    if xlower is not None:
        xindx = (np.abs(dx) >= xlower)
        dx = dx[xindx].copy()
        dy = dy[xindx].copy()
        xy1x = xy1x[xindx].copy()
        xy1y = xy1y[xindx].copy()
    print('# of points after clipping: ', len(dx))

    dr = np.sqrt(dx**2 + dy**2)
    max_vector = dr.max()

    if output is not None:
        write_xy_file(output, [xy1x, xy1y, dx, dy])

    fig = plt.figure(num=figure_id)
    if not append:
        plt.clf()

    if vector:
        dxs = imagestats.ImageStats(dx.astype(np.float32))
        dys = imagestats.ImageStats(dy.astype(np.float32))
        minx = xy1x.min()
        maxx = xy1x.max()
        miny = xy1y.min()
        maxy = xy1y.max()
        xrange = maxx - minx
        yrange = maxy - miny

        qplot = plt.quiver(xy1x[::every], xy1y[::every], dx[::every], dy[::every],
                           units='y', headwidth=headw, headlength=headl)
        key_dx = xrange*0.01
        key_dy = yrange*(0.005*textscale)
        maxvec = max_vector/2.
        key_len = round((maxvec + 0.005), 2)

        plt.xlabel('DX: %.4f to %.4f +/- %.4f' % (dxs.min, dxs.max, dxs.stddev))
        plt.ylabel('DY: %.4f to %.4f +/- %.4f' % (dys.min, dys.max, dys.stddev))
        plt.title(r"$Vector\ plot\ of\ %d/%d\ residuals:\ %s$" % (
            xy1x.shape[0], numpts, title))
        plt.quiverkey(qplot, minx + key_dx, miny - key_dy, key_len, "%0.2f pixels" % (key_len),
                      coordinates='data', labelpos='E', labelcolor='Maroon', color='Maroon')
    else:
        plot_defs = [[xy1x, dx, "X (pixels)", "DX (pixels)"],
                     [xy1y, dx, "Y (pixels)", "DX (pixels)"],
                     [xy1x, dy, "X (pixels)", "DY (pixels)"],
                     [xy1y, dy, "Y (pixels)", "DY (pixels)"]]
        if axes is None:
            # Compute a global set of axis limits for all plots
            minx = xy1x.min()
            maxx = xy1x.max()
            miny = dx.min()
            maxy = dx.max()

            if xy1y.min() < minx: minx = xy1y.min()
            if xy1y.max() > maxx: maxx = xy1y.max()
            if dy.min() < miny: miny = dy.min()
            if dy.max() > maxy: maxy = dy.max()
        else:
            minx = axes[0][0]
            maxx = axes[0][1]
            miny = axes[1][0]
            maxy = axes[1][1]

        if ylimit is not None:
            miny = -1*ylimit
            maxy = ylimit

        xrange = maxx - minx
        yrange = maxy - miny

        rms_labelled = False
        if title is None:
            fig.suptitle("Residuals [%d/%d]" % (xy1x.shape[0], numpts),
                         ha='center', fontsize=labelsize + 6)
        else:
            # This definition of the title supports math symbols in the title
            fig.suptitle(r"$" + title + "$", ha='center', fontsize=labelsize + 6)

        for pnum, p in enumerate(plot_defs):
            pn = pnum + 1
            ax = fig.add_subplot(2, 2, pn)
            plt.plot(p[0], p[1], 'b.',
                     label='RMS(X) = %.4f, RMS(Y) = %.4f' % (dx.std(), dy.std()))
            lx = [int((p[0].min() - 500)/500) * 500, int((p[0].max() + 500)/500) * 500]
            plt.plot([lx[0], lx[1]], [0.0, 0.0], 'k', linewidth=3)
            plt.axis([minx, maxx, miny, maxy])
            if rms and not rms_labelled:
                leg_handles, leg_labels = ax.get_legend_handles_labels()
                fig.legend(leg_handles, leg_labels, loc='center left',
                           fontsize='small', frameon=False,
                           bbox_to_anchor=(0.33, 0.51), borderaxespad=0)
                rms_labelled = True

            ax.tick_params(labelsize=labelsize)

            # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
            if pn <= 2:
                plt.setp(ax.get_xticklabels(), visible=False)
            else:
                ax.set_xlabel(plot_defs[pnum][2])

            if pn % 2 == 0:
                plt.setp(ax.get_yticklabels(), visible=False)
            else:
                ax.set_ylabel(plot_defs[pnum][3])

            if linfit:
                lxr = int((lx[-1] - lx[0])/100)
                lyr = int((p[1].
class=\"n\">max</span><span class=\"p\">()</span> <span class=\"o\">-</span> <span class=\"n\">p</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">min</span><span class=\"p\">())</span><span class=\"o\">/</span><span class=\"mi\">100</span><span class=\"p\">)</span>\n <span class=\"n\">A</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">vstack</span><span class=\"p\">([</span><span class=\"n\">p</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">p</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]))])</span><span class=\"o\">.</span><span class=\"n\">T</span>\n <span class=\"n\">m</span><span class=\"p\">,</span><span class=\"n\">c</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">linalg</span><span class=\"o\">.</span><span class=\"n\">lstsq</span><span class=\"p\">(</span><span class=\"n\">A</span><span class=\"p\">,</span><span class=\"n\">p</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">])[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"n\">yr</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">m</span><span class=\"o\">*</span><span class=\"n\">lx</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">c</span><span class=\"p\">,</span><span class=\"n\">lx</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">*</span><span class=\"n\">m</span><span class=\"o\">+</span><span class=\"n\">c</span><span class=\"p\">]</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">plot</span><span class=\"p\">([</span><span class=\"n\">lx</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span><span class=\"n\">lx</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]],</span><span class=\"n\">yr</span><span class=\"p\">,</span><span class=\"s1\">&#39;r&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">text</span><span class=\"p\">(</span><span class=\"n\">lx</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">lxr</span><span class=\"p\">,</span><span class=\"n\">p</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">()</span><span class=\"o\">+</span><span class=\"n\">lyr</span><span class=\"p\">,</span><span class=\"s2\">&quot;</span><span class=\"si\">%0.5g</span><span class=\"s2\">*x + </span><span class=\"si\">%0.5g</span><span class=\"s2\"> [</span><span class=\"si\">%0.5g</span><span class=\"s2\">,</span><span class=\"si\">%0.5g</span><span class=\"s2\">]&quot;</span><span class=\"o\">%</span><span class=\"p\">(</span><span class=\"n\">m</span><span class=\"p\">,</span><span class=\"n\">c</span><span class=\"p\">,</span><span class=\"n\">yr</span><span class=\"p\">[</span><span 
class=\"mi\">0</span><span class=\"p\">],</span><span class=\"n\">yr</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]),</span><span class=\"n\">color</span><span class=\"o\">=</span><span class=\"s1\">&#39;r&#39;</span><span class=\"p\">)</span>\n\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">draw</span><span class=\"p\">()</span>\n\n <span class=\"k\">if</span> <span class=\"n\">plotname</span><span class=\"p\">:</span>\n <span class=\"n\">suffix</span> <span class=\"o\">=</span> <span class=\"n\">plotname</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">4</span><span class=\"p\">:]</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;.&#39;</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"n\">suffix</span><span class=\"p\">:</span>\n <span class=\"n\">output</span> <span class=\"o\">+=</span> <span class=\"s1\">&#39;.png&#39;</span>\n <span class=\"nb\">format</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;png&#39;</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">suffix</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:]</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"s1\">&#39;png&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;pdf&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;ps&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;eps&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;svg&#39;</span><span class=\"p\">]:</span>\n <span class=\"nb\">format</span><span class=\"o\">=</span><span class=\"n\">suffix</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:]</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">savefig</span><span class=\"p\">(</span><span class=\"n\">plotname</span><span class=\"p\">,</span><span class=\"nb\">format</span><span class=\"o\">=</span><span class=\"nb\">format</span><span class=\"p\">)</span></div>\n\n<span class=\"k\">def</span> <span class=\"nf\">apply_db_fit</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">,</span><span class=\"n\">fit</span><span class=\"p\">,</span><span class=\"n\">xsh</span><span class=\"o\">=</span><span class=\"mf\">0.0</span><span class=\"p\">,</span><span class=\"n\">ysh</span><span class=\"o\">=</span><span class=\"mf\">0.0</span><span class=\"p\">):</span>\n <span class=\"n\">xy1x</span> <span class=\"o\">=</span> <span class=\"n\">data</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"n\">xy1y</span> <span class=\"o\">=</span> <span class=\"n\">data</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"n\">numpts</span> <span class=\"o\">=</span> <span class=\"n\">xy1x</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">fit</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">xy1</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">zeros</span><span class=\"p\">((</span><span class=\"n\">xy1x</span><span class=\"o\">.</span><span class=\"n\">shape</span><span 
class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span><span class=\"mi\">2</span><span class=\"p\">),</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">float64</span><span class=\"p\">)</span>\n <span class=\"n\">xy1</span><span class=\"p\">[:,</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">xy1x</span>\n <span class=\"n\">xy1</span><span class=\"p\">[:,</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">xy1y</span>\n <span class=\"n\">xy1</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">dot</span><span class=\"p\">(</span><span class=\"n\">xy1</span><span class=\"p\">,</span><span class=\"n\">fit</span><span class=\"p\">)</span>\n <span class=\"n\">xy1x</span> <span class=\"o\">=</span> <span class=\"n\">xy1</span><span class=\"p\">[:,</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">+</span> <span class=\"n\">xsh</span>\n <span class=\"n\">xy1y</span> <span class=\"o\">=</span> <span class=\"n\">xy1</span><span class=\"p\">[:,</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">+</span> <span class=\"n\">ysh</span>\n <span class=\"k\">return</span> <span class=\"n\">xy1x</span><span class=\"p\">,</span><span class=\"n\">xy1y</span>\n\n\n<span class=\"k\">def</span> <span class=\"nf\">write_xy_file</span><span class=\"p\">(</span><span class=\"n\">outname</span><span class=\"p\">,</span><span class=\"n\">xydata</span><span class=\"p\">,</span><span class=\"n\">append</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">,</span><span class=\"nb\">format</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s2\">&quot;</span><span class=\"si\">%20.6f</span><span class=\"s2\">&quot;</span><span class=\"p\">]):</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">xydata</span><span class=\"p\">,</span><span class=\"nb\">list</span><span class=\"p\">):</span>\n <span class=\"n\">xydata</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"n\">xydata</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">append</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"o\">.</span><span class=\"n\">exists</span><span class=\"p\">(</span><span class=\"n\">outname</span><span class=\"p\">):</span>\n <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">remove</span><span class=\"p\">(</span><span class=\"n\">outname</span><span class=\"p\">)</span>\n <span class=\"n\">fout1</span> <span class=\"o\">=</span> <span class=\"nb\">open</span><span class=\"p\">(</span><span class=\"n\">outname</span><span class=\"p\">,</span><span class=\"s1\">&#39;a+&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">row</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">xydata</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">][</span><span class=\"mi\">0</span><span class=\"p\">])):</span>\n <span 
class=\"n\">outstr</span> <span class=\"o\">=</span> <span class=\"s2\">&quot;&quot;</span>\n <span class=\"k\">for</span> <span class=\"n\">cols</span><span class=\"p\">,</span><span class=\"n\">fmts</span> <span class=\"ow\">in</span> <span class=\"nb\">zip</span><span class=\"p\">(</span><span class=\"n\">xydata</span><span class=\"p\">,</span><span class=\"nb\">format</span><span class=\"p\">):</span>\n <span class=\"k\">for</span> <span class=\"n\">col</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">cols</span><span class=\"p\">)):</span>\n <span class=\"n\">outstr</span> <span class=\"o\">+=</span> <span class=\"n\">fmts</span><span class=\"o\">%</span><span class=\"p\">(</span><span class=\"n\">cols</span><span class=\"p\">[</span><span class=\"n\">col</span><span class=\"p\">][</span><span class=\"n\">row</span><span class=\"p\">])</span>\n <span class=\"n\">fout1</span><span class=\"o\">.</span><span class=\"n\">write</span><span class=\"p\">(</span><span class=\"n\">outstr</span><span class=\"o\">+</span><span class=\"s2\">&quot;</span><span class=\"se\">\\n</span><span class=\"s2\">&quot;</span><span class=\"p\">)</span>\n <span class=\"n\">fout1</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;wrote XY data to: &#39;</span><span class=\"p\">,</span><span class=\"n\">outname</span><span class=\"p\">)</span>\n\n<div class=\"viewcode-block\" id=\"find_xy_peak\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.find_xy_peak\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">find_xy_peak</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"p\">,</span><span class=\"n\">center</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"n\">sigma</span><span class=\"o\">=</span><span class=\"mf\">3.0</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Find the center of the peak of offsets</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"c1\"># find level of noise in histogram</span>\n <span class=\"n\">istats</span> <span class=\"o\">=</span> <span class=\"n\">imagestats</span><span class=\"o\">.</span><span class=\"n\">ImageStats</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">astype</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">float32</span><span class=\"p\">),</span><span class=\"n\">nclip</span><span class=\"o\">=</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"n\">fields</span><span class=\"o\">=</span><span class=\"s1\">&#39;stddev,mode,mean,max,min&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">istats</span><span class=\"o\">.</span><span class=\"n\">stddev</span> <span class=\"o\">==</span> <span class=\"mf\">0.0</span><span class=\"p\">:</span>\n <span class=\"n\">istats</span> <span class=\"o\">=</span> <span class=\"n\">imagestats</span><span class=\"o\">.</span><span class=\"n\">ImageStats</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">astype</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">float32</span><span 
class=\"p\">),</span><span class=\"n\">fields</span><span class=\"o\">=</span><span class=\"s1\">&#39;stddev,mode,mean,max,min&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">imgsum</span> <span class=\"o\">=</span> <span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">()</span>\n\n <span class=\"c1\"># clip out all values below mean+3*sigma from histogram</span>\n <span class=\"n\">imgc</span> <span class=\"o\">=</span><span class=\"n\">img</span><span class=\"p\">[:,:]</span><span class=\"o\">.</span><span class=\"n\">copy</span><span class=\"p\">()</span>\n <span class=\"n\">imgc</span><span class=\"p\">[</span><span class=\"n\">imgc</span> <span class=\"o\">&lt;</span> <span class=\"n\">istats</span><span class=\"o\">.</span><span class=\"n\">mode</span><span class=\"o\">+</span><span class=\"n\">istats</span><span class=\"o\">.</span><span class=\"n\">stddev</span><span class=\"o\">*</span><span class=\"n\">sigma</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"mf\">0.0</span>\n <span class=\"c1\"># identify position of peak</span>\n <span class=\"n\">yp0</span><span class=\"p\">,</span><span class=\"n\">xp0</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">where</span><span class=\"p\">(</span><span class=\"n\">imgc</span> <span class=\"o\">==</span> <span class=\"n\">imgc</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">())</span>\n\n <span class=\"c1\"># Perform bounds checking on slice from img</span>\n <span class=\"n\">ymin</span> <span class=\"o\">=</span> <span class=\"nb\">max</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span><span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">yp0</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span><span class=\"o\">-</span><span class=\"mi\">3</span><span class=\"p\">)</span>\n <span class=\"n\">ymax</span> <span class=\"o\">=</span> <span class=\"nb\">min</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span><span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">yp0</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span><span class=\"o\">+</span><span class=\"mi\">4</span><span class=\"p\">)</span>\n <span class=\"n\">xmin</span> <span class=\"o\">=</span> <span class=\"nb\">max</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span><span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">xp0</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span><span class=\"o\">-</span><span class=\"mi\">3</span><span class=\"p\">)</span>\n <span class=\"n\">xmax</span> <span class=\"o\">=</span> <span class=\"nb\">min</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">],</span><span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">xp0</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span><span class=\"o\">+</span><span class=\"mi\">4</span><span class=\"p\">)</span>\n <span class=\"c1\"># take sum of at most a 7x7 pixel box around peak</span>\n <span 
class=\"n\">xp_slice</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"nb\">slice</span><span class=\"p\">(</span><span class=\"n\">ymin</span><span class=\"p\">,</span><span class=\"n\">ymax</span><span class=\"p\">),</span>\n <span class=\"nb\">slice</span><span class=\"p\">(</span><span class=\"n\">xmin</span><span class=\"p\">,</span><span class=\"n\">xmax</span><span class=\"p\">))</span>\n <span class=\"n\">yp</span><span class=\"p\">,</span><span class=\"n\">xp</span> <span class=\"o\">=</span> <span class=\"n\">ndimage</span><span class=\"o\">.</span><span class=\"n\">center_of_mass</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"p\">[</span><span class=\"n\">xp_slice</span><span class=\"p\">])</span>\n <span class=\"k\">if</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"n\">xp</span><span class=\"p\">)</span> <span class=\"ow\">or</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"n\">yp</span><span class=\"p\">):</span>\n <span class=\"n\">xp</span><span class=\"o\">=</span><span class=\"mf\">0.0</span>\n <span class=\"n\">yp</span><span class=\"o\">=</span><span class=\"mf\">0.0</span>\n <span class=\"n\">flux</span> <span class=\"o\">=</span> <span class=\"mf\">0.0</span>\n <span class=\"n\">zpqual</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">xp</span> <span class=\"o\">+=</span> <span class=\"n\">xp_slice</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">start</span>\n <span class=\"n\">yp</span> <span class=\"o\">+=</span> <span class=\"n\">xp_slice</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">start</span>\n\n <span class=\"c1\"># compute S/N criteria for this peak: flux/sqrt(mean of rest of array)</span>\n <span class=\"n\">flux</span> <span class=\"o\">=</span> <span class=\"n\">imgc</span><span class=\"p\">[</span><span class=\"n\">xp_slice</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">()</span>\n <span class=\"n\">delta_size</span> <span class=\"o\">=</span> <span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">img</span><span class=\"o\">.</span><span class=\"n\">size</span> <span class=\"o\">-</span> <span class=\"n\">imgc</span><span class=\"p\">[</span><span class=\"n\">xp_slice</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">size</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">delta_size</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"n\">delta_size</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n <span class=\"n\">delta_flux</span> <span class=\"o\">=</span> <span class=\"nb\">float</span><span class=\"p\">(</span><span class=\"n\">imgsum</span> <span class=\"o\">-</span> <span class=\"n\">flux</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">flux</span> <span class=\"o\">&gt;</span> <span class=\"n\">imgc</span><span class=\"p\">[</span><span class=\"n\">xp_slice</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">max</span><span 
class=\"p\">():</span> <span class=\"n\">delta_flux</span> <span class=\"o\">=</span> <span class=\"n\">flux</span> <span class=\"o\">-</span> <span class=\"n\">imgc</span><span class=\"p\">[</span><span class=\"n\">xp_slice</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">()</span>\n <span class=\"k\">else</span><span class=\"p\">:</span> <span class=\"n\">delta_flux</span> <span class=\"o\">=</span> <span class=\"n\">flux</span>\n <span class=\"n\">zpqual</span> <span class=\"o\">=</span> <span class=\"n\">flux</span><span class=\"o\">/</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sqrt</span><span class=\"p\">(</span><span class=\"n\">delta_flux</span><span class=\"o\">/</span><span class=\"n\">delta_size</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"n\">zpqual</span><span class=\"p\">)</span> <span class=\"ow\">or</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isinf</span><span class=\"p\">(</span><span class=\"n\">zpqual</span><span class=\"p\">):</span>\n <span class=\"n\">zpqual</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n\n <span class=\"k\">if</span> <span class=\"n\">center</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">xp</span> <span class=\"o\">-=</span> <span class=\"n\">center</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"n\">yp</span> <span class=\"o\">-=</span> <span class=\"n\">center</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"n\">flux</span> <span class=\"o\">=</span> <span class=\"n\">imgc</span><span class=\"p\">[</span><span class=\"n\">xp_slice</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">max</span><span class=\"p\">()</span>\n\n <span class=\"k\">del</span> <span class=\"n\">imgc</span>\n <span class=\"k\">return</span> <span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">,</span><span class=\"n\">flux</span><span class=\"p\">,</span><span class=\"n\">zpqual</span></div>\n\n<div class=\"viewcode-block\" id=\"plot_zeropoint\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.plot_zeropoint\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">plot_zeropoint</span><span class=\"p\">(</span><span class=\"n\">pars</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Plot 2d histogram.</span>\n\n<span class=\"sd\"> Pars will be a dictionary containing:</span>\n<span class=\"sd\"> data, figure_id, vmax, title_str, xp,yp, searchrad</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"kn\">from</span> <span class=\"nn\">matplotlib</span> <span class=\"k\">import</span> <span class=\"n\">pyplot</span> <span class=\"k\">as</span> <span class=\"n\">plt</span>\n\n <span class=\"n\">xp</span> <span class=\"o\">=</span> <span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;xp&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">yp</span> <span class=\"o\">=</span> <span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;yp&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">searchrad</span> <span 
class=\"o\">=</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;searchrad&#39;</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"mf\">0.5</span><span class=\"p\">)</span>\n\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">figure</span><span class=\"p\">(</span><span class=\"n\">num</span><span class=\"o\">=</span><span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;figure_id&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">clf</span><span class=\"p\">()</span>\n\n <span class=\"k\">if</span> <span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;interactive&#39;</span><span class=\"p\">]:</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">ion</span><span class=\"p\">()</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">ioff</span><span class=\"p\">()</span>\n\n <span class=\"n\">a</span><span class=\"o\">=</span><span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">imshow</span><span class=\"p\">(</span><span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;data&#39;</span><span class=\"p\">],</span><span class=\"n\">vmin</span><span class=\"o\">=</span><span class=\"mi\">0</span><span class=\"p\">,</span><span class=\"n\">vmax</span><span class=\"o\">=</span><span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;vmax&#39;</span><span class=\"p\">],</span><span class=\"n\">interpolation</span><span class=\"o\">=</span><span class=\"s1\">&#39;nearest&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">jet</span><span class=\"p\">()</span><span class=\"c1\">#gray()</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">colorbar</span><span class=\"p\">()</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">title</span><span class=\"p\">(</span><span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;title_str&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">plot</span><span class=\"p\">(</span><span class=\"n\">xp</span><span class=\"o\">+</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"o\">+</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">color</span><span class=\"o\">=</span><span class=\"s1\">&#39;red&#39;</span><span class=\"p\">,</span><span class=\"n\">marker</span><span class=\"o\">=</span><span class=\"s1\">&#39;+&#39;</span><span class=\"p\">,</span><span class=\"n\">markersize</span><span class=\"o\">=</span><span class=\"mi\">24</span><span class=\"p\">)</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">plot</span><span class=\"p\">(</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">color</span><span class=\"o\">=</span><span class=\"s1\">&#39;yellow&#39;</span><span class=\"p\">,</span><span class=\"n\">marker</span><span class=\"o\">=</span><span class=\"s1\">&#39;+&#39;</span><span class=\"p\">,</span><span 
class=\"n\">markersize</span><span class=\"o\">=</span><span class=\"mi\">120</span><span class=\"p\">)</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">text</span><span class=\"p\">(</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"s2\">&quot;Offset=0,0&quot;</span><span class=\"p\">,</span>\n <span class=\"n\">verticalalignment</span><span class=\"o\">=</span><span class=\"s1\">&#39;bottom&#39;</span><span class=\"p\">,</span><span class=\"n\">color</span><span class=\"o\">=</span><span class=\"s1\">&#39;yellow&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">xlabel</span><span class=\"p\">(</span><span class=\"s2\">&quot;Offset in X (pixels)&quot;</span><span class=\"p\">)</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">ylabel</span><span class=\"p\">(</span><span class=\"s2\">&quot;Offset in Y (pixels)&quot;</span><span class=\"p\">)</span>\n\n <span class=\"k\">if</span> <span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;plotname&#39;</span><span class=\"p\">]:</span>\n <span class=\"n\">suffix</span> <span class=\"o\">=</span> <span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;plotname&#39;</span><span class=\"p\">][</span><span class=\"o\">-</span><span class=\"mi\">4</span><span class=\"p\">:]</span>\n <span class=\"k\">if</span> <span class=\"s1\">&#39;.&#39;</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"n\">suffix</span><span class=\"p\">:</span>\n <span class=\"n\">output</span> <span class=\"o\">+=</span> <span class=\"s1\">&#39;.png&#39;</span>\n <span class=\"nb\">format</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;png&#39;</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">suffix</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:]</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"s1\">&#39;png&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;pdf&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;ps&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;eps&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;svg&#39;</span><span class=\"p\">]:</span>\n <span class=\"nb\">format</span><span class=\"o\">=</span><span class=\"n\">suffix</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">:]</span>\n <span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">savefig</span><span class=\"p\">(</span><span class=\"n\">pars</span><span class=\"p\">[</span><span class=\"s1\">&#39;plotname&#39;</span><span class=\"p\">],</span><span class=\"nb\">format</span><span class=\"o\">=</span><span class=\"nb\">format</span><span class=\"p\">)</span></div>\n\n<div class=\"viewcode-block\" id=\"build_xy_zeropoint\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.build_xy_zeropoint\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">build_xy_zeropoint</span><span class=\"p\">(</span><span class=\"n\">imgxy</span><span class=\"p\">,</span><span class=\"n\">refxy</span><span class=\"p\">,</span><span class=\"n\">searchrad</span><span class=\"o\">=</span><span class=\"mf\">3.0</span><span class=\"p\">,</span><span class=\"n\">histplot</span><span 
class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">,</span><span class=\"n\">figure_id</span><span class=\"o\">=</span><span class=\"mi\">1</span><span class=\"p\">,</span>\n <span class=\"n\">plotname</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span> <span class=\"n\">interactive</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Create a matrix which contains the delta between each XY position and</span>\n<span class=\"sd\"> each UV position.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;Computing initial guess for X and Y shifts...&#39;</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># run C function to create ZP matrix</span>\n <span class=\"n\">xyshape</span> <span class=\"o\">=</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">searchrad</span><span class=\"o\">*</span><span class=\"mi\">2</span><span class=\"p\">)</span><span class=\"o\">+</span><span class=\"mi\">1</span>\n <span class=\"n\">zpmat</span> <span class=\"o\">=</span> <span class=\"n\">cdriz</span><span class=\"o\">.</span><span class=\"n\">arrxyzero</span><span class=\"p\">(</span><span class=\"n\">imgxy</span><span class=\"o\">.</span><span class=\"n\">astype</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">float32</span><span class=\"p\">),</span> <span class=\"n\">refxy</span><span class=\"o\">.</span><span class=\"n\">astype</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">float32</span><span class=\"p\">),</span> <span class=\"n\">searchrad</span><span class=\"p\">)</span>\n\n <span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">,</span><span class=\"n\">flux</span><span class=\"p\">,</span><span class=\"n\">zpqual</span> <span class=\"o\">=</span> <span class=\"n\">find_xy_peak</span><span class=\"p\">(</span><span class=\"n\">zpmat</span><span class=\"p\">,</span><span class=\"n\">center</span><span class=\"o\">=</span><span class=\"p\">(</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">searchrad</span><span class=\"p\">))</span>\n <span class=\"k\">if</span> <span class=\"n\">zpqual</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;Found initial X and Y shifts of &#39;</span><span class=\"p\">,</span><span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39; with significance of &#39;</span><span class=\"p\">,</span><span class=\"n\">zpqual</span><span class=\"p\">,</span> <span class=\"s1\">&#39;and &#39;</span><span class=\"p\">,</span><span class=\"n\">flux</span><span class=\"p\">,</span><span class=\"s1\">&#39; matches&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\"># try with a lower sigma to detect a peak in a sparse set of sources</span>\n <span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">,</span><span class=\"n\">flux</span><span class=\"p\">,</span><span 
class=\"n\">zpqual</span> <span class=\"o\">=</span> <span class=\"n\">find_xy_peak</span><span class=\"p\">(</span><span class=\"n\">zpmat</span><span class=\"p\">,</span><span class=\"n\">center</span><span class=\"o\">=</span><span class=\"p\">(</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"n\">searchrad</span><span class=\"p\">),</span><span class=\"n\">sigma</span><span class=\"o\">=</span><span class=\"mf\">1.0</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">zpqual</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;Found initial X and Y shifts of &#39;</span><span class=\"p\">,</span><span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39; with significance of &#39;</span><span class=\"p\">,</span><span class=\"n\">zpqual</span><span class=\"p\">,</span> <span class=\"s1\">&#39;and &#39;</span><span class=\"p\">,</span><span class=\"n\">flux</span><span class=\"p\">,</span><span class=\"s1\">&#39; matches&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;!&#39;</span><span class=\"o\">*</span><span class=\"mi\">80</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;!&#39;</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;! WARNING: No valid shift found within a search radius of &#39;</span><span class=\"p\">,</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"s1\">&#39; pixels.&#39;</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;!&#39;</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;!&#39;</span><span class=\"o\">*</span><span class=\"mi\">80</span><span class=\"p\">)</span>\n\n <span class=\"k\">if</span> <span class=\"n\">histplot</span><span class=\"p\">:</span>\n <span class=\"n\">zpstd</span> <span class=\"o\">=</span> <span class=\"n\">flux</span><span class=\"o\">//</span><span class=\"mi\">5</span>\n <span class=\"k\">if</span> <span class=\"n\">zpstd</span> <span class=\"o\">&lt;</span> <span class=\"mi\">10</span><span class=\"p\">:</span> <span class=\"n\">zpstd</span> <span class=\"o\">=</span> <span class=\"mi\">10</span>\n <span class=\"c1\">#if zpstd &gt; 100: zpstd = 100</span>\n <span class=\"k\">if</span> <span class=\"n\">zpqual</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">zpstd</span> <span class=\"o\">=</span> <span class=\"mi\">10</span>\n <span class=\"n\">zqual</span> <span class=\"o\">=</span> <span class=\"mf\">0.0</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">zqual</span> <span class=\"o\">=</span> <span class=\"n\">zpqual</span>\n\n <span class=\"n\">title_str</span> <span class=\"o\">=</span> <span class=\"s2\">&quot;Histogram of offsets: Peak has </span><span class=\"si\">%d</span><span class=\"s2\"> matches at (</span><span class=\"si\">%0.4g</span><span class=\"s2\">, </span><span class=\"si\">%0.4g</span><span class=\"s2\">)&quot;</span><span class=\"o\">%</span><span 
class=\"p\">(</span><span class=\"n\">flux</span><span class=\"p\">,</span><span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">)</span>\n\n <span class=\"n\">plot_pars</span> <span class=\"o\">=</span> <span class=\"p\">{</span><span class=\"s1\">&#39;data&#39;</span><span class=\"p\">:</span><span class=\"n\">zpmat</span><span class=\"p\">,</span><span class=\"s1\">&#39;figure_id&#39;</span><span class=\"p\">:</span><span class=\"n\">figure_id</span><span class=\"p\">,</span><span class=\"s1\">&#39;vmax&#39;</span><span class=\"p\">:</span><span class=\"n\">zpstd</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;xp&#39;</span><span class=\"p\">:</span><span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"s1\">&#39;yp&#39;</span><span class=\"p\">:</span><span class=\"n\">yp</span><span class=\"p\">,</span><span class=\"s1\">&#39;searchrad&#39;</span><span class=\"p\">:</span><span class=\"n\">searchrad</span><span class=\"p\">,</span><span class=\"s1\">&#39;title_str&#39;</span><span class=\"p\">:</span><span class=\"n\">title_str</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;plotname&#39;</span><span class=\"p\">:</span><span class=\"n\">plotname</span><span class=\"p\">,</span> <span class=\"s1\">&#39;interactive&#39;</span><span class=\"p\">:</span><span class=\"n\">interactive</span><span class=\"p\">}</span>\n\n <span class=\"n\">plot_zeropoint</span><span class=\"p\">(</span><span class=\"n\">plot_pars</span><span class=\"p\">)</span>\n <span class=\"k\">del</span> <span class=\"n\">zpmat</span>\n\n <span class=\"k\">return</span> <span class=\"n\">xp</span><span class=\"p\">,</span><span class=\"n\">yp</span><span class=\"p\">,</span><span class=\"n\">flux</span><span class=\"p\">,</span><span class=\"n\">zpqual</span></div>\n\n<div class=\"viewcode-block\" id=\"build_pos_grid\"><a class=\"viewcode-back\" href=\"../../tweakutils.html#drizzlepac.tweakutils.build_pos_grid\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">build_pos_grid</span><span class=\"p\">(</span><span class=\"n\">start</span><span class=\"p\">,</span><span class=\"n\">end</span><span class=\"p\">,</span><span class=\"n\">nstep</span><span class=\"p\">,</span> <span class=\"n\">mesh</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Return a grid of positions starting at X,Y given by &#39;start&#39;, and ending</span>\n<span class=\"sd\"> at X,Y given by &#39;end&#39;. 
def build_pos_grid(start, end, nstep, mesh=False):
    """
    Return a grid of positions starting at X,Y given by 'start', and ending
    at X,Y given by 'end'.  The grid will be completely filled in X and Y by
    every 'step' interval.
    """
    from . import linearfit
    # Build X and Y arrays
    dx = (end[0] - start[0])
    if dx < 0:
        nstart = end
        end = start
        start = nstart
        dx = -dx
    stepx = dx / nstep
    # Perform linear fit to find exact line that connects start and end
    xarr = np.arange(start[0], end[0] + stepx/2.0, stepx)
    yarr = np.interp(xarr, [start[0], end[0]], [start[1], end[1]])

    # create grid of positions
    if mesh:
        xa, ya = np.meshgrid(xarr, yarr)
        xarr = xa.ravel()
        yarr = ya.ravel()

    return xarr, yarr
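
# Editor's illustrative sketch (not part of the drizzlepac source):
# build_pos_grid() samples positions along the line between two endpoints;
# the endpoints and step count below are made-up examples.
def _demo_build_pos_grid():
    xarr, yarr = build_pos_grid((0.0, 0.0), (10.0, 5.0), 5)
    return xarr, yarr                       # x = 0, 2, ..., 10 with y on the connecting line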
class=\"n\">xarr</span><span class=\"p\">,</span><span class=\"n\">yarr</span></div>\n</pre></div>\n\n </div>\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"../../genindex.html\" title=\"General Index\"\n >index</a></li>\n <li class=\"right\" >\n <a href=\"../../py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"../../index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"../index.html\" >Module code</a> &#187;</li> \n </ul>\n </div>\n <div class=\"footer\" role=\"contentinfo\">\n &#169; Copyright 2017, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara.\n Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.5.1.\n </div>\n </body>\n</html>" }, { "alpha_fraction": 0.7010124921798706, "alphanum_fraction": 0.707348108291626, "avg_line_length": 53.52561950683594, "blob_id": "c57f80e3a86ccba7ceff2bb95621be8fc5eaa1ac", "content_id": "957696c2fda4d46ef9bd586b021c88022b56f3f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 65976, "license_type": "no_license", "max_line_length": 179, "num_lines": 1210, "path": "/lib/drizzlepac/htmlhelp/genindex.html", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n \n <title>Index &#8212; DrizzlePac 2.1.16 (05-June-2017) documentation</title>\n \n <link rel=\"stylesheet\" href=\"_static/stsci_sphinx.css\" type=\"text/css\" />\n <link rel=\"stylesheet\" href=\"_static/pygments.css\" type=\"text/css\" />\n \n <script type=\"text/javascript\">\n var DOCUMENTATION_OPTIONS = {\n URL_ROOT: './',\n VERSION: '2.1.16 (05-June-2017)',\n COLLAPSE_INDEX: false,\n FILE_SUFFIX: '.html',\n HAS_SOURCE: true,\n SOURCELINK_SUFFIX: '.txt'\n };\n </script>\n <script type=\"text/javascript\" src=\"_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"#\" />\n <link rel=\"search\" title=\"Search\" href=\"search.html\" /> \n </head>\n <body role=\"document\">\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"#\" title=\"General Index\"\n accesskey=\"I\">index</a></li>\n <li class=\"right\" >\n <a href=\"py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li> \n </ul>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n <p class=\"logo\"><a href=\"index.html\">\n <img class=\"logo\" src=\"_static/stsci_logo.png\" alt=\"Logo\"/>\n </a></p>\n\n \n\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3>Quick search</h3>\n <form class=\"search\" action=\"search.html\" 
method=\"get\">\n <div><input type=\"text\" name=\"q\" /></div>\n <div><input type=\"submit\" value=\"Go\" /></div>\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n </form>\n</div>\n<script type=\"text/javascript\">$('#searchbox').show(0);</script>\n </div>\n </div>\n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n <div class=\"body\" role=\"main\">\n \n\n<h1 id=\"index\">Index</h1>\n\n<div class=\"genindex-jumpbox\">\n <a href=\"#A\"><strong>A</strong></a>\n | <a href=\"#B\"><strong>B</strong></a>\n | <a href=\"#C\"><strong>C</strong></a>\n | <a href=\"#D\"><strong>D</strong></a>\n | <a href=\"#E\"><strong>E</strong></a>\n | <a href=\"#F\"><strong>F</strong></a>\n | <a href=\"#G\"><strong>G</strong></a>\n | <a href=\"#H\"><strong>H</strong></a>\n | <a href=\"#I\"><strong>I</strong></a>\n | <a href=\"#L\"><strong>L</strong></a>\n | <a href=\"#M\"><strong>M</strong></a>\n | <a href=\"#N\"><strong>N</strong></a>\n | <a href=\"#O\"><strong>O</strong></a>\n | <a href=\"#P\"><strong>P</strong></a>\n | <a href=\"#R\"><strong>R</strong></a>\n | <a href=\"#S\"><strong>S</strong></a>\n | <a href=\"#T\"><strong>T</strong></a>\n | <a href=\"#U\"><strong>U</strong></a>\n | <a href=\"#V\"><strong>V</strong></a>\n | <a href=\"#W\"><strong>W</strong></a>\n | <a href=\"#X\"><strong>X</strong></a>\n \n</div>\n<h2 id=\"A\">A</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.ACSInputImage\">ACSInputImage (class in drizzlepac.acsData)</a>\n</li>\n <li><a href=\"outimage.html#drizzlepac.outputimage.OutputImage.addDrizKeywords\">addDrizKeywords() (drizzlepac.outputimage.OutputImage method)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.addIVMInputs\">addIVMInputs() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.addMember\">addMember() (drizzlepac.staticMask.staticMask method)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.ProcSteps.addStep\">addStep() (drizzlepac.util.ProcSteps method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.append_not_matched_sources\">append_not_matched_sources() (drizzlepac.imgclasses.RefImage method)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.apply_exclusions\">apply_exclusions() (drizzlepac.catalogs.Catalog method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.apply_fitlin\">apply_fitlin() (in module drizzlepac.wcs_functions)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.apply_flux_limits\">apply_flux_limits() (drizzlepac.catalogs.Catalog method)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.applyContextPar\">applyContextPar() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.applyUserPars_steps\">applyUserPars_steps() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.convertwcs.archive_prefix_OPUS_WCS\">archive_prefix_OPUS_WCS() (in module stwcs.wcsutil.convertwcs)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.archive_wcs_file\">archive_wcs_file() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"astrodrizzle.html#drizzlepac.astrodrizzle.AstroDrizzle\">AstroDrizzle() (in 
module drizzlepac.astrodrizzle)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.atfile_ivm\">atfile_ivm() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.atfile_sci\">atfile_sci() (in module drizzlepac.util)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"B\">B</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap.backward\">backward() (drizzlepac.wcs_functions.WCSMap method)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.base_taskname\">base_taskname() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject\">baseImageObject (class in drizzlepac.imageObject)</a>\n</li>\n <li><a href=\"ablot.html#drizzlepac.ablot.blot\">blot() (in module drizzlepac.ablot)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.build_hstwcs\">build_hstwcs() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.build_pixel_transform\">build_pixel_transform() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.build_pos_grid\">build_pos_grid() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.build_xy_zeropoint\">build_xy_zeropoint() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.buildASNList\">buildASNList() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"ablot.html#drizzlepac.ablot.buildBlotParamDict\">buildBlotParamDict() (in module drizzlepac.ablot)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.buildCatalogs\">buildCatalogs() (drizzlepac.catalogs.Catalog method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.buildDefaultRefWCS\">buildDefaultRefWCS() (drizzlepac.imgclasses.Image method)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.buildDrizParamDict\">buildDrizParamDict() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.buildEmptyDRZ\">buildEmptyDRZ() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.buildERRmask\">buildERRmask() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.buildEXPmask\">buildEXPmask() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.buildFileList\">buildFileList() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.buildFileListOrig\">buildFileListOrig() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.buildIVMmask\">buildIVMmask() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.buildMask\">buildMask() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.buildMask\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"static.html#drizzlepac.staticMask.buildSignatureKey\">buildSignatureKey() (in module 
drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.buildSkyCatalog\">buildSkyCatalog() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog.buildXY\">buildXY() (drizzlepac.catalogs.RefCatalog method)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"C\">C</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.calcNewEdges\">calcNewEdges() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog\">Catalog (class in drizzlepac.catalogs)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.CCDInputImage\">CCDInputImage (class in drizzlepac.stisData)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.changeSuffixinASN\">changeSuffixinASN() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.check_blank\">check_blank() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.checkDGEOFile\">checkDGEOFile() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.checkForDuplicateInputs\">checkForDuplicateInputs() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.checkMultipleFiles\">checkMultipleFiles() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap.checkWCS\">checkWCS() (drizzlepac.wcs_functions.WCSMap method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.clean\">clean() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.clean\">(drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.clean\">(drizzlepac.imgclasses.RefImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"mdztab.html#drizzlepac.mdzhandler.cleanBlank\">cleanBlank() (in module drizzlepac.mdzhandler)</a>\n</li>\n <li><a href=\"mdztab.html#drizzlepac.mdzhandler.cleanInt\">cleanInt() (in module drizzlepac.mdzhandler)</a>\n</li>\n <li><a href=\"mdztab.html#drizzlepac.mdzhandler.cleanNaN\">cleanNaN() (in module drizzlepac.mdzhandler)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.clear_dirty_flag\">clear_dirty_flag() (drizzlepac.imgclasses.RefImage method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.close\">close() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.close\">(drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.close\">(drizzlepac.imgclasses.RefImage method)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.close\">(drizzlepac.staticMask.staticMask method)</a>\n</li>\n </ul></li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog.COLNAMES\">COLNAMES (drizzlepac.catalogs.RefCatalog attribute)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.UserCatalog.COLNAMES\">(drizzlepac.catalogs.UserCatalog attribute)</a>\n</li>\n </ul></li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.compute_fit_rms\">compute_fit_rms() (drizzlepac.imgclasses.Image method)</a>\n</li>\n 
<li><a href=\"utilfuncs.html#drizzlepac.util.compute_texptime\">compute_texptime() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.imageObject.compute_wcslin\">compute_wcslin() (drizzlepac.imageObject.imageObject method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.computeEdgesCenter\">computeEdgesCenter() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.computeRange\">computeRange() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.constructFilename\">constructFilename() (in module drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.convertWCS\">convertWCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.count_sci_extensions\">count_sci_extensions() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.countImages\">countImages() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.create_CD\">create_CD() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.create_output\">create_output() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.convertwcs.create_prefix_OPUS_WCS\">create_prefix_OPUS_WCS() (in module stwcs.wcsutil.convertwcs)</a>\n</li>\n <li><a href=\"updatehdr.html#drizzlepac.updatehdr.create_unique_wcsname\">create_unique_wcsname() (in module drizzlepac.updatehdr)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.create_wcscorr\">create_wcscorr() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.createCorrFile\">createCorrFile() (in module drizzlepac.drizCR)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.createFile\">createFile() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC2InputImage.createHoleMask\">createHoleMask() (drizzlepac.nicmosData.NIC2InputImage method)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.createImageObjectList\">createImageObjectList() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.createMask\">createMask() (in module drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"median.html#drizzlepac.createMedian.createMedian\">createMedian() (in module drizzlepac.createMedian)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.createStaticMask\">createStaticMask() (in module drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.createWcsHDU\">createWcsHDU() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.createWCSObject\">createWCSObject() (in module drizzlepac.wcs_functions)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"D\">D</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.ddtohms\">ddtohms() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.delete_wcscorr_row\">delete_wcscorr_row() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.deleteMask\">deleteMask() (drizzlepac.staticMask.staticMask method)</a>\n</li>\n <li><a 
href=\"tweakback.html#drizzlepac.tweakback.determine_extnum\">determine_extnum() (in module drizzlepac.tweakback)</a>\n</li>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.determine_orig_wcsname\">determine_orig_wcsname() (in module drizzlepac.tweakback)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.displayBadRefimageWarningBox\">displayBadRefimageWarningBox() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.displayEmptyInputWarningBox\">displayEmptyInputWarningBox() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.displayMakewcsWarningBox\">displayMakewcsWarningBox() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"ablot.html#drizzlepac.ablot.do_blot\">do_blot() (in module drizzlepac.ablot)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.do_driz\">do_driz() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.ACSInputImage.doUnitConversions\">doUnitConversions() (drizzlepac.acsData.ACSInputImage method)</a>\n\n <ul>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.doUnitConversions\">(drizzlepac.nicmosData.NICMOSInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.FUVInputImage.doUnitConversions\">(drizzlepac.stisData.FUVInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.NUVInputImage.doUnitConversions\">(drizzlepac.stisData.NUVInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.STISInputImage.doUnitConversions\">(drizzlepac.stisData.STISInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage.doUnitConversions\">(drizzlepac.wfc3Data.WFC3IRInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3UVISInputImage.doUnitConversions\">(drizzlepac.wfc3Data.WFC3UVISInputImage method)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.doUnitConversions\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.drizCR\">drizCR() (in module drizzlepac.drizCR)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.drizFinal\">drizFinal() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.drizSeparate\">drizSeparate() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.drizzle\">drizzle() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"ablot.html#module-drizzlepac.ablot\">drizzlepac.ablot (module)</a>\n</li>\n <li><a href=\"acsobjects.html#module-drizzlepac.acsData\">drizzlepac.acsData (module)</a>\n</li>\n <li><a href=\"adrizzle.html#module-drizzlepac.adrizzle\">drizzlepac.adrizzle (module)</a>\n</li>\n <li><a href=\"astrodrizzle.html#module-drizzlepac.astrodrizzle\">drizzlepac.astrodrizzle (module)</a>\n</li>\n <li><a href=\"catalogs.html#module-drizzlepac.catalogs\">drizzlepac.catalogs (module)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"median.html#module-drizzlepac.createMedian\">drizzlepac.createMedian (module)</a>\n</li>\n <li><a href=\"drizcr.html#module-drizzlepac.drizCR\">drizzlepac.drizCR (module)</a>\n</li>\n <li><a href=\"imagefindpars.html#module-drizzlepac.imagefindpars\">drizzlepac.imagefindpars (module)</a>\n</li>\n <li><a href=\"baseobjects.html#module-drizzlepac.imageObject\">drizzlepac.imageObject 
(module)</a>\n</li>\n <li><a href=\"image.html#module-drizzlepac.imgclasses\">drizzlepac.imgclasses (module)</a>\n</li>\n <li><a href=\"mapreg.html#module-drizzlepac.mapreg\">drizzlepac.mapreg (module)</a>\n</li>\n <li><a href=\"mdztab.html#module-drizzlepac.mdzhandler\">drizzlepac.mdzhandler (module)</a>\n</li>\n <li><a href=\"nicmosobjects.html#module-drizzlepac.nicmosData\">drizzlepac.nicmosData (module)</a>\n</li>\n <li><a href=\"outimage.html#module-drizzlepac.outputimage\">drizzlepac.outputimage (module)</a>\n</li>\n <li><a href=\"photeq.html#module-drizzlepac.photeq\">drizzlepac.photeq (module)</a>\n</li>\n <li><a href=\"pixtopix.html#module-drizzlepac.pixtopix\">drizzlepac.pixtopix (module)</a>\n</li>\n <li><a href=\"pixtosky.html#module-drizzlepac.pixtosky\">drizzlepac.pixtosky (module)</a>\n</li>\n <li><a href=\"process.html#module-drizzlepac.processInput\">drizzlepac.processInput (module)</a>\n</li>\n <li><a href=\"refimagefindpars.html#module-drizzlepac.refimagefindpars\">drizzlepac.refimagefindpars (module)</a>\n</li>\n <li><a href=\"process.html#module-drizzlepac.resetbits\">drizzlepac.resetbits (module)</a>\n</li>\n <li><a href=\"sky.html#module-drizzlepac.sky\">drizzlepac.sky (module)</a>\n</li>\n <li><a href=\"skytopix.html#module-drizzlepac.skytopix\">drizzlepac.skytopix (module)</a>\n</li>\n <li><a href=\"static.html#module-drizzlepac.staticMask\">drizzlepac.staticMask (module)</a>\n</li>\n <li><a href=\"stisobjects.html#module-drizzlepac.stisData\">drizzlepac.stisData (module)</a>\n</li>\n <li><a href=\"tweakback.html#module-drizzlepac.tweakback\">drizzlepac.tweakback (module)</a>\n</li>\n <li><a href=\"tweakreg.html#module-drizzlepac.tweakreg\">drizzlepac.tweakreg (module)</a>\n</li>\n <li><a href=\"tweakutils.html#module-drizzlepac.tweakutils\">drizzlepac.tweakutils (module)</a>\n</li>\n <li><a href=\"updatehdr.html#module-drizzlepac.updatehdr\">drizzlepac.updatehdr (module)</a>\n</li>\n <li><a href=\"updatenpol.html#module-drizzlepac.updatenpol\">drizzlepac.updatenpol (module)</a>\n</li>\n <li><a href=\"utilfuncs.html#module-drizzlepac.util\">drizzlepac.util (module)</a>\n</li>\n <li><a href=\"wcsutils.html#module-drizzlepac.wcs_functions\">drizzlepac.wcs_functions (module)</a>\n</li>\n <li><a href=\"wfc3objects.html#module-drizzlepac.wfc3Data\">drizzlepac.wfc3Data (module)</a>\n</li>\n <li><a href=\"wfpc2objects.html#module-drizzlepac.wfpc2Data\">drizzlepac.wfpc2Data (module)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"E\">E</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.end_logging\">end_logging() (in module drizzlepac.util)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.ProcSteps.endStep\">endStep() (drizzlepac.util.ProcSteps method)</a>\n</li>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.extract_input_filenames\">extract_input_filenames() (in module drizzlepac.tweakback)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"F\">F</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"updatenpol.html#drizzlepac.updatenpol.find_d2ifile\">find_d2ifile() (in module drizzlepac.updatenpol)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.find_DQ_extension\">find_DQ_extension() (drizzlepac.imageObject.baseImageObject method)</a>\n\n 
<ul>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.find_DQ_extension\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"outimage.html#drizzlepac.outputimage.OutputImage.find_kwupdate_location\">find_kwupdate_location() (drizzlepac.outputimage.OutputImage method)</a>\n</li>\n <li><a href=\"updatenpol.html#drizzlepac.updatenpol.find_npolfile\">find_npolfile() (in module drizzlepac.updatenpol)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.find_wcscorr_row\">find_wcscorr_row() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.find_xy_peak\">find_xy_peak() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.findExtNum\">findExtNum() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"mdztab.html#drizzlepac.mdzhandler.findFormat\">findFormat() (in module drizzlepac.mdzhandler)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.findrootname\">findrootname() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.findWCSExtn\">findWCSExtn() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.fitlin\">fitlin() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.fitlin_clipped\">fitlin_clipped() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.fitlin_rscale\">fitlin_rscale() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.IdentityMap.forward\">forward() (drizzlepac.wcs_functions.IdentityMap method)</a>\n\n <ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.LinearMap.forward\">(drizzlepac.wcs_functions.LinearMap method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap.forward\">(drizzlepac.wcs_functions.WCSMap method)</a>\n</li>\n </ul></li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.FUVInputImage\">FUVInputImage (class in drizzlepac.stisData)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"G\">G</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.gauss\">gauss() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.gauss_array\">gauss_array() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.generateCatalog\">generateCatalog() (in module drizzlepac.catalogs)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.generateRaDec\">generateRaDec() (drizzlepac.catalogs.Catalog method)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog.generateRaDec\">(drizzlepac.catalogs.RefCatalog method)</a>\n</li>\n </ul></li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.generateXY\">generateXY() (drizzlepac.catalogs.Catalog method)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.ImageCatalog.generateXY\">(drizzlepac.catalogs.ImageCatalog method)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog.generateXY\">(drizzlepac.catalogs.RefCatalog method)</a>\n</li>\n <li><a 
href=\"catalogs.html#drizzlepac.catalogs.UserCatalog.generateXY\">(drizzlepac.catalogs.UserCatalog method)</a>\n</li>\n </ul></li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.get_data\">get_data() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.get_detnum\">get_detnum() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.get_expstart\">get_expstart() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.get_hstwcs\">get_hstwcs() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap.get_pix_ratio\">get_pix_ratio() (drizzlepac.wcs_functions.WCSMap method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.get_pix_ratio_from_WCS\">get_pix_ratio_from_WCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.get_pool_size\">get_pool_size() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.get_shiftfile_row\">get_shiftfile_row() (drizzlepac.imgclasses.Image method)</a>\n\n <ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.get_shiftfile_row\">(drizzlepac.imgclasses.RefImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.get_wcs\">get_wcs() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.get_xy_catnames\">get_xy_catnames() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getAllData\">getAllData() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.getConfigObjPar\">getConfigObjPar() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.ACSInputImage.getdarkcurrent\">getdarkcurrent() (drizzlepac.acsData.ACSInputImage method)</a>\n\n <ul>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getdarkcurrent\">(drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.getdarkcurrent\">(drizzlepac.nicmosData.NICMOSInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.CCDInputImage.getdarkcurrent\">(drizzlepac.stisData.CCDInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.FUVInputImage.getdarkcurrent\">(drizzlepac.stisData.FUVInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.NUVInputImage.getdarkcurrent\">(drizzlepac.stisData.NUVInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage.getdarkcurrent\">(drizzlepac.wfc3Data.WFC3IRInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3UVISInputImage.getdarkcurrent\">(drizzlepac.wfc3Data.WFC3UVISInputImage method)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.getdarkcurrent\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getdarkimg\">getdarkimg() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.getdarkimg\">(drizzlepac.nicmosData.NICMOSInputImage method)</a>\n</li>\n <li><a 
href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage.getdarkimg\">(drizzlepac.wfc3Data.WFC3IRInputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getData\">getData() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.getDefaultConfigObj\">getDefaultConfigObj() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.getEffGain\">getEffGain() (drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getexptimeimg\">getexptimeimg() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.getexptimeimg\">(drizzlepac.nicmosData.NICMOSInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage.getexptimeimg\">(drizzlepac.wfc3Data.WFC3IRInputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getExtensions\">getExtensions() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.getFilename\">getFilename() (drizzlepac.staticMask.staticMask method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getflat\">getflat() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.getflat\">(drizzlepac.nicmosData.NICMOSInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.STISInputImage.getflat\">(drizzlepac.stisData.STISInputImage method)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.getflat\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.getFullParList\">getFullParList() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getGain\">getGain() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getHeader\">getHeader() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"ablot.html#drizzlepac.ablot.getHelpAsString\">getHelpAsString() (in module drizzlepac.ablot)</a>\n\n <ul>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.getHelpAsString\">(in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"median.html#drizzlepac.createMedian.getHelpAsString\">(in module drizzlepac.createMedian)</a>\n</li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.getHelpAsString\">(in module drizzlepac.drizCR)</a>\n</li>\n <li><a href=\"imagefindpars.html#drizzlepac.imagefindpars.getHelpAsString\">(in module drizzlepac.imagefindpars)</a>\n</li>\n <li><a href=\"refimagefindpars.html#drizzlepac.refimagefindpars.getHelpAsString\">(in module drizzlepac.refimagefindpars)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.resetbits.getHelpAsString\">(in module drizzlepac.resetbits)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.getHelpAsString\">(in module drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.getHelpAsString\">(in module drizzlepac.tweakback)</a>\n</li>\n <li><a 
href=\"updatenpol.html#drizzlepac.updatenpol.getHelpAsString\">(in module drizzlepac.updatenpol)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getInstrParameter\">getInstrParameter() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getKeywordList\">getKeywordList() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.getMaskArray\">getMaskArray() (drizzlepac.staticMask.staticMask method)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.getMaskname\">getMaskname() (drizzlepac.staticMask.staticMask method)</a>\n</li>\n <li><a href=\"mdztab.html#drizzlepac.mdzhandler.getMdriztabParameters\">getMdriztabParameters() (in module drizzlepac.mdzhandler)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.getMdriztabPars\">getMdriztabPars() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getNumpyType\">getNumpyType() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getOutputName\">getOutputName() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.CCDInputImage.getReadNoise\">getReadNoise() (drizzlepac.stisData.CCDInputImage method)</a>\n\n <ul>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.getReadNoise\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getReadNoiseImage\">getReadNoiseImage() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.getRotatedSize\">getRotatedSize() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.getSectionName\">getSectionName() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.getskyimg\">getskyimg() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage.getskyimg\">(drizzlepac.wfc3Data.WFC3IRInputImage method)</a>\n</li>\n </ul></li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"H\">H</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"ablot.html#drizzlepac.ablot.help\">help() (in module drizzlepac.ablot)</a>\n\n <ul>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.help\">(in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"median.html#drizzlepac.createMedian.help\">(in module drizzlepac.createMedian)</a>\n</li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.help\">(in module drizzlepac.drizCR)</a>\n</li>\n <li><a href=\"imagefindpars.html#drizzlepac.imagefindpars.help\">(in module drizzlepac.imagefindpars)</a>\n</li>\n <li><a href=\"mapreg.html#drizzlepac.mapreg.help\">(in module drizzlepac.mapreg)</a>\n</li>\n <li><a href=\"photeq.html#drizzlepac.photeq.help\">(in module drizzlepac.photeq)</a>\n</li>\n <li><a href=\"refimagefindpars.html#drizzlepac.refimagefindpars.help\">(in module drizzlepac.refimagefindpars)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.resetbits.help\">(in module drizzlepac.resetbits)</a>\n</li>\n <li><a href=\"sky.html#drizzlepac.sky.help\">(in module 
drizzlepac.sky)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.help\">(in module drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.help\">(in module drizzlepac.tweakback)</a>\n</li>\n <li><a href=\"tweakreg.html#drizzlepac.tweakreg.help\">(in module drizzlepac.tweakreg)</a>\n</li>\n <li><a href=\"updatenpol.html#drizzlepac.updatenpol.help\">(in module drizzlepac.updatenpol)</a>\n</li>\n </ul></li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.HRCInputImage\">HRCInputImage (class in drizzlepac.acsData)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"I\">I</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.IdentityMap\">IdentityMap (class in drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image\">Image (class in drizzlepac.imgclasses)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.ImageCatalog\">ImageCatalog (class in drizzlepac.catalogs)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.imageObject\">imageObject (class in drizzlepac.imageObject)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog.IN_UNITS\">IN_UNITS (drizzlepac.catalogs.RefCatalog attribute)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.UserCatalog.IN_UNITS\">(drizzlepac.catalogs.UserCatalog attribute)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.info\">info() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.init_logging\">init_logging() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.init_wcscorr\">init_wcscorr() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.interpret_maskval\">interpret_maskval() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.is_blank\">is_blank() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.isASNTable\">isASNTable() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.isCommaList\">isCommaList() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.isCountRate\">isCountRate() (drizzlepac.nicmosData.NICMOSInputImage method)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.isfloat\">isfloat() (in module drizzlepac.tweakutils)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"L\">L</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.linearize\">linearize() (in module drizzlepac.tweakback)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.LinearMap\">LinearMap (class in drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.loadFileList\">loadFileList() (in module drizzlepac.util)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"M\">M</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n 
<li><a href=\"wcsutils.html#drizzlepac.wcs_functions.make_outputwcs\">make_outputwcs() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.make_perfect_cd\">make_perfect_cd() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.make_vector_plot\">make_vector_plot() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.manageInputCopies\">manageInputCopies() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"mapreg.html#drizzlepac.mapreg.map_region_files\">map_region_files() (in module drizzlepac.mapreg)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"mapreg.html#drizzlepac.mapreg.MapReg\">MapReg() (in module drizzlepac.mapreg)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.match\">match() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"median.html#drizzlepac.createMedian.median\">median() (in module drizzlepac.createMedian)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.mergeDQarray\">mergeDQarray() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.mergeWCS\">mergeWCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"N\">N</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.ndfind_old\">ndfind_old() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC1InputImage\">NIC1InputImage (class in drizzlepac.nicmosData)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC2InputImage\">NIC2InputImage (class in drizzlepac.nicmosData)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC3InputImage\">NIC3InputImage (class in drizzlepac.nicmosData)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage\">NICMOSInputImage (class in drizzlepac.nicmosData)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.NUVInputImage\">NUVInputImage (class in drizzlepac.stisData)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"O\">O</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.openFile\">openFile() (drizzlepac.imgclasses.Image method)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"outimage.html#drizzlepac.outputimage.OutputImage\">OutputImage (class in drizzlepac.outputimage)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"P\">P</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.PAR_PREFIX\">PAR_PREFIX (drizzlepac.catalogs.Catalog attribute)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog.PAR_PREFIX\">(drizzlepac.catalogs.RefCatalog attribute)</a>\n</li>\n </ul></li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.parse_atfile_cat\">parse_atfile_cat() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.parse_colname\">parse_colname() (in module 
drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.parse_colnames\">parse_colnames() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.parse_exclusions\">parse_exclusions() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.parse_skypos\">parse_skypos() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.performFit\">performFit() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"photeq.html#drizzlepac.photeq.photeq\">photeq() (in module drizzlepac.photeq)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.plot_zeropoint\">plot_zeropoint() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.plotXYCatalog\">plotXYCatalog() (drizzlepac.catalogs.Catalog method)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.UserCatalog.plotXYCatalog\">(drizzlepac.catalogs.UserCatalog method)</a>\n</li>\n </ul></li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.print_pkg_versions\">print_pkg_versions() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.printParams\">printParams() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.process_input\">process_input() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.processFilenames\">processFilenames() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.ProcSteps\">ProcSteps (class in drizzlepac.util)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.putData\">putData() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"R\">R</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.radec_hmstodd\">radec_hmstodd() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap.rd2xy\">rd2xy() (drizzlepac.wcs_functions.WCSMap method)</a>\n\n <ul>\n <li><a href=\"skytopix.html#drizzlepac.skytopix.rd2xy\">(in module drizzlepac.skytopix)</a>\n</li>\n </ul></li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.read_ASCII_cols\">read_ASCII_cols() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.read_FITS_cols\">read_FITS_cols() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.readAltWCS\">readAltWCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.readcols\">readcols() (in module drizzlepac.tweakutils)</a>\n\n <ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.readcols\">(in module drizzlepac.util)</a>\n</li>\n </ul></li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.readCommaList\">readCommaList() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.RefCatalog\">RefCatalog (class in drizzlepac.catalogs)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage\">RefImage (class in drizzlepac.imgclasses)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.removeAllAltWCS\">removeAllAltWCS() (in module 
drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.removeFileSafely\">removeFileSafely() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.reportResourceUsage\">reportResourceUsage() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.ProcSteps.reportTimes\">reportTimes() (drizzlepac.util.ProcSteps method)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.resetbits.reset_dq_bits\">reset_dq_bits() (in module drizzlepac.resetbits)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.resetDQBits\">resetDQBits() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.restore_file_from_wcscorr\">restore_file_from_wcscorr() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.WCSObject.restore_wcs\">restore_wcs() (drizzlepac.imageObject.WCSObject method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.restoreDefaultWCS\">restoreDefaultWCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.returnAllChips\">returnAllChips() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"ablot.html#drizzlepac.ablot.run\">run() (in module drizzlepac.ablot)</a>\n\n <ul>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.run\">(in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"median.html#drizzlepac.createMedian.run\">(in module drizzlepac.createMedian)</a>\n</li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.run\">(in module drizzlepac.drizCR)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.resetbits.run\">(in module drizzlepac.resetbits)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.run\">(in module drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.run\">(in module drizzlepac.tweakback)</a>\n</li>\n <li><a href=\"updatenpol.html#drizzlepac.updatenpol.run\">(in module drizzlepac.updatenpol)</a>\n</li>\n </ul></li>\n <li><a href=\"ablot.html#drizzlepac.ablot.run_blot\">run_blot() (in module drizzlepac.ablot)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.run_driz\">run_driz() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.run_driz_chip\">run_driz_chip() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.run_driz_img\">run_driz_img() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"ablot.html#drizzlepac.ablot.runBlot\">runBlot() (in module drizzlepac.ablot)</a>\n</li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.rundrizCR\">rundrizCR() (in module drizzlepac.drizCR)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.runmakewcs\">runmakewcs() (in module drizzlepac.processInput)</a>\n\n <ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.runmakewcs\">(in module drizzlepac.util)</a>\n</li>\n </ul></li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"S\">S</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask.saveToFile\">saveToFile() (drizzlepac.staticMask.staticMask method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.saveVirtualOutputs\">saveVirtualOutputs() 
(drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.SBCInputImage\">SBCInputImage (class in drizzlepac.acsData)</a>\n</li>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.ACSInputImage.SEPARATOR\">SEPARATOR (drizzlepac.acsData.ACSInputImage attribute)</a>\n\n <ul>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NICMOSInputImage.SEPARATOR\">(drizzlepac.nicmosData.NICMOSInputImage attribute)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.STISInputImage.SEPARATOR\">(drizzlepac.stisData.STISInputImage attribute)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3InputImage.SEPARATOR\">(drizzlepac.wfc3Data.WFC3InputImage attribute)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.SEPARATOR\">(drizzlepac.wfpc2Data.WFPC2InputImage attribute)</a>\n</li>\n </ul></li>\n <li><a href=\"outimage.html#drizzlepac.outputimage.OutputImage.set_bunit\">set_bunit() (drizzlepac.outputimage.OutputImage method)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.set_colnames\">set_colnames() (drizzlepac.catalogs.Catalog method)</a>\n\n <ul>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.UserCatalog.set_colnames\">(drizzlepac.catalogs.UserCatalog method)</a>\n</li>\n </ul></li>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.set_dirty\">set_dirty() (drizzlepac.imgclasses.RefImage method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.set_mt_wcs\">set_mt_wcs() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.set_units\">set_units() (drizzlepac.imageObject.baseImageObject method)</a>\n\n <ul>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.imageObject.set_units\">(drizzlepac.imageObject.imageObject method)</a>\n</li>\n <li><a href=\"outimage.html#drizzlepac.outputimage.OutputImage.set_units\">(drizzlepac.outputimage.OutputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.set_wtscl\">set_wtscl() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.setCommonInput\">setCommonInput() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"drizcr.html#drizzlepac.drizCR.setDefaults\">setDefaults() (in module drizzlepac.drizCR)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.HRCInputImage.setInstrumentParameters\">setInstrumentParameters() (drizzlepac.acsData.HRCInputImage method)</a>\n\n <ul>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.SBCInputImage.setInstrumentParameters\">(drizzlepac.acsData.SBCInputImage method)</a>\n</li>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.WFCInputImage.setInstrumentParameters\">(drizzlepac.acsData.WFCInputImage method)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.imageObject.setInstrumentParameters\">(drizzlepac.imageObject.imageObject method)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC1InputImage.setInstrumentParameters\">(drizzlepac.nicmosData.NIC1InputImage method)</a>\n</li>\n <li><a href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC2InputImage.setInstrumentParameters\">(drizzlepac.nicmosData.NIC2InputImage method)</a>\n</li>\n <li><a 
href=\"nicmosobjects.html#drizzlepac.nicmosData.NIC3InputImage.setInstrumentParameters\">(drizzlepac.nicmosData.NIC3InputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.CCDInputImage.setInstrumentParameters\">(drizzlepac.stisData.CCDInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.FUVInputImage.setInstrumentParameters\">(drizzlepac.stisData.FUVInputImage method)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.NUVInputImage.setInstrumentParameters\">(drizzlepac.stisData.NUVInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage.setInstrumentParameters\">(drizzlepac.wfc3Data.WFC3IRInputImage method)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3UVISInputImage.setInstrumentParameters\">(drizzlepac.wfc3Data.WFC3UVISInputImage method)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage.setInstrumentParameters\">(drizzlepac.wfpc2Data.WFPC2InputImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"sky.html#drizzlepac.sky.sky\">sky() (in module drizzlepac.sky)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.sortSkyCatalog\">sortSkyCatalog() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"static.html#drizzlepac.staticMask.staticMask\">staticMask (class in drizzlepac.staticMask)</a>\n</li>\n <li><a href=\"stisobjects.html#drizzlepac.stisData.STISInputImage\">STISInputImage (class in drizzlepac.stisData)</a>\n</li>\n <li><a href=\"wcscorr.html#module-stwcs.wcsutil.convertwcs\">stwcs.wcsutil.convertwcs (module)</a>\n</li>\n <li><a href=\"wcscorr.html#module-stwcs.wcsutil.wcscorr\">stwcs.wcsutil.wcscorr (module)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"T\">T</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"mdztab.html#drizzlepac.mdzhandler.toBoolean\">toBoolean() (in module drizzlepac.mdzhandler)</a>\n</li>\n <li><a href=\"pixtopix.html#drizzlepac.pixtopix.tran\">tran() (in module drizzlepac.pixtopix)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.transformToRef\">transformToRef() (drizzlepac.imgclasses.Image method)</a>\n\n <ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.transformToRef\">(drizzlepac.imgclasses.RefImage method)</a>\n</li>\n </ul></li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.tweakback\">tweakback() (in module drizzlepac.tweakback)</a>, <a href=\"tweakback.html#drizzlepac.tweakback.tweakback\">[1]</a>\n</li>\n <li><a href=\"tweakreg.html#drizzlepac.tweakreg.TweakReg\">TweakReg() (in module drizzlepac.tweakreg)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"U\">U</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"updatenpol.html#drizzlepac.updatenpol.update\">update() (in module drizzlepac.updatenpol)</a>\n</li>\n <li><a href=\"tweakback.html#drizzlepac.tweakback.update_chip_wcs\">update_chip_wcs() (in module drizzlepac.tweakback)</a>\n</li>\n <li><a href=\"updatehdr.html#drizzlepac.updatehdr.update_from_shiftfile\">update_from_shiftfile() (in module drizzlepac.updatehdr)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.update_input\">update_input() (in module drizzlepac.util)</a>\n</li>\n <li><a 
href=\"wcsutils.html#drizzlepac.wcs_functions.update_linCD\">update_linCD() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.update_member_names\">update_member_names() (in module drizzlepac.processInput)</a>\n</li>\n <li><a href=\"updatehdr.html#drizzlepac.updatehdr.update_wcs\">update_wcs() (in module drizzlepac.updatehdr)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.update_wcscorr\">update_wcscorr() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"wcscorr.html#stwcs.wcsutil.wcscorr.update_wcscorr_column\">update_wcscorr_column() (in module stwcs.wcsutil.wcscorr)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.updateContextImage\">updateContextImage() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.updateData\">updateData() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.updateHeader\">updateHeader() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.updateImageWCS\">updateImageWCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"adrizzle.html#drizzlepac.adrizzle.updateInputDQArray\">updateInputDQArray() (in module drizzlepac.adrizzle)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.updateIVMName\">updateIVMName() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.updateNEXTENDKw\">updateNEXTENDKw() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.baseImageObject.updateOutputValues\">updateOutputValues() (drizzlepac.imageObject.baseImageObject method)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.updateWCS\">updateWCS() (in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"updatehdr.html#drizzlepac.updatehdr.updatewcs_with_shift\">updatewcs_with_shift() (in module drizzlepac.updatehdr)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.UserCatalog\">UserCatalog (class in drizzlepac.catalogs)</a>\n</li>\n <li><a href=\"process.html#drizzlepac.processInput.userStop\">userStop() (in module drizzlepac.processInput)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"V\">V</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.validateUserPars\">validateUserPars() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.verifyFilePermissions\">verifyFilePermissions() (in module drizzlepac.util)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.verifyRefimage\">verifyRefimage() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.verifyUniqueWcsname\">verifyUniqueWcsname() (in module drizzlepac.util)</a>\n</li>\n <li><a href=\"utilfuncs.html#drizzlepac.util.verifyUpdatewcs\">verifyUpdatewcs() (in module drizzlepac.util)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"W\">W</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.wcsfit\">wcsfit() 
(in module drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap\">WCSMap (class in drizzlepac.wcs_functions)</a>\n</li>\n <li><a href=\"baseobjects.html#drizzlepac.imageObject.WCSObject\">WCSObject (class in drizzlepac.imageObject)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3InputImage\">WFC3InputImage (class in drizzlepac.wfc3Data)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3IRInputImage\">WFC3IRInputImage (class in drizzlepac.wfc3Data)</a>\n</li>\n <li><a href=\"wfc3objects.html#drizzlepac.wfc3Data.WFC3UVISInputImage\">WFC3UVISInputImage (class in drizzlepac.wfc3Data)</a>\n</li>\n <li><a href=\"acsobjects.html#drizzlepac.acsData.WFCInputImage\">WFCInputImage (class in drizzlepac.acsData)</a>\n</li>\n <li><a href=\"wfpc2objects.html#drizzlepac.wfpc2Data.WFPC2InputImage\">WFPC2InputImage (class in drizzlepac.wfpc2Data)</a>\n</li>\n </ul></td>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"utilfuncs.html#drizzlepac.util.WithLogging\">WithLogging (class in drizzlepac.util)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.write_fit_catalog\">write_fit_catalog() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.write_outxy\">write_outxy() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"tweakutils.html#drizzlepac.tweakutils.write_shiftfile\">write_shiftfile() (in module drizzlepac.tweakutils)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.write_skycatalog\">write_skycatalog() (drizzlepac.imgclasses.Image method)</a>\n\n <ul>\n <li><a href=\"image.html#drizzlepac.imgclasses.RefImage.write_skycatalog\">(drizzlepac.imgclasses.RefImage method)</a>\n</li>\n </ul></li>\n <li><a href=\"outimage.html#drizzlepac.outputimage.OutputImage.writeFITS\">writeFITS() (drizzlepac.outputimage.OutputImage method)</a>\n</li>\n <li><a href=\"image.html#drizzlepac.imgclasses.Image.writeHeaderlet\">writeHeaderlet() (drizzlepac.imgclasses.Image method)</a>\n</li>\n <li><a href=\"catalogs.html#drizzlepac.catalogs.Catalog.writeXYCatalog\">writeXYCatalog() (drizzlepac.catalogs.Catalog method)</a>\n</li>\n </ul></td>\n</tr></table>\n\n<h2 id=\"X\">X</h2>\n<table style=\"width: 100%\" class=\"indextable genindextable\"><tr>\n <td style=\"width: 33%; vertical-align: top;\"><ul>\n <li><a href=\"wcsutils.html#drizzlepac.wcs_functions.WCSMap.xy2rd\">xy2rd() (drizzlepac.wcs_functions.WCSMap method)</a>\n\n <ul>\n <li><a href=\"pixtosky.html#drizzlepac.pixtosky.xy2rd\">(in module drizzlepac.pixtosky)</a>\n</li>\n </ul></li>\n </ul></td>\n</tr></table>\n\n\n\n </div>\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"#\" title=\"General Index\"\n >index</a></li>\n <li class=\"right\" >\n <a href=\"py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li> \n </ul>\n </div>\n <div class=\"footer\" role=\"contentinfo\">\n &#169; Copyright 2017, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara.\n Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.5.1.\n </div>\n </body>\n</html>" }, { "alpha_fraction": 0.6465517282485962, "alphanum_fraction": 
0.6465517282485962, "avg_line_length": 22.200000762939453, "blob_id": "cbb7668beab33fd64a7ce68282090fc753d84cf8", "content_id": "53507ddc855162e4f70937b804d8c61d57dadd59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/scripts/updatenpol", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nif __name__ == '__main__':\n import drizzlepac.updatenpol\n drizzlepac.updatenpol.main()\n" }, { "alpha_fraction": 0.640350878238678, "alphanum_fraction": 0.640350878238678, "avg_line_length": 21.799999237060547, "blob_id": "32695013fbc0b587cc444b4dcbfe5b878ab7ffb3", "content_id": "3f65b4f55277528b75d35054228aa7781b4188eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/scripts/resetbits", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nif __name__ == '__main__':\n import drizzlepac.resetbits\n drizzlepac.resetbits.main()\n" }, { "alpha_fraction": 0.6132075190544128, "alphanum_fraction": 0.6132075190544128, "avg_line_length": 20.200000762939453, "blob_id": "75b1a859d6ed56cc61973e4f02bae48d2f7545e8", "content_id": "cb6be6f70e31db3cc3fed54f8dc19a57e6af79f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/scripts/mdriz", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nif __name__ == '__main__':\n import drizzlepac.mdriz\n drizzlepac.mdriz.main()\n" }, { "alpha_fraction": 0.7188833951950073, "alphanum_fraction": 0.7225425243377686, "avg_line_length": 67.19470977783203, "blob_id": "35aca8be20bb9746afb7a074f606579e103f6b00", "content_id": "fffa0ed3aae479132f02e20e5080041eafb6e9b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 36116, "license_type": "no_license", "max_line_length": 503, "num_lines": 529, "path": "/lib/drizzlepac/htmlhelp/baseobjects.html", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n \n <title>Base ImageObject Classes &#8212; DrizzlePac 2.1.16 (05-June-2017) documentation</title>\n \n <link rel=\"stylesheet\" href=\"_static/stsci_sphinx.css\" type=\"text/css\" />\n <link rel=\"stylesheet\" href=\"_static/pygments.css\" type=\"text/css\" />\n \n <script type=\"text/javascript\">\n var DOCUMENTATION_OPTIONS = {\n URL_ROOT: './',\n VERSION: '2.1.16 (05-June-2017)',\n COLLAPSE_INDEX: false,\n FILE_SUFFIX: '.html',\n HAS_SOURCE: true,\n SOURCELINK_SUFFIX: '.txt'\n };\n </script>\n <script type=\"text/javascript\" src=\"_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"search.html\" />\n <link rel=\"next\" title=\"ACS ImageObjects\" 
href=\"acsobjects.html\" />\n <link rel=\"prev\" title=\"imageObject Classes\" href=\"imageobject.html\" /> \n </head>\n <body role=\"document\">\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"genindex.html\" title=\"General Index\"\n accesskey=\"I\">index</a></li>\n <li class=\"right\" >\n <a href=\"py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"right\" >\n <a href=\"acsobjects.html\" title=\"ACS ImageObjects\"\n accesskey=\"N\">next</a> |</li>\n <li class=\"right\" >\n <a href=\"imageobject.html\" title=\"imageObject Classes\"\n accesskey=\"P\">previous</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"imageobject.html\" accesskey=\"U\">imageObject Classes</a> &#187;</li> \n </ul>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n <p class=\"logo\"><a href=\"index.html\">\n <img class=\"logo\" src=\"_static/stsci_logo.png\" alt=\"Logo\"/>\n </a></p>\n <h4>Previous topic</h4>\n <p class=\"topless\"><a href=\"imageobject.html\"\n title=\"previous chapter\">imageObject Classes</a></p>\n <h4>Next topic</h4>\n <p class=\"topless\"><a href=\"acsobjects.html\"\n title=\"next chapter\">ACS ImageObjects</a></p>\n <div role=\"note\" aria-label=\"source link\">\n <h3>This Page</h3>\n <ul class=\"this-page-menu\">\n <li><a href=\"_sources/baseobjects.rst.txt\"\n rel=\"nofollow\">Show Source</a></li>\n </ul>\n </div>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3>Quick search</h3>\n <form class=\"search\" action=\"search.html\" method=\"get\">\n <div><input type=\"text\" name=\"q\" /></div>\n <div><input type=\"submit\" value=\"Go\" /></div>\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n </form>\n</div>\n<script type=\"text/javascript\">$('#searchbox').show(0);</script>\n </div>\n </div>\n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n <div class=\"body\" role=\"main\">\n \n <div class=\"section\" id=\"base-imageobject-classes\">\n<span id=\"baseimageobjects\"></span><h1>Base ImageObject Classes<a class=\"headerlink\" href=\"#base-imageobject-classes\" title=\"Permalink to this headline\">¶</a></h1>\n<p>A class which makes image objects for each input filename.</p>\n<span class=\"target\" id=\"module-drizzlepac.imageObject\"></span><p>A class which makes image objects for each input filename.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Authors:</th><td class=\"field-body\">Warren Hack</td>\n</tr>\n<tr class=\"field-even field\"><th class=\"field-name\">License:</th><td class=\"field-body\"><a class=\"reference external\" href=\"http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE\">http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE</a></td>\n</tr>\n</tbody>\n</table>\n<dl class=\"class\">\n<dt id=\"drizzlepac.imageObject.baseImageObject\">\n<em class=\"property\">class </em><code class=\"descclassname\">drizzlepac.imageObject.</code><code class=\"descname\">baseImageObject</code><span 
class=\"sig-paren\">(</span><em>filename</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Bases: <a class=\"reference external\" href=\"https://docs.python.org/2/library/functions.html#object\" title=\"(in Python v2.7)\"><code class=\"xref py py-class docutils literal\"><span class=\"pre\">object</span></code></a></p>\n<p>Base ImageObject which defines the primary set of methods.</p>\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.buildERRmask\">\n<code class=\"descname\">buildERRmask</code><span class=\"sig-paren\">(</span><em>chip</em>, <em>dqarr</em>, <em>scale</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.buildERRmask\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.buildERRmask\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Builds a weight mask from an input DQ array and an ERR array\nassociated with the input image.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.buildEXPmask\">\n<code class=\"descname\">buildEXPmask</code><span class=\"sig-paren\">(</span><em>chip</em>, <em>dqarr</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.buildEXPmask\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.buildEXPmask\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Builds a weight mask from an input DQ array and the exposure time\nper pixel for this chip.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.buildIVMmask\">\n<code class=\"descname\">buildIVMmask</code><span class=\"sig-paren\">(</span><em>chip</em>, <em>dqarr</em>, <em>scale</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.buildIVMmask\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.buildIVMmask\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Builds a weight mask from an input DQ array and either an IVM array\nprovided by the user or a self-generated IVM array derived from the\nflat-field reference file associated with the input image.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.buildMask\">\n<code class=\"descname\">buildMask</code><span class=\"sig-paren\">(</span><em>chip</em>, <em>bits=0</em>, <em>write=False</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.buildMask\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.buildMask\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Build masks as specified in the user parameters found in the\nconfigObj object.</p>\n<p>We should overload this function in the instrument specific\nimplementations so that we can add other stuff to the badpixel\nmask? Like vignetting areas and chip boundries in nicmos which\nare camera dependent? 
\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.clean\">\n<code class=\"descname\">clean</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.clean\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.clean\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Deletes intermediate products generated for this imageObject.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.close\">\n<code class=\"descname\">close</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.close\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.close\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Close the object and release all the data\narrays from memory. The data cannot be recovered after this: the pointers\nand data are gone, so use the getData method to get\na data array returned for future use. You can use\nputData to reattach a new data array to the imageObject.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.findExtNum\">\n<code class=\"descname\">findExtNum</code><span class=\"sig-paren\">(</span><em>extname=None</em>, <em>extver=1</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.findExtNum\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.findExtNum\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Find the extension number of the given extname and extver.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.find_DQ_extension\">\n<code class=\"descname\">find_DQ_extension</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.find_DQ_extension\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.find_DQ_extension\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return the suffix for the data quality extension and the name of the\nfile which that DQ extension should be read from.</p>\n</dd></dl>\n
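Taken together, close(), getData() and putData() (documented further below) describe the memory-saving pattern this class is built around: release the pixel arrays, re-read a single extension on demand, and reattach it before writing. A minimal sketch of that round trip, using the API as documented here but with a hypothetical input filename; the 'sci,1' extension syntax follows the putData description below:

from drizzlepac.imageObject import imageObject

img = imageObject('example_flt.fits')  # hypothetical multi-extension FITS file
img.close()                            # release all data arrays from memory

sci = img.getData('sci,1')             # re-read one extension on demand
sci -= 100.0                           # ...operate on the array...
img.putData(data=sci, exten='sci,1')   # reattach it to the imageObject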
<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getAllData\">\n<code class=\"descname\">getAllData</code><span class=\"sig-paren\">(</span><em>extname=None</em>, <em>exclude=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getAllData\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getAllData\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>This function is meant to make it easier to attach ALL the data\nextensions of the image object so that we can write out clean copies of\nthe original image.</p>\n<p>If no extname is given, it retrieves all data from the original\nfile and attaches it. Otherwise, give the name of the extensions\nyou want and all of those will be restored.</p>\n<p>There is one further option: to get all the data\nextensions EXCEPT a particular one, leave extname=None and\nset exclude=EXTNAME. This is helpful because you might not know\nall the extnames the image has; this will find them and exclude\nthe one you do not want overwritten.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getData\">\n<code class=\"descname\">getData</code><span class=\"sig-paren\">(</span><em>exten=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getData\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getData\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return just the data array from the specified extension.\nfileutil is used instead of fits to account for non-FITS\ninput images; openImage returns a fits object.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getExtensions\">\n<code class=\"descname\">getExtensions</code><span class=\"sig-paren\">(</span><em>extname='SCI'</em>, <em>section=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getExtensions\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getExtensions\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return the list of EXTVER values for extensions with the name specified in extname.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getGain\">\n<code class=\"descname\">getGain</code><span class=\"sig-paren\">(</span><em>exten</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getGain\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getGain\" title=\"Permalink to this definition\">¶</a></dt>\n<dd></dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getHeader\">\n<code class=\"descname\">getHeader</code><span class=\"sig-paren\">(</span><em>exten=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getHeader\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getHeader\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return just the specified header extension. fileutil\nis used instead of fits to account for non-FITS\ninput images. 
openImage returns a fits object.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getInstrParameter\">\n<code class=\"descname\">getInstrParameter</code><span class=\"sig-paren\">(</span><em>value</em>, <em>header</em>, <em>keyword</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getInstrParameter\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getInstrParameter\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>This method gets a instrument parameter from a\npair of task parameters: a value, and a header keyword.</p>\n<dl class=\"docutils\">\n<dt>The default behavior is:</dt>\n<dd><ul class=\"first last simple\">\n<li>if the value and header keyword are given, raise an exception.</li>\n<li>if the value is given, use it.</li>\n<li>if the value is blank and the header keyword is given, use\nthe header keyword.</li>\n<li>if both are blank, or if the header keyword is not\nfound, return None.</li>\n</ul>\n</dd>\n</dl>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getKeywordList\">\n<code class=\"descname\">getKeywordList</code><span class=\"sig-paren\">(</span><em>kw</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getKeywordList\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getKeywordList\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return lists of all attribute values for all active chips in the\nimageObject.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getNumpyType\">\n<code class=\"descname\">getNumpyType</code><span class=\"sig-paren\">(</span><em>irafType</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getNumpyType\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getNumpyType\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return the corresponding numpy data type.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getOutputName\">\n<code class=\"descname\">getOutputName</code><span class=\"sig-paren\">(</span><em>name</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getOutputName\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getOutputName\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return the name of the file or PyFITS object associated with that\nname, depending on the setting of self.inmemory.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getReadNoiseImage\">\n<code class=\"descname\">getReadNoiseImage</code><span class=\"sig-paren\">(</span><em>chip</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getReadNoiseImage\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getReadNoiseImage\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p 
class=\"rubric\">Notes</p>\n<p>Method for returning the readnoise image of a detector\n(in electrons).</p>\n<p>The method will return an array of the same shape as the image.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Units:</th><td class=\"field-body\">electrons</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getdarkcurrent\">\n<code class=\"descname\">getdarkcurrent</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getdarkcurrent\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getdarkcurrent\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p class=\"rubric\">Notes</p>\n<p>Return the dark current for the detector. This value\nwill be contained within an instrument specific keyword.\nThe value in the image header will be converted to units\nof electrons.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Units:</th><td class=\"field-body\">electrons</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getdarkimg\">\n<code class=\"descname\">getdarkimg</code><span class=\"sig-paren\">(</span><em>chip</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getdarkimg\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getdarkimg\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p class=\"rubric\">Notes</p>\n<p>Return an array representing the dark image for the detector.</p>\n<p>The method will return an array of the same shape as the image.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Units:</th><td class=\"field-body\">electrons</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getexptimeimg\">\n<code class=\"descname\">getexptimeimg</code><span class=\"sig-paren\">(</span><em>chip</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getexptimeimg\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getexptimeimg\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Returns:</th><td class=\"field-body\"><p class=\"first\"><strong>exptimeimg</strong> : numpy array</p>\n<blockquote class=\"last\">\n<div><p>The method will return an array of the same shape as the image.</p>\n</div></blockquote>\n</td>\n</tr>\n</tbody>\n</table>\n<p class=\"rubric\">Notes</p>\n<p>Return an array representing the exposure time 
per pixel for the detector.\nThis method will be overloaded for IR detectors which have their own\nEXP arrays, namely, WFC3/IR and NICMOS images.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Units:</th><td class=\"field-body\">None</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getflat\">\n<code class=\"descname\">getflat</code><span class=\"sig-paren\">(</span><em>chip</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getflat\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getflat\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Method for retrieving a detector&#8217;s flat field.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Returns:</th><td class=\"field-body\"><p class=\"first\">flat: array</p>\n<blockquote class=\"last\">\n<div><p>This method will return an array the same shape as the image in\n<strong>units of electrons</strong>.</p>\n</div></blockquote>\n</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.getskyimg\">\n<code class=\"descname\">getskyimg</code><span class=\"sig-paren\">(</span><em>chip</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.getskyimg\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.getskyimg\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p class=\"rubric\">Notes</p>\n<p>Return an array representing the sky image for the detector. 
The value\nof the sky is what would actually be subtracted from the exposure by\nthe skysub step.</p>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Units:</th><td class=\"field-body\">electrons</td>\n</tr>\n</tbody>\n</table>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.info\">\n<code class=\"descname\">info</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.info\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.info\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Return FITS information on the _image.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.putData\">\n<code class=\"descname\">putData</code><span class=\"sig-paren\">(</span><em>data=None</em>, <em>exten=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.putData\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.putData\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Now that we are removing the data from the object to save memory,\nwe need something that cleanly puts the data array back into\nthe object so that we can write out everything together using\nsomething like fits.writeto. This method is an attempt to\nmake sure that when you add an array back to the .data section\nof the hdu it still matches the header information for that\nsection (i.e., update the BITPIX to reflect the datatype of the\narray you are adding). The other header stuff is up to you to verify.</p>
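The BITPIX bookkeeping that putData performs can be seen with plain astropy.io.fits: assigning a .data array of a different dtype leaves the header stale until it is refreshed. This is only an illustration of the pitfall putData guards against, not drizzlepac's actual implementation:

import numpy as np
from astropy.io import fits

hdu = fits.ImageHDU(data=np.zeros((10, 10), dtype=np.int16))
print(hdu.header['BITPIX'])             # 16, matching the int16 array

hdu.data = hdu.data.astype(np.float32)  # attach an array with a new dtype
hdu.update_header()                     # refresh BITPIX/NAXIS* from the array
print(hdu.header['BITPIX'])             # now -32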
\n<p>Here, data is the array to attach, and exten is where to put it,\ngiven as either an extension number or a string like &#8216;sci,1&#8217;.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.returnAllChips\">\n<code class=\"descname\">returnAllChips</code><span class=\"sig-paren\">(</span><em>extname=None</em>, <em>exclude=None</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.returnAllChips\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.returnAllChips\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Returns a list containing all the chips which match the\nextname given, minus those specified for exclusion (if any).</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.saveVirtualOutputs\">\n<code class=\"descname\">saveVirtualOutputs</code><span class=\"sig-paren\">(</span><em>outdict</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.saveVirtualOutputs\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.saveVirtualOutputs\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Assign in-memory versions of generated products for this imageObject\nbased on the dictionary &#8216;outdict&#8217;.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.set_mt_wcs\">\n<code class=\"descname\">set_mt_wcs</code><span class=\"sig-paren\">(</span><em>image</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.set_mt_wcs\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.set_mt_wcs\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Reset the WCS for this image based on the WCS information from\nanother imageObject.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.set_units\">\n<code class=\"descname\">set_units</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.set_units\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.set_units\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Record the units for this image, both BUNITS from the header and\nin_units as needed internally. 
This method will be defined\nspecifically for each instrument.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.set_wtscl\">\n<code class=\"descname\">set_wtscl</code><span class=\"sig-paren\">(</span><em>chip</em>, <em>wtscl_par</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.set_wtscl\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.set_wtscl\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Sets the value of the wt_scl parameter as needed for drizzling.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.updateContextImage\">\n<code class=\"descname\">updateContextImage</code><span class=\"sig-paren\">(</span><em>contextpar</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.updateContextImage\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.updateContextImage\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Reset the name of the context image to None if parameter <code class=\"xref py py-obj docutils literal\"><span class=\"pre\">context</span></code> == False.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.updateData\">\n<code class=\"descname\">updateData</code><span class=\"sig-paren\">(</span><em>exten</em>, <em>data</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.updateData\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.updateData\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Write out updated data and header to\nthe original input file for this object.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.updateIVMName\">\n<code class=\"descname\">updateIVMName</code><span class=\"sig-paren\">(</span><em>ivmname</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.updateIVMName\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.updateIVMName\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Update outputNames for image with user-supplied IVM filename.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.baseImageObject.updateOutputValues\">\n<code class=\"descname\">updateOutputValues</code><span class=\"sig-paren\">(</span><em>output_wcs</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#baseImageObject.updateOutputValues\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.baseImageObject.updateOutputValues\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Copy info from output WCSObject into outputnames for each chip\nfor use in creating outputimage object.</p>\n</dd></dl>\n\n</dd></dl>\n\n<dl class=\"class\">\n<dt id=\"drizzlepac.imageObject.imageObject\">\n<em class=\"property\">class </em><code class=\"descclassname\">drizzlepac.imageObject.</code><code class=\"descname\">imageObject</code><span 
class=\"sig-paren\">(</span><em>filename</em>, <em>group=None</em>, <em>inmemory=False</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#imageObject\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.imageObject\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Bases: <a class=\"reference internal\" href=\"#drizzlepac.imageObject.baseImageObject\" title=\"drizzlepac.imageObject.baseImageObject\"><code class=\"xref py py-class docutils literal\"><span class=\"pre\">drizzlepac.imageObject.baseImageObject</span></code></a></p>\n<p>This returns an imageObject that contains all the\nnecessary information to run the image file through\nany multidrizzle function. It is essentially a\nPyFits object with extra attributes.</p>\n<p>There will be generic keywords which are good for\nthe entire image file, and some that might pertain\nonly to the specific chip.</p>\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.imageObject.compute_wcslin\">\n<code class=\"descname\">compute_wcslin</code><span class=\"sig-paren\">(</span><em>undistort=True</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#imageObject.compute_wcslin\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.imageObject.compute_wcslin\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Compute the undistorted WCS based solely on the known distortion\nmodel information associated with the WCS.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.imageObject.setInstrumentParameters\">\n<code class=\"descname\">setInstrumentParameters</code><span class=\"sig-paren\">(</span><em>instrpars</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#imageObject.setInstrumentParameters\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.imageObject.setInstrumentParameters\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Define instrument-specific parameters for use in the code.\nBy definition, this definition will need to be overridden by\nmethods defined in each instrument&#8217;s sub-class.</p>\n</dd></dl>\n\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.imageObject.set_units\">\n<code class=\"descname\">set_units</code><span class=\"sig-paren\">(</span><em>chip</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#imageObject.set_units\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.imageObject.set_units\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Define units for this image.</p>\n</dd></dl>\n\n</dd></dl>\n\n<dl class=\"class\">\n<dt id=\"drizzlepac.imageObject.WCSObject\">\n<em class=\"property\">class </em><code class=\"descclassname\">drizzlepac.imageObject.</code><code class=\"descname\">WCSObject</code><span class=\"sig-paren\">(</span><em>filename</em>, <em>suffix='_drz'</em><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#WCSObject\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.WCSObject\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Bases: <a class=\"reference internal\" 
href=\"#drizzlepac.imageObject.baseImageObject\" title=\"drizzlepac.imageObject.baseImageObject\"><code class=\"xref py py-class docutils literal\"><span class=\"pre\">drizzlepac.imageObject.baseImageObject</span></code></a></p>\n<dl class=\"method\">\n<dt id=\"drizzlepac.imageObject.WCSObject.restore_wcs\">\n<code class=\"descname\">restore_wcs</code><span class=\"sig-paren\">(</span><span class=\"sig-paren\">)</span><a class=\"reference internal\" href=\"_modules/drizzlepac/imageObject.html#WCSObject.restore_wcs\"><span class=\"viewcode-link\">[source]</span></a><a class=\"headerlink\" href=\"#drizzlepac.imageObject.WCSObject.restore_wcs\" title=\"Permalink to this definition\">¶</a></dt>\n<dd></dd></dl>\n\n</dd></dl>\n\n</div>\n\n\n </div>\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"genindex.html\" title=\"General Index\"\n >index</a></li>\n <li class=\"right\" >\n <a href=\"py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"right\" >\n <a href=\"acsobjects.html\" title=\"ACS ImageObjects\"\n >next</a> |</li>\n <li class=\"right\" >\n <a href=\"imageobject.html\" title=\"imageObject Classes\"\n >previous</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"imageobject.html\" >imageObject Classes</a> &#187;</li> \n </ul>\n </div>\n <div class=\"footer\" role=\"contentinfo\">\n &#169; Copyright 2017, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara.\n Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.5.1.\n </div>\n </body>\n</html>" }, { "alpha_fraction": 0.5516822338104248, "alphanum_fraction": 0.5569517612457275, "avg_line_length": 39.434425354003906, "blob_id": "1993acdc84107afb29b7a6375d2d26d68e45af02", "content_id": "4fc5d9218d02b8c80faf8f1cfb231df8e91b0bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4934, "license_type": "no_license", "max_line_length": 153, "num_lines": 122, "path": "/defsetup.py", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nfrom distutils.core import Extension\nimport sys, os.path, os\nfrom distutils import sysconfig\n\n# BUILD should be 'debug', 'profile' or 'release'\nBUILD = 'release'\n\ntry:\n import numpy\nexcept ImportError:\n print(\"Numpy was not found. It may not be installed or it may not be on your PYTHONPATH. Multidrizzle requires numpy v 1.0.2 or later.\\n\")\n raise\n\n# This is the case for building as part of stsci_python\nif os.path.exists('pywcs'):\n # use the not-installed-yet source tree for pywcs\n pywcsincludes = [os.path.join('pywcs', 'src')]\n candidates = []\n for path in os.listdir('pywcs'):\n if path.startswith('wcslib'):\n candidates.append(path)\n if len(candidates) == 1:\n pywcsincludes.append(os.path.join('pywcs', candidates[0], 'C'))\n else:\n raise SystemExit(\"No suitable version of wcslib found in the current distribution of pywcs\")\nelse:\n try:\n from astropy import wcs as pywcs\n pywcslib = pywcs.__path__[0]\n pywcsincludes = [os.path.join(pywcslib, 'include'),\n os.path.join(pywcslib, 'include', 'wcslib')]\n except ImportError:\n raise ImportError(\"PyWCS was not found. 
It may not be installed or it may not be on your PYTHONPATH. \\nPydrizzle requires pywcs 1.4 or later.\\n\")\n\nif numpy.__version__ < \"1.0.2\":\n raise SystemExit(\"Numpy 1.0.2 or later required to build Multidrizzle.\")\n\nprint(\"Building C extensions using NUMPY.\")\n\nnumpyinc = numpy.get_include()\n\npythonlib = sysconfig.get_python_lib(plat_specific=1)\npythoninc = sysconfig.get_python_inc()\nver = sysconfig.get_python_version()\npythonver = 'python' + ver\n\nif sys.platform != 'win32':\n EXTRA_LINK_ARGS = []\nelse:\n EXTRA_LINK_ARGS = ['/NODEFAULTLIB:MSVCRT' ] # , pywcslib+'/_pywcs.dll']\n EXTRA_LINK_ARGS = []\n\n\ndef getNumpyExtensions():\n define_macros = [('PYDRIZZLE', None)]\n undef_macros = []\n EXTRA_COMPILE_ARGS = []\n if BUILD.lower() == 'debug':\n define_macros.append(('DEBUG', None))\n undef_macros.append('NDEBUG')\n if not sys.platform.startswith('sun') and \\\n not sys.platform == 'win32':\n EXTRA_COMPILE_ARGS.extend([\"-fno-inline\", \"-O0\", \"-g\"])\n elif BUILD.lower() == 'profile':\n define_macros.append(('NDEBUG', None))\n undef_macros.append('DEBUG')\n if not sys.platform.startswith('sun') and \\\n not sys.platform == 'win32':\n EXTRA_COMPILE_ARGS.extend([\"-O3\", \"-g\"])\n elif BUILD.lower() == 'release':\n # Define ECHO as nothing to prevent spurious newlines from\n # printing within the libwcs parser\n define_macros.append(('NDEBUG', None))\n undef_macros.append('DEBUG')\n else:\n raise ValueError(\"BUILD should be one of 'debug', 'profile', or 'release'\")\n\n\n ext = [Extension(\"drizzlepac.cdriz\",['src/arrdrizmodule.c',\n 'src/cdrizzleblot.c',\n 'src/cdrizzlebox.c',\n 'src/cdrizzleio.c',\n 'src/cdrizzlemap.c',\n 'src/cdrizzleutil.c',\n 'src/cdrizzlewcs.c'],\n define_macros=define_macros,\n undef_macros=undef_macros,\n include_dirs=[pythoninc] + [numpyinc] + \\\n pywcsincludes,\n extra_link_args=EXTRA_LINK_ARGS,\n extra_compile_args=EXTRA_COMPILE_ARGS,\n # not needed on windows; not needed on mac\n # libraries=['m']\n )]\n\n return ext\n\n\npkg = \"drizzlepac\"\n\nsetupargs = {\n\n 'version' : '1.1.1dev',\n 'description' : \"C-based MultiDrizzle\",\n 'author' : \"Megan Sosey, Warren Hack, Christopher Hanley, Chris Sontag, Mihai Cara\",\n 'author_email' : \"[email protected]\",\n 'license' : \"http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE\",\n 'platforms' : [\"Linux\",\"Solaris\",\"Mac OS X\",\"Win\"],\n 'data_files' : [( pkg+\"/pars\", ['lib/drizzlepac/pars/*']),\n ( pkg+\"/htmlhelp/_images/math\", ['lib/drizzlepac/htmlhelp/_images/math/*']),\n ( pkg+\"/htmlhelp/_images\", ['lib/drizzlepac/htmlhelp/_images/*.*']),\n ( pkg+\"/htmlhelp/_sources\", ['lib/drizzlepac/htmlhelp/_sources/*']),\n ( pkg+\"/htmlhelp/_static\", ['lib/drizzlepac/htmlhelp/_static/*']),\n ( pkg+\"/htmlhelp\", ['lib/drizzlepac/htmlhelp/*.html']),\n ( pkg, ['lib/drizzlepac/*.help'])],\n 'scripts' : [\"scripts/mdriz\",\"scripts/resetbits\",\"scripts/updatenpol\",\"scripts/runastrodriz\"] ,\n 'ext_modules' : getNumpyExtensions(),\n 'package_dir' : { 'drizzlepac' : 'lib/drizzlepac', },\n\n }\n\n" }, { "alpha_fraction": 0.6002081632614136, "alphanum_fraction": 0.6087824702262878, "avg_line_length": 141.10736083984375, "blob_id": "f7e54e965009c02bc8bd71d91bbc2524b44f0ca3", "content_id": "1aec49450f9a9f1b52772b585f8ea4aa6ea26bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 189286, "license_type": "no_license", "max_line_length": 934, "num_lines": 1332, "path": 
"/lib/drizzlepac/htmlhelp/_modules/drizzlepac/imageObject.html", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n \n <title>drizzlepac.imageObject &#8212; DrizzlePac 2.1.16 (05-June-2017) documentation</title>\n \n <link rel=\"stylesheet\" href=\"../../_static/stsci_sphinx.css\" type=\"text/css\" />\n <link rel=\"stylesheet\" href=\"../../_static/pygments.css\" type=\"text/css\" />\n \n <script type=\"text/javascript\">\n var DOCUMENTATION_OPTIONS = {\n URL_ROOT: '../../',\n VERSION: '2.1.16 (05-June-2017)',\n COLLAPSE_INDEX: false,\n FILE_SUFFIX: '.html',\n HAS_SOURCE: true,\n SOURCELINK_SUFFIX: '.txt'\n };\n </script>\n <script type=\"text/javascript\" src=\"../../_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"../../_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"../../_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"../../genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"../../search.html\" /> \n </head>\n <body role=\"document\">\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"../../genindex.html\" title=\"General Index\"\n accesskey=\"I\">index</a></li>\n <li class=\"right\" >\n <a href=\"../../py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"../../index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"../index.html\" accesskey=\"U\">Module code</a> &#187;</li> \n </ul>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n <p class=\"logo\"><a href=\"../../index.html\">\n <img class=\"logo\" src=\"../../_static/stsci_logo.png\" alt=\"Logo\"/>\n </a></p>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3>Quick search</h3>\n <form class=\"search\" action=\"../../search.html\" method=\"get\">\n <div><input type=\"text\" name=\"q\" /></div>\n <div><input type=\"submit\" value=\"Go\" /></div>\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n </form>\n</div>\n<script type=\"text/javascript\">$('#searchbox').show(0);</script>\n </div>\n </div>\n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n <div class=\"body\" role=\"main\">\n \n <h1>Source code for drizzlepac.imageObject</h1><div class=\"highlight\"><pre>\n<span></span><span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\">A class which makes image objects for each input filename.</span>\n\n<span class=\"sd\">:Authors: Warren Hack</span>\n\n<span class=\"sd\">:License: `&lt;http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE&gt;`_</span>\n\n<span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"kn\">from</span> <span class=\"nn\">__future__</span> <span class=\"k\">import</span> <span class=\"n\">absolute_import</span><span class=\"p\">,</span> <span class=\"n\">division</span><span class=\"p\">,</span> <span class=\"n\">print_function</span> <span 
class=\"c1\"># confidence medium</span>\n\n<span class=\"kn\">import</span> <span class=\"nn\">copy</span><span class=\"o\">,</span> <span class=\"nn\">os</span><span class=\"o\">,</span> <span class=\"nn\">re</span><span class=\"o\">,</span> <span class=\"nn\">sys</span>\n\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"kn\">from</span> <span class=\"nn\">stwcs</span> <span class=\"k\">import</span> <span class=\"n\">distortion</span>\n\n<span class=\"kn\">from</span> <span class=\"nn\">stsci.tools</span> <span class=\"k\">import</span> <span class=\"n\">fileutil</span><span class=\"p\">,</span> <span class=\"n\">logutil</span><span class=\"p\">,</span> <span class=\"n\">textutil</span>\n<span class=\"kn\">from</span> <span class=\"nn\">astropy.io</span> <span class=\"k\">import</span> <span class=\"n\">fits</span>\n<span class=\"kn\">from</span> <span class=\"nn\">.</span> <span class=\"k\">import</span> <span class=\"n\">util</span>\n<span class=\"kn\">from</span> <span class=\"nn\">.</span> <span class=\"k\">import</span> <span class=\"n\">wcs_functions</span>\n<span class=\"kn\">from</span> <span class=\"nn\">.</span> <span class=\"k\">import</span> <span class=\"n\">buildmask</span>\n\n\n<span class=\"n\">IRAF_DTYPES</span><span class=\"o\">=</span><span class=\"p\">{</span><span class=\"s1\">&#39;float64&#39;</span><span class=\"p\">:</span><span class=\"o\">-</span><span class=\"mi\">64</span><span class=\"p\">,</span><span class=\"s1\">&#39;float32&#39;</span><span class=\"p\">:</span><span class=\"o\">-</span><span class=\"mi\">32</span><span class=\"p\">,</span><span class=\"s1\">&#39;uint8&#39;</span><span class=\"p\">:</span><span class=\"mi\">8</span><span class=\"p\">,</span><span class=\"s1\">&#39;int16&#39;</span><span class=\"p\">:</span><span class=\"mi\">16</span><span class=\"p\">,</span><span class=\"s1\">&#39;int32&#39;</span><span class=\"p\">:</span><span class=\"mi\">32</span><span class=\"p\">}</span>\n\n\n<span class=\"kn\">from</span> <span class=\"nn\">.version</span> <span class=\"k\">import</span> <span class=\"o\">*</span>\n\n<span class=\"n\">log</span> <span class=\"o\">=</span> <span class=\"n\">logutil</span><span class=\"o\">.</span><span class=\"n\">create_logger</span><span class=\"p\">(</span><span class=\"n\">__name__</span><span class=\"p\">)</span>\n\n\n<div class=\"viewcode-block\" id=\"baseImageObject\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">baseImageObject</span><span class=\"p\">(</span><span class=\"nb\">object</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Base ImageObject which defines the primary set of methods.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"k\">def</span> <span class=\"nf\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"p\">):</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"o\">=</span> <span class=\"s2\">&quot;SCI&quot;</span> <span class=\"c1\"># the extension the science image is stored in</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">maskExt</span><span class=\"o\">=</span><span class=\"s2\">&quot;DQ&quot;</span> <span class=\"c1\">#the extension with the mask 
image in it</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">errExt</span> <span class=\"o\">=</span> <span class=\"s2\">&quot;ERR&quot;</span> <span class=\"c1\"># the extension the ERR array can be found in</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_filename</span> <span class=\"o\">=</span> <span class=\"n\">filename</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_original_file_name</span> <span class=\"o\">=</span> <span class=\"n\">filename</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">native_units</span><span class=\"o\">=</span><span class=\"s1\">&#39;ELECTRONS&#39;</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">flatkey</span> <span class=\"o\">=</span> <span class=\"kc\">None</span> <span class=\"c1\"># keyword which points to flat-field reference file</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_instrument</span><span class=\"o\">=</span><span class=\"kc\">None</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_rootname</span><span class=\"o\">=</span><span class=\"kc\">None</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"o\">=</span><span class=\"p\">{}</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">outputValues</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">createContext</span> <span class=\"o\">=</span> <span class=\"kc\">True</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">inmemory</span> <span class=\"o\">=</span> <span class=\"kc\">False</span> <span class=\"c1\"># flag for all in-memory operations</span>\n <span class=\"c1\">#this is the number of science chips to be processed in the file</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">=</span><span class=\"mi\">1</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_nextend</span><span class=\"o\">=</span><span class=\"mi\">0</span>\n <span class=\"c1\"># this is the number of chip which will be combined based on &#39;group&#39; parameter</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_nmembers</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">__getitem__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">exten</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Overload getitem to return the data and header</span>\n<span class=\"sd\"> these only work on the HDU list already in memory</span>\n<span class=\"sd\"> once the data has been zero&#39;s in self._image you should</span>\n<span class=\"sd\"> use getData or getHeader to re-read the file.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"k\">return</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">getExtn</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span 
class=\"n\">_image</span><span class=\"p\">,</span><span class=\"n\">extn</span><span class=\"o\">=</span><span class=\"n\">exten</span><span class=\"p\">)</span>\n\n\n <span class=\"k\">def</span> <span class=\"nf\">__cmp__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">other</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Overload the comparison operator</span>\n<span class=\"sd\"> just to check the filename of the object?</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"k\">return</span> <span class=\"p\">(</span><span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">other</span><span class=\"p\">,</span> <span class=\"n\">imageObject</span><span class=\"p\">)</span> <span class=\"ow\">and</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_filename</span> <span class=\"o\">==</span> <span class=\"n\">other</span><span class=\"o\">.</span><span class=\"n\">_filename</span><span class=\"p\">)</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">_isNotValid</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">par1</span><span class=\"p\">,</span> <span class=\"n\">par2</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Method used to determine if a value or keyword is</span>\n<span class=\"sd\"> supplied as input for instrument specific parameters.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">invalidValues</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"s1\">&#39;None&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;INDEF&#39;</span><span class=\"p\">,</span><span class=\"s1\">&#39;&#39;</span><span class=\"p\">]</span>\n <span class=\"k\">return</span> <span class=\"p\">(</span><span class=\"n\">par1</span> <span class=\"ow\">in</span> <span class=\"n\">invalidValues</span> <span class=\"ow\">and</span> <span class=\"n\">par2</span> <span class=\"ow\">in</span> <span class=\"n\">invalidValues</span><span class=\"p\">)</span>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.info\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.info\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">info</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Return fits information on the _image.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"c1\">#if the file hasn&#39;t been closed yet then we can</span>\n <span class=\"c1\">#use the fits info which looks at the extensions</span>\n <span class=\"c1\">#if(self._isSimpleFits):</span>\n <span class=\"c1\"># print self._filename,&quot; is a simple fits image&quot;</span>\n <span class=\"c1\">#else:</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"o\">.</span><span class=\"n\">info</span><span class=\"p\">()</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.close\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.close\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">close</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; Close 
    def close(self):
        """ Close the object nicely and release all the data
            arrays from memory. YOU CAN'T GET IT BACK; the pointers
            and data are gone, so use the getData method to get
            the data array returned for future use. You can use
            putData to reattach a new data array to the imageObject.
        """
        if self._image is None:
            return

        # mcara: I think the code below is not necessary but in order to
        # preserve the same functionality as the code removed below,
        # I make an empty copy of the image object:
        empty_image = fits.HDUList()
        for u in self._image:
            empty_image.append(u.__class__(data=None, header=None))
        # mcara: END unnecessary code

        self._image.close()  #calls fits.close()

        self._image = empty_image

        #we actually want to make sure that all the
        #data extensions have been closed and deleted
        #since we could have the DQ,ERR and others read in
        #at this point, but I'd like there to be something
        #valid there afterwards that I can play with

        # mcara: REMOVED unnecessary code:
        #
        # if not self._isSimpleFits:
        #     for ext,hdu in enumerate(self._image):
        #         #use the datatype for the extension
        #         #dtype=self.getNumpyType(hdu.header["BITPIX"])
        #         hdu.data = None #np.array(0,dtype=dtype) #so we dont get io errors on stuff that wasn't read in yet
        # else:
        #     self._image.data= None # np.array(0,dtype=self.getNumpyType(self._image.header["BITPIX"]))
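    # A minimal usage sketch of the close()/getData()/putData() lifecycle
    # described in the docstring above. The filename and extension spec are
    # hypothetical; this assumes an imageObject subclass was built elsewhere:
    #
    #     img = imageObject('test_flt.fits')      # hypothetical input file
    #     sci = img.getData('sci,1')              # pull the SCI,1 array
    #     img.close()                             # frees all data arrays
    #     img.putData(data=sci, exten='sci,1')    # reattach the array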
    def clean(self):
        """ Deletes intermediate products generated for this imageObject.
        """
        clean_files = ['blotImage', 'crmaskImage', 'finalMask',
                       'staticMask', 'singleDrizMask', 'outSky',
                       'outSContext', 'outSWeight', 'outSingle',
                       'outMedian', 'dqmask', 'tmpmask',
                       'skyMatchMask']

        log.info('Removing intermediate files for %s' % self._filename)
        # We need to remove the combined products first; namely, median image
        util.removeFileSafely(self.outputNames['outMedian'])
        # Now remove chip-specific intermediate files, if any were created.
        for chip in self.returnAllChips(extname='SCI'):
            for fname in clean_files:
                if fname in chip.outputNames:
                    util.removeFileSafely(chip.outputNames[fname])
    def getData(self, exten=None):
        """ Return just the data array from the specified extension.
            fileutil is used instead of fits to account for non-FITS
            input images; openImage returns a fits object.
        """
        if exten.lower().find('sci') > -1:
            # For SCI extensions, the current file will have the data
            fname = self._filename
        else:
            # otherwise, the data being requested may need to come from a
            # separate file, as is the case with WFPC2 DQ data.
            #
            # convert exten to 'sci',extver to get the DQ info for that chip
            extn = exten.split(',')
            sci_chip = self._image[self.scienceExt, int(extn[1])]
            fname = sci_chip.dqfile

        extnum = self._interpretExten(exten)
        if self._image[extnum].data is None:
            if os.path.exists(fname):
                _image = fileutil.openImage(fname, clobber=False, memmap=False)
                _data = fileutil.getExtn(_image, extn=exten).data
                _image.close()
                del _image
                self._image[extnum].data = _data
            else:
                _data = None
        else:
            _data = self._image[extnum].data

        return _data
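    # For illustration: getData() accepts either an integer extension number
    # or an 'EXTNAME,EXTVER' string. Both calls below are hypothetical and
    # assume a multi-extension FITS file is attached:
    #
    #     dq  = img.getData('dq,1')   # may be read from a separate DQ file
    #     sci = img.getData(1)        # direct extension number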
    def getHeader(self, exten=None):
        """ Return just the specified header extension.
            fileutil is used instead of fits to account for non-FITS
            input images; openImage returns a fits object.
        """
        _image = fileutil.openImage(self._filename, clobber=False, memmap=False)
        _header = fileutil.getExtn(_image, extn=exten).header
        _image.close()
        del _image
        return _header
    def _interpretExten(self, exten):
        #check if the exten is a string or number and translate to the correct chip
        _extnum = 0

        if ',' in str(exten):  #assume a string like "sci,1" has been given
            _extensplit = exten.split(',')
            _extname = _extensplit[0]
            _extver = int(_extensplit[1])
            _extnum = self.findExtNum(_extname, _extver)
        else:
            #assume that a direct extnum has been given
            _extnum = int(exten)

        if _extnum is None:
            msg = "no extension number found"
            log.error(msg)
            raise ValueError(msg)

        return _extnum
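    # Sketch of the two input forms _interpretExten() accepts (the resulting
    # numbers are illustrative; they depend on the file's extension layout):
    #
    #     img._interpretExten('sci,2')  # -> findExtNum('SCI', 2), e.g. 4
    #     img._interpretExten(4)        # -> 4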
    def updateData(self, exten, data):
        """ Write out updated data and header to
            the original input file for this object.
        """
        _extnum = self._interpretExten(exten)
        fimg = fileutil.openImage(self._filename, mode='update', memmap=False)
        fimg[_extnum].data = data
        fimg[_extnum].header = self._image[_extnum].header
        fimg.close()
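    # Hypothetical round trip using updateData(): modify an array in memory,
    # then push it (and the in-memory header) back into the input file
    # ('sky_value' below is an assumed quantity computed elsewhere):
    #
    #     sci = img.getData('sci,1')
    #     sci -= sky_value
    #     img.updateData('sci,1', sci)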
    def putData(self, data=None, exten=None):
        """ Now that we are removing the data from the object to save memory,
            we need something that cleanly puts the data array back into
            the object so that we can write out everything together using
            something like fits.writeto....this method is an attempt to
            make sure that when you add an array back to the .data section
            of the hdu it still matches the header information for that
            section (ie. update the bitpix to reflect the datatype of the
            array you are adding). The other header stuff is up to you to verify.

            Data should be the data array; exten is where you want to stick it,
            either an extension number or a string like 'sci,1'.
        """

        if data is None:
            log.warning("No data supplied")
        else:
            extnum = self._interpretExten(exten)
            ext = self._image[extnum]
            # update the bitpix to the current datatype; this isn't fancy and
            # ignores bscale
            ext.header['BITPIX'] = IRAF_DTYPES[data.dtype.name]
            ext.data = data
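    # The BITPIX update above relies on a dtype-name -> FITS BITPIX mapping
    # (IRAF_DTYPES, referenced here, is defined elsewhere in this module).
    # The FITS convention pairs, e.g., float32 with -32 and int16 with 16,
    # so a hypothetical call such as
    #
    #     img.putData(data=np.zeros((10, 10), dtype=np.float32), exten='sci,1')
    #
    # would reset BITPIX accordingly before reattaching the array.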
    def getAllData(self, extname=None, exclude=None):
        """ This function is meant to make it easier to attach ALL the data
            extensions of the image object so that we can write out copies of
            the original image nicer.

            If no extname is given, then it retrieves all data from the
            original file and attaches it. Otherwise, give the name of the
            extensions you want and all of those will be restored.

            Ok, I added another option. If you want to get all the data
            extensions EXCEPT a particular one, leave extname=None and
            set exclude=EXTNAME. This is helpful because you might not know
            all the extnames the image has; this will find out and exclude
            the one you do not want overwritten.
        """

        extensions = self._findExtnames(extname=extname, exclude=exclude)

        for i in range(1, self._nextend + 1, 1):
            if hasattr(self._image[i], '_extension') and \
                    "IMAGE" in self._image[i]._extension:
                extver = self._image[i].header['extver']
                if (self._image[i].extname in extensions) and self._image[self.scienceExt, extver].group_member:
                    self._image[i].data = self.getData(self._image[i].extname +
                                                       ',' + str(self._image[i].extver))
    def returnAllChips(self, extname=None, exclude=None):
        """ Returns a list containing all the chips which match the
            extname given minus those specified for exclusion (if any).
        """
        extensions = self._findExtnames(extname=extname, exclude=exclude)
        chiplist = []
        for i in range(1, self._nextend + 1, 1):
            if 'extver' in self._image[i].header:
                extver = self._image[i].header['extver']
            else:
                extver = 1
            if hasattr(self._image[i], '_extension') and \
                    "IMAGE" in self._image[i]._extension:
                if (self._image[i].extname in extensions) and self._image[self.scienceExt, extver].group_member:
                    chiplist.append(self._image[i])
        return chiplist
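    # Typical pattern for iterating over science chips, mirroring the call
    # already used in clean() above (the attributes accessed are those set
    # by _assignRootname()/_setChipOutputNames() and are illustrative):
    #
    #     for chip in img.returnAllChips(extname='SCI'):
    #         print(chip.rootname, chip.outputNames.get('blotImage'))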
    def _findExtnames(self, extname=None, exclude=None):
        """ This method builds a list of all extensions which have 'EXTNAME'==extname
            and do not include any extensions with 'EXTNAME'==exclude, if any are
            specified for exclusion at all.
        """
        #make a list of the available extension names for the object
        extensions = []
        if extname is not None:
            if not isinstance(extname, list): extname = [extname]
            for extn in extname:
                extensions.append(extn.upper())
        else:
            #restore all the extensions data from the original file, be careful here
            #if you've altered data in memory you want to keep!
            for i in range(1, self._nextend + 1, 1):
                if hasattr(self._image[i], '_extension') and \
                        "IMAGE" in self._image[i]._extension:
                    if self._image[i].extname.upper() not in extensions:
                        extensions.append(self._image[i].extname)
        #remove this extension from the list
        if exclude is not None:
            exclude = exclude.upper()
            if exclude in extensions:
                newExt = []
                for item in extensions:
                    if item != exclude:
                        newExt.append(item)
                extensions = newExt
                del newExt
        return extensions
    def findExtNum(self, extname=None, extver=1):
        """Find the extension number of the given extname and extver."""

        extnum = None
        extname = extname.upper()

        if not self._isSimpleFits:
            for ext in self._image:
                if (hasattr(ext, '_extension') and 'IMAGE' in ext._extension and
                        (ext.extname == extname) and (ext.extver == extver)):
                    extnum = ext.extnum
        else:
            log.info("Image is simple fits")

        return extnum
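    # Illustration of the exclude semantics in _findExtnames() above, for a
    # hypothetical file carrying SCI, ERR and DQ image extensions:
    #
    #     img._findExtnames()               # -> ['SCI', 'ERR', 'DQ']
    #     img._findExtnames(exclude='dq')   # -> ['SCI', 'ERR']
    #     img._findExtnames(extname='sci')  # -> ['SCI']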
    def _assignRootname(self, chip):
        """ Assign a unique rootname for the image based on the expname.
        """

        extname = self._image[self.scienceExt, chip].header["EXTNAME"].lower()
        extver = self._image[self.scienceExt, chip].header["EXTVER"]
        expname = self._rootname.lower()

        # record extension-based name to reflect what extension a mask file corresponds to
        self._image[self.scienceExt, chip].rootname = expname + "_" + extname + str(extver)
        self._image[self.scienceExt, chip].sciname = self._filename + "[" + extname + "," + str(extver) + "]"
        self._image[self.scienceExt, chip].dqrootname = self._rootname + "_" + extname + str(extver)
        # Needed to keep EXPNAMEs associated properly (1 EXPNAME for all chips)
        self._image[self.scienceExt, chip]._expname = expname
        self._image[self.scienceExt, chip]._chip = chip
    def _setOutputNames(self, rootname, suffix='_drz'):
        """ Define the default output filenames for drizzle products;
            these are based on the original rootname of the image.
            The filename should be just 1 filename, so call this in a loop
            for chip names contained inside a file.
        """
        # Define FITS output filenames for intermediate products

        # Build names based on final DRIZZLE output name
        # where 'output' normally would have been created
        #  by 'process_input()'
        #
        outFinal = rootname + suffix + '.fits'
        outSci = rootname + suffix + '_sci.fits'
        outWeight = rootname + suffix + '_wht.fits'
        outContext = rootname + suffix + '_ctx.fits'
        outMedian = rootname + '_med.fits'
        # Build names based on input name
        origFilename = self._filename.replace('.fits', '_OrIg.fits')
        outSky = rootname + '_sky.fits'
        outSingle = rootname + '_single_sci.fits'
        outSWeight = rootname + '_single_wht.fits'
        crCorImage = rootname + '_crclean.fits'

        # Build outputNames dictionary
        fnames = {
            'origFilename': origFilename,
            'outFinal': outFinal,
            'outMedian': outMedian,
            'outSci': outSci,
            'outWeight': outWeight,
            'outContext': outContext,
            'outSingle': outSingle,
            'outSWeight': outSWeight,
            'outSContext': None,
            'outSky': outSky,
            'crcorImage': crCorImage,
            'ivmFile': None}

        return fnames

    def _setChipOutputNames(self, rootname, chip):
        blotImage = rootname + '_blt.fits'
        crmaskImage = rootname + '_crmask.fits'

        # Start with global names
        fnames = self.outputNames

        # Now add chip-specific entries
        fnames['blotImage'] = blotImage
        fnames['crmaskImage'] = crmaskImage
        sci_chip = self._image[self.scienceExt, chip]
        # Define mask names as additional entries into outputNames dictionary
        fnames['finalMask'] = sci_chip.dqrootname + '_final_mask.fits'  # used by final_drizzle
        fnames['singleDrizMask'] = fnames['finalMask'].replace('final', 'single')
        fnames['staticMask'] = None

        # Add the following entries for use in creating outputImage object
        fnames['data'] = sci_chip.sciname
        return fnames
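    # Worked example of the name patterns defined by _setOutputNames() above,
    # using a hypothetical rootname 'j8cw03' and the default suffix '_drz':
    #
    #     outFinal  -> 'j8cw03_drz.fits'
    #     outSci    -> 'j8cw03_drz_sci.fits'
    #     outWeight -> 'j8cw03_drz_wht.fits'
    #     outMedian -> 'j8cw03_med.fits'
    #     outSky    -> 'j8cw03_sky.fits'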
##############################################################
#
# Methods related to managing virtual intermediate output products
# as opposed to writing them out as files on disk
#
##############################################################
    def _initVirtualOutputs(self):
        """ Sets up the structure to hold all the output data arrays for
            this image in memory.
        """
        self.virtualOutputs = {}
        for product in self.outputNames:
            self.virtualOutputs[product] = None

    def saveVirtualOutputs(self, outdict):
        """ Assign in-memory versions of generated products for this imageObject
            based on dictionary 'outdict'.
        """
        if not self.inmemory:
            return
        for outname in outdict:
#            log.info('saveVirtualOutputs: writing key '+outname+' for id: '+str(id(self)))
            self.virtualOutputs[outname] = outdict[outname]
    def getOutputName(self, name):
        """ Return the name of the file or PyFITS object associated with that
            name, depending on the setting of self.inmemory.
        """
        val = self.outputNames[name]
        if self.inmemory:  # if inmemory was turned on...
            # return virtualOutput object saved with that name
            val = self.virtualOutputs[val]
        return val
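    # Sketch of the in-memory product flow implied by the two methods above
    # (keys and objects are illustrative): with self.inmemory set, a
    # processing step can hand its product to saveVirtualOutputs() keyed by
    # the output filename, and a later step retrieves it by logical name:
    #
    #     img.saveVirtualOutputs({img.outputNames['outSci']: sci_hdulist})
    #     product = img.getOutputName('outSci')   # an HDUList, not a filename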
class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;outnx&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">_naxis1</span>\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;outny&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">_naxis2</span>\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;texptime&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">_exptime</span>\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;texpstart&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">_expstart</span>\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;texpend&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">_expend</span>\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;nimages&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">nimages</span>\n\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;scale&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">wcs</span><span class=\"o\">.</span><span class=\"n\">pscale</span> <span class=\"c1\">#/ self._image[self.scienceExt,1].wcs.pscale</span>\n <span class=\"n\">outputvals</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_exptime</span>\n\n <span class=\"n\">outnames</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">outputNames</span>\n <span class=\"n\">outnames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outMedian&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outMedian&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">outnames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outFinal&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outFinal&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">outnames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outSci&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outSci&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">outnames</span><span class=\"p\">[</span><span 
class=\"s1\">&#39;outWeight&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outWeight&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">outnames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outContext&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">output_wcs</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outContext&#39;</span><span class=\"p\">]</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.updateContextImage\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.updateContextImage\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">updateContextImage</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">contextpar</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Reset the name of the context image to None if parameter `context` == False.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">createContext</span> <span class=\"o\">=</span> <span class=\"n\">contextpar</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">contextpar</span><span class=\"p\">:</span>\n <span class=\"n\">log</span><span class=\"o\">.</span><span class=\"n\">info</span><span class=\"p\">(</span><span class=\"s1\">&#39;No context image will be created for </span><span class=\"si\">%s</span><span class=\"s1\">&#39;</span> <span class=\"o\">%</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_filename</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">outputNames</span><span class=\"p\">[</span><span class=\"s1\">&#39;outContext&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"kc\">None</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.find_DQ_extension\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.find_DQ_extension\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">find_DQ_extension</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Return the suffix for the data quality extension and the name of the</span>\n<span class=\"sd\"> file which that DQ extension should be read from.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">dqfile</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"n\">dq_suffix</span><span class=\"o\">=</span><span class=\"kc\">None</span>\n <span class=\"k\">if</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">maskExt</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"k\">for</span> <span class=\"n\">hdu</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">:</span>\n <span class=\"c1\"># Look for DQ extension in input file</span>\n <span 
class=\"k\">if</span> <span class=\"s1\">&#39;extname&#39;</span> <span class=\"ow\">in</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">header</span> <span class=\"ow\">and</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;extname&#39;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">lower</span><span class=\"p\">()</span> <span class=\"o\">==</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">maskExt</span><span class=\"o\">.</span><span class=\"n\">lower</span><span class=\"p\">():</span>\n <span class=\"n\">dqfile</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_filename</span>\n <span class=\"n\">dq_suffix</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">maskExt</span>\n <span class=\"k\">break</span>\n\n <span class=\"k\">return</span> <span class=\"n\">dqfile</span><span class=\"p\">,</span><span class=\"n\">dq_suffix</span></div>\n\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getKeywordList\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getKeywordList\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getKeywordList</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">kw</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Return lists of all attribute values for all active chips in the</span>\n<span class=\"sd\"> imageObject.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">kwlist</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">group_member</span><span class=\"p\">:</span>\n <span class=\"n\">kwlist</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">__dict__</span><span class=\"p\">[</span><span class=\"n\">kw</span><span class=\"p\">])</span>\n\n <span class=\"k\">return</span> <span class=\"n\">kwlist</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getGain\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getGain\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getGain</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">exten</span><span class=\"p\">):</span>\n <span 
class=\"k\">return</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"n\">exten</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">_gain</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getflat\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getflat\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getflat</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Method for retrieving a detector&#39;s flat field.</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> flat: array</span>\n<span class=\"sd\"> This method will return an array the same shape as the image in</span>\n<span class=\"sd\"> **units of electrons**.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span> <span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"n\">exten</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;</span><span class=\"si\">%s</span><span class=\"s1\">,</span><span class=\"si\">%d</span><span class=\"s1\">&#39;</span> <span class=\"o\">%</span> <span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span> <span class=\"n\">chip</span><span class=\"p\">)</span>\n <span class=\"c1\"># The keyword for ACS flat fields in the primary header of the flt</span>\n <span class=\"c1\"># file is pfltfile. 
This flat file is already in the required</span>\n <span class=\"c1\"># units of electrons.</span>\n\n <span class=\"c1\"># The use of fileutil.osfn interprets any environment variable, such as jref$,</span>\n <span class=\"c1\"># used in the specification of the reference filename</span>\n <span class=\"n\">filename</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">osfn</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">flatkey</span><span class=\"p\">])</span>\n\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"n\">handle</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">openImage</span><span class=\"p\">(</span><span class=\"n\">filename</span><span class=\"p\">,</span> <span class=\"n\">mode</span><span class=\"o\">=</span><span class=\"s1\">&#39;readonly&#39;</span><span class=\"p\">,</span> <span class=\"n\">memmap</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n <span class=\"n\">hdu</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">getExtn</span><span class=\"p\">(</span><span class=\"n\">handle</span><span class=\"p\">,</span><span class=\"n\">extn</span><span class=\"o\">=</span><span class=\"n\">exten</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">!=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]:</span>\n <span class=\"n\">_ltv2</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">round</span><span class=\"p\">(</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">ltv2</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">_ltv2</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"n\">_size2</span> <span class=\"o\">=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">_ltv2</span>\n <span class=\"k\">if</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">shape</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">!=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]:</span>\n <span class=\"n\">_ltv1</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">round</span><span class=\"p\">(</span><span 
class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">ltv1</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">_ltv1</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"n\">_size1</span> <span class=\"o\">=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">+</span><span class=\"n\">_ltv1</span>\n\n <span class=\"n\">data</span> <span class=\"o\">=</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">_ltv2</span><span class=\"p\">:</span><span class=\"n\">_size2</span><span class=\"p\">,</span> <span class=\"n\">_ltv1</span><span class=\"p\">:</span><span class=\"n\">_size1</span><span class=\"p\">]</span>\n <span class=\"n\">handle</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n <span class=\"k\">except</span><span class=\"p\">:</span>\n <span class=\"n\">data</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_dtype</span><span class=\"p\">)</span>\n <span class=\"n\">log</span><span class=\"o\">.</span><span class=\"n\">warning</span><span class=\"p\">(</span><span class=\"s2\">&quot;Cannot find file </span><span class=\"si\">%s</span><span class=\"s2\">.</span><span class=\"se\">\\n</span><span class=\"s2\"> Treating flatfield &quot;</span>\n <span class=\"s2\">&quot;constant value of &#39;1&#39;.&quot;</span> <span class=\"o\">%</span> <span class=\"n\">filename</span><span class=\"p\">)</span>\n <span class=\"n\">flat</span> <span class=\"o\">=</span> <span class=\"n\">data</span>\n <span class=\"k\">return</span> <span class=\"n\">flat</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getReadNoiseImage\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getReadNoiseImage\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getReadNoiseImage</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Notes</span>\n<span class=\"sd\"> =====</span>\n<span class=\"sd\"> Method for returning the readnoise image of a detector</span>\n<span class=\"sd\"> (in electrons).</span>\n\n<span class=\"sd\"> The method will return an array of the same shape as the image.</span>\n\n<span class=\"sd\"> :units: electrons</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span 
class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">,</span><span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_dtype</span><span class=\"p\">)</span> <span class=\"o\">*</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getexptimeimg\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getexptimeimg\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getexptimeimg</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Notes</span>\n<span class=\"sd\"> =====</span>\n<span class=\"sd\"> Return an array representing the exposure time per pixel for the detector.</span>\n<span class=\"sd\"> This method will be overloaded for IR detectors which have their own</span>\n<span class=\"sd\"> EXP arrays, namely, WFC3/IR and NICMOS images.</span>\n\n<span class=\"sd\"> :units:</span>\n<span class=\"sd\"> None</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> =======</span>\n<span class=\"sd\"> exptimeimg : numpy array</span>\n<span class=\"sd\"> The method will return an array of the same shape as the image.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_wtscl_par</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;expsq&#39;</span><span class=\"p\">:</span>\n <span class=\"n\">wtscl</span> <span class=\"o\">=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span><span class=\"o\">*</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">wtscl</span> <span class=\"o\">=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span>\n\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">,</span><span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_dtype</span><span class=\"p\">)</span><span class=\"o\">*</span><span class=\"n\">wtscl</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getdarkimg\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getdarkimg\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getdarkimg</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span 
class=\"sd\"> Notes</span>\n<span class=\"sd\"> =====</span>\n<span class=\"sd\"> Return an array representing the dark image for the detector.</span>\n\n<span class=\"sd\"> The method will return an array of the same shape as the image.</span>\n\n<span class=\"sd\"> :units: electrons</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">,</span><span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_dtype</span><span class=\"p\">)</span><span class=\"o\">*</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">darkcurrent</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getskyimg\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getskyimg\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getskyimg</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Notes</span>\n<span class=\"sd\"> =====</span>\n<span class=\"sd\"> Return an array representing the sky image for the detector. The value</span>\n<span class=\"sd\"> of the sky is what would actually be subtracted from the exposure by</span>\n<span class=\"sd\"> the skysub step.</span>\n\n<span class=\"sd\"> :units: electrons</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_shape</span><span class=\"p\">,</span><span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">image_dtype</span><span class=\"p\">)</span><span class=\"o\">*</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">subtractedSky</span></div>\n\n<div class=\"viewcode-block\" id=\"baseImageObject.getdarkcurrent\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.baseImageObject.getdarkcurrent\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getdarkcurrent</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Notes</span>\n<span class=\"sd\"> =====</span>\n<span class=\"sd\"> Return the dark current for the detector. 
    def getdarkcurrent(self):
        """
        Notes
        =====
        Return the dark current for the detector.  This value
        will be contained within an instrument specific keyword.
        The value in the image header will be converted to units
        of electrons.

        :units: electrons

        """
        pass

    # The following two functions are basically doing the same thing;
    # how are they used differently in the code?
    def getExtensions(self, extname='SCI', section=None):
        ''' Return the list of EXTVER values for extensions with name specified in extname.
        '''
        if section is None:
            section = []
            for hdu in self._image:
                if 'extname' in hdu.header and hdu.header['extname'] == extname:
                    section.append(hdu.header['extver'])
        else:
            if not isinstance(section, list):
                section = [section]

        return section
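    # Illustrative sketch (hypothetical file layout): for an ACS/WFC image
    # with two SCI extensions,
    #
    #     img.getExtensions('SCI')   # -> [1, 2]  (the EXTVER values)
    #
    # while _countEXT('SCI'), defined below, would return 2.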
    def _countEXT(self, extname="SCI"):
        """
        Count the number of extensions in the file
        with the given name (EXTNAME).
        """
        count = 0  # simple fits image

        if self._image['PRIMARY'].header["EXTEND"]:
            for i, hdu in enumerate(self._image):
                if i > 0:
                    hduExtname = False
                    if 'EXTNAME' in hdu.header:
                        self._image[i].extnum = i
                        self._image[i].extname = hdu.header["EXTNAME"]
                        hduExtname = True
                    if 'EXTVER' in hdu.header:
                        self._image[i].extver = hdu.header["EXTVER"]
                    else:
                        self._image[i].extver = 1

                    if (((extname is not None) and
                         (hduExtname and hdu.header["EXTNAME"] == extname))
                            or extname is None):
                        count += 1
        return count

    def getNumpyType(self, irafType):
        """ Return the corresponding numpy data type.
        """

        iraf = {-64: 'float64', -32: 'float32', 8: 'uint8',
                16: 'int16', 32: 'int32', 64: 'int64'}

        return iraf[irafType]
    def buildMask(self, chip, bits=0, write=False):
        """
        Build masks as specified in the user parameters found in the
        configObj object.

        We should overload this function in the instrument specific
        implementations so that we can add other stuff to the badpixel
        mask? Like vignetting areas and chip boundaries in nicmos which
        are camera dependent?  These are not defined in the DQ masks, but
        should be masked out to get the best results in multidrizzle.
        """
        dqarr = self.getData(exten=self.maskExt + ',' + str(chip))
        dqmask = buildmask.buildMask(dqarr, bits)

        if write:
            phdu = fits.PrimaryHDU(data=dqmask, header=self._image[self.maskExt, chip].header)
            dqmask_name = self._image[self.scienceExt, chip].dqrootname + '_dqmask.fits'
            log.info('Writing out DQ/weight mask: %s' % dqmask_name)
            if os.path.exists(dqmask_name):
                os.remove(dqmask_name)
            phdu.writeto(dqmask_name)
            del phdu
            self._image[self.scienceExt, chip].dqmaskname = dqmask_name
            # record the name of this mask file that was created for later
            # removal by the 'clean()' method
            self._image[self.scienceExt, chip].outputNames['dqmask'] = dqmask_name
        del dqarr
        return dqmask

    def buildEXPmask(self, chip, dqarr):
        """ Builds a weight mask from an input DQ array and the exposure time
            per pixel for this chip.
        """
        log.info("Applying EXPTIME weighting to DQ mask for chip %s" %
                 chip)
        #exparr = self.getexptimeimg(chip)
        exparr = self._image[self.scienceExt, chip]._exptime
        expmask = exparr * dqarr

        return expmask.astype(np.float32)
    def buildIVMmask(self, chip, dqarr, scale):
        """ Builds a weight mask from an input DQ array and either an IVM array
            provided by the user or a self-generated IVM array derived from the
            flat-field reference file associated with the input image.
        """
        sci_chip = self._image[self.scienceExt, chip]
        ivmname = self.outputNames['ivmFile']

        if ivmname is not None:
            log.info("Applying user supplied IVM files for chip %s" % chip)
            # Parse the input file name to get the extension we are working on
            extn = "IVM,{}".format(chip)

            # Open the mask image for updating and the IVM image
            ivm = fileutil.openImage(ivmname, mode='readonly', memmap=False)
            ivmfile = fileutil.getExtn(ivm, extn)

            # Multiply the IVM file by the input mask in place.
            ivmarr = ivmfile.data * dqarr

            ivm.close()

        else:
            log.info("Automatically creating IVM files for chip %s" % chip)
            # If no IVM files were provided by the user we will
            # need to automatically generate them based upon
            # instrument specific information.

            flat = self.getflat(chip)
            RN = self.getReadNoiseImage(chip)
            darkimg = self.getdarkimg(chip)
            skyimg = self.getskyimg(chip)

            #exptime = self.getexptimeimg(chip)
            #exptime = sci_chip._exptime
            #ivm = (flat*exptime)**2/(darkimg+(skyimg*flat)+RN**2)
            ivm = (flat)**2 / (darkimg + (skyimg * flat) + RN**2)

            # Multiply the IVM file by the input mask in place.
            ivmarr = ivm * dqarr

        # Update 'wt_scl' parameter to match use of IVM file
        sci_chip._wtscl = pow(sci_chip._exptime, 2) / pow(scale, 4)
        #sci_chip._wtscl = 1.0/pow(scale,4)

        return ivmarr.astype(np.float32)
input image.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n\n <span class=\"c1\"># Set default value in case of error, or lack of ERR array</span>\n <span class=\"n\">errmask</span> <span class=\"o\">=</span> <span class=\"n\">dqarr</span>\n\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">errExt</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"c1\"># Attempt to open the ERR image.</span>\n <span class=\"n\">err</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getData</span><span class=\"p\">(</span><span class=\"n\">exten</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">errExt</span><span class=\"o\">+</span><span class=\"s1\">&#39;,&#39;</span><span class=\"o\">+</span><span class=\"nb\">str</span><span class=\"p\">(</span><span class=\"n\">chip</span><span class=\"p\">))</span>\n\n <span class=\"n\">log</span><span class=\"o\">.</span><span class=\"n\">info</span><span class=\"p\">(</span><span class=\"s2\">&quot;Applying ERR weighting to DQ mask for chip </span><span class=\"si\">%s</span><span class=\"s2\">&quot;</span> <span class=\"o\">%</span>\n <span class=\"n\">chip</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># Multiply the scaled ERR file by the input mask in place.</span>\n <span class=\"c1\">#exptime = self.getexptimeimg(chip)</span>\n <span class=\"n\">exptime</span> <span class=\"o\">=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span>\n <span class=\"n\">errmask</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">exptime</span><span class=\"o\">/</span><span class=\"n\">err</span><span class=\"p\">)</span><span class=\"o\">**</span><span class=\"mi\">2</span> <span class=\"o\">*</span> <span class=\"n\">dqarr</span>\n\n <span class=\"c1\"># Update &#39;wt_scl&#39; parameter to match use of IVM file</span>\n <span class=\"c1\">#sci_chip._wtscl = pow(sci_chip._exptime,2)/pow(scale,4)</span>\n <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_wtscl</span> <span class=\"o\">=</span> <span class=\"mf\">1.0</span><span class=\"o\">/</span><span class=\"nb\">pow</span><span class=\"p\">(</span><span class=\"n\">scale</span><span class=\"p\">,</span><span class=\"mi\">4</span><span class=\"p\">)</span>\n\n <span class=\"k\">del</span> <span class=\"n\">err</span>\n\n <span class=\"k\">except</span><span class=\"p\">:</span>\n <span class=\"c1\"># We cannot find an &#39;ERR&#39; extension and the data isn&#39;t WFPC2.</span>\n <span class=\"c1\"># Print a generic warning message and continue on with the</span>\n <span class=\"c1\"># final drizzle step.</span>\n\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"n\">textutil</span><span class=\"o\">.</span><span class=\"n\">textbox</span><span class=\"p\">(</span>\n <span class=\"s1\">&#39;WARNING: No ERR weighting will be applied to 
the mask &#39;</span>\n <span class=\"s1\">&#39;used in the final drizzle step! Weighting will be only &#39;</span>\n <span class=\"s1\">&#39;by exposure time.</span><span class=\"se\">\\n\\n</span><span class=\"s1\">The data provided as input does not &#39;</span>\n <span class=\"s1\">&#39;contain an ERR extension&#39;</span><span class=\"p\">),</span> <span class=\"n\">file</span><span class=\"o\">=</span><span class=\"n\">sys</span><span class=\"o\">.</span><span class=\"n\">stderr</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;</span><span class=\"se\">\\n</span><span class=\"s1\"> Continue with final drizzle step...&#39;</span><span class=\"p\">,</span> <span class=\"n\">sys</span><span class=\"o\">.</span><span class=\"n\">stderr</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"c1\"># If we were unable to find an &#39;ERR&#39; extension to apply, one</span>\n <span class=\"c1\"># possible reason was that the input was a &#39;standard&#39; WFPC2 data</span>\n <span class=\"c1\"># file that does not actually contain an error array. Test for</span>\n <span class=\"c1\"># this condition and issue a Warning to the user and continue on to</span>\n <span class=\"c1\"># the final drizzle.</span>\n\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"n\">textutil</span><span class=\"o\">.</span><span class=\"n\">textbox</span><span class=\"p\">(</span>\n <span class=\"s2\">&quot;WARNING: No ERR weighting will be applied to the mask used &quot;</span>\n <span class=\"s2\">&quot;in the final drizzle step! Weighting will be only by &quot;</span>\n <span class=\"s2\">&quot;exposure time.</span><span class=\"se\">\\n\\n</span><span class=\"s2\">The WFPC2 data provided as input does not &quot;</span>\n <span class=\"s2\">&quot;contain ERR arrays. WFPC2 data is not supported by this &quot;</span>\n <span class=\"s2\">&quot;weighting type.</span><span class=\"se\">\\n\\n</span><span class=\"s2\">A workaround would be to create inverse &quot;</span>\n <span class=\"s2\">&quot;variance maps and use &#39;IVM&#39; as the final_wht_type. 

    def updateIVMName(self, ivmname):
        """ Update outputNames for image with user-supplied IVM filename.
        """
        self.outputNames['ivmFile'] = ivmname

    def set_mt_wcs(self, image):
        """ Reset the WCS for this image based on the WCS information from
        another imageObject.
        """
        for chip in range(1, self._numchips + 1, 1):
            sci_chip = self._image[self.scienceExt, chip]
            ref_chip = image._image[image.scienceExt, chip]
            # Do we want to keep track of original WCS or not? No reason now...
            sci_chip.wcs = ref_chip.wcs.copy()

    def set_wtscl(self, chip, wtscl_par):
        """ Sets the value of the wt_scl parameter as needed for drizzling.
        """
        sci_chip = self._image[self.scienceExt, chip]

        exptime = 1  # sci_chip._exptime
        _parval = 'unity'
        if wtscl_par is not None:
            if isinstance(wtscl_par, str):
                if not wtscl_par.isdigit():
                    # String passed in as value, check for 'exptime' or 'expsq'
                    _wtscl_float = None
                    try:
                        _wtscl_float = float(wtscl_par)
                    except ValueError:
                        _wtscl_float = None
                    if _wtscl_float is not None:
                        _wtscl = _wtscl_float
                    elif wtscl_par == 'expsq':
                        _wtscl = exptime * exptime
                        _parval = 'expsq'
                    else:
                        # Default to the case of 'exptime', if
                        # not explicitly specified as 'expsq'
                        _wtscl = exptime
                else:
                    # int value passed in as a string, convert to float
                    _wtscl = float(wtscl_par)
            else:
                # We have a non-string value passed in...
                _wtscl = float(wtscl_par)
        else:
            # Default case: wt_scl = exptime
            _wtscl = exptime

        sci_chip._wtscl_par = _parval
        sci_chip._wtscl = _wtscl
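    # Hedged sketch of the wt_scl interpretation above, rewritten as a pure
    # function (the name interpret_wtscl is illustrative, not part of the
    # class API):
    #
    #     def interpret_wtscl(wtscl_par, exptime):
    #         if wtscl_par in (None, ''):
    #             return exptime                  # default: weight by exptime
    #         try:
    #             return float(wtscl_par)         # explicit numeric value
    #         except (TypeError, ValueError):
    #             pass
    #         return exptime * exptime if wtscl_par == 'expsq' else exptime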

    def set_units(self):
        """ Record the units for this image, both BUNITS from header and
        in_units as needed internally. This method will be defined
        specifically for each instrument.
        """
        pass

    def getInstrParameter(self, value, header, keyword):
        """ This method gets an instrument parameter from a
        pair of task parameters: a value, and a header keyword.

        The default behavior is:
        - if the value and header keyword are given, raise an exception.
        - if the value is given, use it.
        - if the value is blank and the header keyword is given, use
          the header keyword.
        - if both are blank, or if the header keyword is not
          found, return None.
        """
        if isinstance(value, str) and value in ['None', '', ' ', 'INDEF']:
            value = None

        if value and (keyword is not None and keyword.strip() != ''):
            exceptionMessage = ("ERROR: Your input is ambiguous! Please "
                                "specify either a value or a keyword.\n "
                                "You specified both " + str(value) +
                                " and " + str(keyword))
            raise ValueError(exceptionMessage)

        elif value is not None and value != '':
            return self._averageFromList(value)

        elif keyword is not None and keyword.strip() != '':
            return self._averageFromHeader(header, keyword)

        else:
            return None
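    # Hedged usage sketch (the header contents and keyword names below are
    # hypothetical illustrations, not values mandated by this code):
    #
    #     >>> hdr = {'ATODGNA': 2.0, 'ATODGNB': 2.0}
    #     >>> img.getInstrParameter(None, hdr, 'ATODGNA,ATODGNB')  # header
    #     2.0
    #     >>> img.getInstrParameter('1.5', hdr, '')    # explicit value wins
    #     1.5
    #     >>> img.getInstrParameter('1.5', hdr, 'ATODGNA')  # both given
    #     ValueError: ERROR: Your input is ambiguous! ...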

    def _averageFromHeader(self, header, keyword):
        """ Averages out values taken from the header. The keywords from
        which to read values are passed as a comma-separated list.
        """
        _list = ''
        for _kw in keyword.split(','):
            if _kw in header:
                _list = _list + ',' + str(header[_kw])
            else:
                return None
        return self._averageFromList(_list)

    def _averageFromList(self, param):
        """ Averages out values passed as a comma-separated
        list, disregarding the zero-valued entries.
        """
        _result = 0.0
        _count = 0

        for _param in param.split(','):
            if _param != '' and float(_param) != 0.0:
                _result = _result + float(_param)
                _count += 1

        if _count >= 1:
            _result = _result / _count
        return _result

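
# A standalone sketch of the averaging rule used by _averageFromList above;
# _example_average is a hypothetical helper, not part of the module API:
def _example_average(param):
    # Ignore empty and zero-valued entries, then take the plain mean.
    vals = [float(p) for p in param.split(',') if p != '' and float(p) != 0.0]
    return sum(vals) / len(vals) if vals else 0.0

# _example_average('2.0,0.0,4.0') -> 3.0
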
class imageObject(baseImageObject):
    """
    This returns an imageObject that contains all the
    necessary information to run the image file through
    any multidrizzle function. It is essentially a
    PyFits object with extra attributes.

    There will be generic keywords which are good for
    the entire image file, and some that might pertain
    only to the specific chip.
    """

    def __init__(self, filename, group=None, inmemory=False):
        baseImageObject.__init__(self, filename)

        # fileutil.openImage returns a fits object
        try:
            self._image = fileutil.openImage(filename, clobber=False,
                                             memmap=False)
        except IOError:
            raise IOError("Unable to open file: %s" % filename)

        # populate the global attributes which are good for all the chips
        # in the file
        #self._rootname=self._image['PRIMARY'].header["ROOTNAME"]
        self._rootname = fileutil.buildNewRootname(filename)
        self.outputNames = self._setOutputNames(self._rootname)

        # flag to indicate whether or not to write out intermediate products
        # to disk (default) or keep everything in memory
        self.inmemory = inmemory
        self._initVirtualOutputs()

        #self._exptime=self._image["PRIMARY"].header["EXPTIME"]
        # exptime should be set in the image subclass code since it's kept
        # in different places
        #if(self._exptime == 0):
        self._exptime = 1.  # to avoid divide by zero
        #    print "Setting exposure time to 1. to avoid div/0!"

        # this is the number of science chips to be processed in the file
        self._numchips = self._countEXT(extname=self.scienceExt)

        self.proc_unit = None

        #self._nextend=self._image["PRIMARY"].header["NEXTEND"]
        self._nextend = self._countEXT(extname=None)

        if self._numchips == 0:
            # the simple fits image contains the data in the primary
            # extension; this will help us deal with the rest of the code
            # that looks and acts on chips :)
            #self._nextend=1
            self._numchips = 1
            self.scienceExt = "PRIMARY"
            self.maskExt = None
            self._image["PRIMARY"].header["EXTNAME"] = "PRIMARY"
            self._image["PRIMARY"].header["EXTVER"] = 1
            self._image["PRIMARY"].extnum = 0

        self._isSimpleFits = False

        # Clean out any stray MDRIZSKY keywords from PRIMARY headers
        fimg = fileutil.openImage(filename, mode='update', memmap=False)
        if 'MDRIZSKY' in fimg['PRIMARY'].header:
            del fimg['PRIMARY'].header['MDRIZSKY']
        fimg.close()
        del fimg

        if group not in [None, '']:
            # Only use selected chip
            if ',' in group:
                group_id = group.split(',')
                if group_id[0].isalpha():
                    # user specified a specific extname,extver
                    self.group = [int(group_id[1])]
                else:
                    # user specified a list of extension numbers to process
                    self.group = []
                    for grp in group_id:
                        # find extname/extver which corresponds to this
                        # extension number
                        group_extname = self._image[int(grp)].header['EXTNAME']
                        group_extver = self._image[int(grp)].header['EXTVER']
                        self.group.append(group_extver)
            else:
                # find extname/extver which corresponds to this extension
                # number
                group_extver = self._image[int(group)].header['EXTVER']
                self.group = [int(group_extver)]
        else:
            # Use all chips
            self.group = None
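        # Hypothetical examples of the 'group' forms accepted above:
        #     group='sci,2' -> self.group == [2]   (extname,extver pair)
        #     group='1,4'   -> EXTVER of extensions 1 and 4 collected
        #     group='4'     -> EXTVER of extension 4 used
        #     group=None    -> all chips are processed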

        if not self._isSimpleFits:

            # assign chip specific information
            for chip in range(1, self._numchips + 1, 1):

                self._assignRootname(chip)
                sci_chip = self._image[self.scienceExt, chip]

                # Set a flag to indicate whether this chip should be included
                # or not, based on user input from the 'group' parameter.
                if self.group is None or (self.group is not None and
                                          chip in self.group):
                    sci_chip.group_member = True
                    self._nmembers += 1
                else:
                    sci_chip.group_member = False

                sci_chip.signature = None

                sci_chip.dqname = None
                sci_chip.dqmaskname = None

                sci_chip.dqfile, sci_chip.dq_extn = self.find_DQ_extension()
                #self.maskExt = sci_chip.dq_extn
                if sci_chip.dqfile is not None:
                    sci_chip.dqname = (sci_chip.dqfile + '[' +
                                       sci_chip.dq_extn + ',' +
                                       str(chip) + ']')

                # build up HSTWCS object for each chip, which will be
                # necessary for drizzling operations
                sci_chip.wcs = wcs_functions.get_hstwcs(self._filename,
                                                        self._image,
                                                        sci_chip.extnum)
                sci_chip.detnum, sci_chip.binned = util.get_detnum(
                    sci_chip.wcs, self._filename, chip)
                sci_chip.wcslin_pscale = 1.0

                # do not assume all the chips have the same dimensions in
                # the file
                sci_chip._naxis1 = sci_chip.header["NAXIS1"]
                sci_chip._naxis2 = sci_chip.header["NAXIS2"]

                # record the exptime values for this chip so that it can be
                # easily used to generate the composite value for the final
                # output image
                sci_chip._expstart, sci_chip._expend = util.get_expstart(
                    sci_chip.header, self._image['PRIMARY'].header)

                sci_chip.outputNames = self._setChipOutputNames(
                    sci_chip.rootname, chip).copy()  # this is a dictionary
                # Set the units: both bunit and in_units
                self.set_units(chip)

                # initialize gain, readnoise, and exptime attributes;
                # the actual values will be set by each instrument based on
                # keyword names specific to that instrument by
                # 'setInstrumentParameters()'
                sci_chip._headergain = 1  # gain value read from header
                sci_chip._gain = 1.0      # calibrated gain value
                sci_chip._rdnoise = 1.0   # calibrated readnoise
                sci_chip._exptime = 1.0
                sci_chip._effGain = 1.0
                sci_chip._conversionFactor = 1.0
                sci_chip._wtscl = 1.0

                # Keep track of the sky value that should be subtracted from
                # this chip. Read in value from image header, in case user
                # has already determined the sky level
                #
                # .computedSky:   value to be applied by the
                #                 adrizzle/ablot steps.
                # .subtractedSky: value already (or will be by adrizzle/ablot)
                #                 subtracted from the image
                #
                if "MDRIZSKY" in sci_chip.header:
                    subsky = sci_chip.header['MDRIZSKY']
                    log.info('Reading in MDRIZSKY of %s' % subsky)
                    sci_chip.subtractedSky = subsky
                    sci_chip.computedSky = subsky
                else:
                    sci_chip.subtractedSky = 0.0
                    sci_chip.computedSky = None

                sci_chip.darkcurrent = 0.0

                # The following attributes are used when working with
                # sub-arrays and get reference file arrays for
                # auto-generation of IVM masks
                try:
                    sci_chip.ltv1 = sci_chip.header['LTV1'] * -1
                    sci_chip.ltv2 = sci_chip.header['LTV2'] * -1
                except KeyError:
                    sci_chip.ltv1 = 0
                    sci_chip.ltv2 = 0
                if sci_chip.ltv1 < 0:
                    sci_chip.ltv1 = 0
                if sci_chip.ltv2 < 0:
                    sci_chip.ltv2 = 0
                sci_chip.size1 = (sci_chip.header['NAXIS1'] +
                                  np.round(sci_chip.ltv1))
                sci_chip.size2 = (sci_chip.header['NAXIS2'] +
                                  np.round(sci_chip.ltv2))
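                # Hypothetical subarray example for the LTV handling above:
                # a header value LTV1 = -100 gives ltv1 = 100, so
                # size1 = NAXIS1 + 100 recovers the full-chip X dimension.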
                #sci_chip.image_shape = (sci_chip.size2, sci_chip.size1)
                sci_chip.image_shape = (sci_chip.header['NAXIS2'],
                                        sci_chip.header['NAXIS1'])

                # Interpret the array dtype by translating the IRAF BITPIX
                # value
                for dtype in IRAF_DTYPES.keys():
                    if sci_chip.header['BITPIX'] == IRAF_DTYPES[dtype]:
                        sci_chip.image_dtype = dtype
                        break

                if self.inmemory:
                    # read image data array into memory
                    shape = sci_chip.data.shape

    def setInstrumentParameters(self, instrpars):
        """ Define instrument-specific parameters for use in the code.
        By definition, this definition will need to be overridden by
        methods defined in each instrument's sub-class.
        """
        pass
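    # For reference, the BITPIX translation in __init__ above follows the
    # standard FITS convention (IRAF_DTYPES is assumed to map dtype names
    # to these BITPIX codes):
    #     8 -> uint8, 16 -> int16, 32 -> int32, -32 -> float32, -64 -> float64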

    def compute_wcslin(self, undistort=True):
        """ Compute the undistorted WCS based solely on the known distortion
        model information associated with the WCS.
        """
        for chip in range(1, self._numchips + 1, 1):
            sci_chip = self._image[self.scienceExt, chip]
            chip_wcs = sci_chip.wcs.copy()

            if (chip_wcs.sip is None or not undistort or
                    chip_wcs.instrument == 'DEFAULT'):
                chip_wcs.sip = None
                chip_wcs.cpdis1 = None
                chip_wcs.cpdis2 = None
                chip_wcs.det2im = None
                undistort = False

            # compute the undistorted 'natural' plate scale for this chip
            wcslin = distortion.utils.output_wcs([chip_wcs],
                                                 undistort=undistort)
            sci_chip.wcslin_pscale = wcslin.pscale

    def set_units(self, chip):
        """ Define units for this image.
        """
        # Determine output value of BUNITS
        # and make sure it is not specified as 'ergs/cm...'
        sci_chip = self._image[self.scienceExt, chip]

        _bunit = None
        if ('BUNIT' in sci_chip.header and
                sci_chip.header['BUNIT'].find('ergs') < 0):
            _bunit = sci_chip.header['BUNIT']
        else:
            _bunit = 'ELECTRONS/S'
        sci_chip._bunit = _bunit
        #
        if '/s' in _bunit.lower():
            _in_units = 'cps'
        else:
            _in_units = 'counts'
        sci_chip.in_units = _in_units

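
# Minimal standalone sketch of the BUNIT -> in_units rule above;
# _example_in_units is a hypothetical helper, not part of the module API:
def _example_in_units(bunit):
    # Fall back to 'ELECTRONS/S' when BUNIT is missing or flux-calibrated.
    bunit = bunit if bunit and 'ergs' not in bunit else 'ELECTRONS/S'
    return 'cps' if '/s' in bunit.lower() else 'counts'

# _example_in_units('ELECTRONS')   -> 'counts'
# _example_in_units('ELECTRONS/S') -> 'cps'
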
class WCSObject(baseImageObject):
    def __init__(self, filename, suffix='_drz'):
        baseImageObject.__init__(self, filename)

        self._image = fits.HDUList()
        self._image.append(fits.PrimaryHDU())

        # Build rootname, but guard against the rootname being given without
        # the '_drz.fits' suffix
        #patt = re.compile(r"_dr[zc]\w*.fits$")
        drz_extn = suffix
        patt = re.compile(r"_dr[zc]")
        m = patt.search(filename)
        if m:
            self._rootname = filename[:m.start()]
            drz_extn = m.group()
        else:
            # Guard against having .fits in the rootname
            if '.fits' in filename:
                self._rootname = filename[:filename.find('.fits')]
                drz_extn = ''
            else:
                self._rootname = filename

        self.outputNames = self._setOutputNames(self._rootname,
                                                suffix=drz_extn)
        self.nimages = 1

        self._bunit = 'ELECTRONS/S'
        self.default_wcs = None
        self.final_wcs = None
        self.single_wcs = None
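    # Hypothetical examples of the rootname handling in __init__ above:
    #     'j94f05bgq_drz.fits' -> rootname 'j94f05bgq', drz_extn '_drz'
    #     'j94f05bgq.fits'     -> rootname 'j94f05bgq', drz_extn ''
    #     'j94f05bgq'          -> rootname 'j94f05bgq', drz_extn '_drz'
    #                             (the default suffix)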
class=\"o\">=</span> <span class=\"kc\">None</span>\n\n<div class=\"viewcode-block\" id=\"WCSObject.restore_wcs\"><a class=\"viewcode-back\" href=\"../../baseobjects.html#drizzlepac.imageObject.WCSObject.restore_wcs\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">restore_wcs</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">wcs</span> <span class=\"o\">=</span> <span class=\"n\">copy</span><span class=\"o\">.</span><span class=\"n\">copy</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">default_wcs</span><span class=\"p\">)</span></div></div>\n</pre></div>\n\n </div>\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"../../genindex.html\" title=\"General Index\"\n >index</a></li>\n <li class=\"right\" >\n <a href=\"../../py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"../../index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"../index.html\" >Module code</a> &#187;</li> \n </ul>\n </div>\n <div class=\"footer\" role=\"contentinfo\">\n &#169; Copyright 2017, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara.\n Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.5.1.\n </div>\n </body>\n</html>" }, { "alpha_fraction": 0.6041267514228821, "alphanum_fraction": 0.6081960797309875, "avg_line_length": 36.03546142578125, "blob_id": "8415c133fc1f1081089f2edd5caa92713467915b", "content_id": "7aa2057a160ba449855bcbd76d25467e665234cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20888, "license_type": "no_license", "max_line_length": 132, "num_lines": 564, "path": "/lib/drizzlepac/createMedian.py", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "\"\"\"\nCreate a median image from the singly drizzled images.\n\n:Authors: Warren Hack\n\n:License: `<http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE>`_\n\n\"\"\"\n\n# Import external packages\nfrom __future__ import absolute_import, division, print_function # confidence medium\n\nimport sys\nimport numpy as np\nfrom astropy.io import fits\nimport os, math\n\nfrom stsci.imagestats import ImageStats\nfrom stsci.image import numcombine\nfrom stsci.tools import iterfile, nimageiter, teal, logutil\n\nfrom . import imageObject\nfrom . import util\nfrom .minmed import minmed\nfrom . 
import processInput\nfrom .adrizzle import _single_step_num_\n\n\nfrom .version import *\n\n__taskname__= \"drizzlepac.createMedian\" #looks in drizzlepac for createMedian.cfg\n_step_num_ = 4 #this relates directly to the syntax in the cfg file\n\nlog = logutil.create_logger(__name__)\n\n\n#this is the user access function\ndef median(input=None, configObj=None, editpars=False, **inputDict):\n \"\"\"\n Create a median image from the separately drizzled images.\n \"\"\"\n\n if input is not None:\n inputDict[\"input\"] = input\n else:\n raise ValueError(\"Please supply an input image\")\n\n configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,\n loadOnly=(not editpars))\n if configObj is None:\n return\n\n if not editpars:\n run(configObj)\n\n\n#this is the function that will be called from TEAL\ndef run(configObj):\n\n imgObjList,outwcs = processInput.setCommonInput(configObj,createOutwcs=False) #outwcs is not needed here\n\n createMedian(imgObjList,configObj)\n\n#\n#### Top-level interface from inside MultiDrizzle\n#\ndef createMedian(imgObjList,configObj,procSteps=None):\n \"\"\" Top-level interface to createMedian step called from top-level MultiDrizzle.\n\n This function parses the input parameters then calls the `_median()` function\n to median-combine the input images into a single image.\n\n \"\"\"\n if imgObjList is None:\n msg = \"Please provide a list of imageObjects to the median step\"\n print(msg, file=sys.stderr)\n raise ValueError(msg)\n\n if procSteps is not None:\n procSteps.addStep('Create Median')\n\n step_name = util.getSectionName(configObj,_step_num_)\n if not configObj[step_name]['median']:\n log.info('Median combination step not performed.')\n return\n\n paramDict=configObj[step_name]\n paramDict['proc_unit'] = configObj['proc_unit']\n\n # include whether or not compression was performed\n driz_sep_name = util.getSectionName(configObj,_single_step_num_)\n driz_sep_paramDict = configObj[driz_sep_name]\n paramDict['compress'] = driz_sep_paramDict['driz_sep_compress']\n\n log.info('USER INPUT PARAMETERS for Create Median Step:')\n util.printParams(paramDict, log=log)\n\n _median(imgObjList, paramDict)\n\n if procSteps is not None:\n procSteps.endStep('Create Median')\n\n\n# this is the internal function, the user called function is below\ndef _median(imageObjectList, paramDict):\n \"\"\"Create a median image from the list of image Objects\n that has been given.\n \"\"\"\n\n newmasks = paramDict['median_newmasks']\n comb_type = paramDict['combine_type']\n nlow = paramDict['combine_nlow']\n nhigh = paramDict['combine_nhigh']\n grow = paramDict['combine_grow']\n maskpt = paramDict['combine_maskpt']\n proc_units = paramDict['proc_unit']\n compress = paramDict['compress']\n bufsizeMb = paramDict['combine_bufsize']\n\n sigma=paramDict[\"combine_nsigma\"]\n sigmaSplit=sigma.split()\n nsigma1 = float(sigmaSplit[0])\n nsigma2 = float(sigmaSplit[1])\n\n #print \"Checking parameters:\"\n #print comb_type,nlow,nhigh,grow,maskpt,nsigma1,nsigma2\n if paramDict['combine_lthresh'] is None:\n lthresh = None\n else:\n lthresh = float(paramDict['combine_lthresh'])\n\n if paramDict['combine_hthresh'] is None:\n hthresh = None\n else:\n hthresh = float(paramDict['combine_hthresh'])\n\n #the name of the output median file is defined in the output wcs object\n #and stuck in the image.outputValues[\"outMedian\"] dict of every imageObject\n medianfile=imageObjectList[0].outputNames[\"outMedian\"]\n\n\n \"\"\" Builds combined array from single drizzled images.\"\"\"\n # Start 
by removing any previous products...\n if(os.access(medianfile,os.F_OK)):\n os.remove(medianfile)\n\n\n # Define lists for instrument specific parameters, these should be in the image objects\n # need to be passed to the minmed routine\n readnoiseList = []\n exposureTimeList = []\n backgroundValueList = [] #list of MDRIZSKY *platescale values\n singleDrizList=[] #these are the input images\n singleWeightList=[] #pointers to the data arrays\n #skylist=[] #the list of platescale values for the images\n _wht_mean = [] # Compute the mean value of each wht image\n\n _single_hdr = None\n virtual = None\n\n #for each image object\n for image in imageObjectList:\n if virtual is None:\n virtual = image.inmemory\n\n det_gain = image.getGain(1)\n img_exptime = image._image['sci',1]._exptime\n native_units = image.native_units\n if lthresh is not None:\n if proc_units.lower() == 'native':\n if native_units.lower() == \"counts\":\n lthresh = lthresh * det_gain\n if native_units.lower() == \"counts/s\":\n lthresh = lthresh * img_exptime\n if hthresh is not None:\n if proc_units.lower() == 'native':\n if native_units.lower() == \"counts\":\n hthresh = hthresh * det_gain\n if native_units.lower() == \"counts/s\":\n hthresh = hthresh * img_exptime\n\n singleDriz = image.getOutputName(\"outSingle\")\n singleDriz_name = image.outputNames['outSingle']\n singleWeight = image.getOutputName(\"outSWeight\")\n singleWeight_name = image.outputNames['outSWeight']\n #singleDriz=image.outputNames[\"outSingle\"] #all chips are drizzled to a single output image\n #singleWeight=image.outputNames[\"outSWeight\"]\n\n # If compression was used, reference ext=1 as CompImageHDU only writes\n # out MEF files, not simple FITS.\n if compress:\n wcs_ext = '[1]'\n wcs_extnum = 1\n else:\n wcs_ext = '[0]'\n wcs_extnum = 0\n if not virtual:\n if isinstance(singleDriz,str):\n iter_singleDriz = singleDriz + wcs_ext\n iter_singleWeight = singleWeight + wcs_ext\n else:\n iter_singleDriz = singleDriz[wcs_extnum]\n iter_singleWeight = singleWeight[wcs_extnum]\n else:\n iter_singleDriz = singleDriz_name + wcs_ext\n iter_singleWeight = singleWeight_name + wcs_ext\n\n # read in WCS from first single drizzle image to use as WCS for median image\n if _single_hdr is None:\n if virtual:\n _single_hdr = singleDriz[wcs_extnum].header\n else:\n _single_hdr = fits.getheader(singleDriz_name, ext=wcs_extnum, memmap=False)\n\n _singleImage=iterfile.IterFitsFile(iter_singleDriz)\n if virtual:\n _singleImage.handle = singleDriz\n _singleImage.inmemory = True\n\n singleDrizList.append(_singleImage) #add to an array for bookkeeping\n\n # If it exists, extract the corresponding weight images\n if (not virtual and os.access(singleWeight,os.F_OK)) or (\n virtual and singleWeight):\n _weight_file=iterfile.IterFitsFile(iter_singleWeight)\n if virtual:\n _weight_file.handle = singleWeight\n _weight_file.inmemory = True\n\n singleWeightList.append(_weight_file)\n try:\n tmp_mean_value = ImageStats(_weight_file.data, lower=1e-8,\n fields=\"mean\", nclip=0).mean\n except ValueError:\n tmp_mean_value = 0.0\n _wht_mean.append(tmp_mean_value * maskpt)\n\n # Extract instrument specific parameters and place in lists\n\n # If an image has zero exposure time we will\n # redefine that value as '1'. Although this will cause inaccurate scaling\n # of the data to occur in the 'minmed' combination algorithm, this is a\n # necessary evil since it avoids divide by zero exceptions. It is more
important that the divide by zero exceptions not cause Multidrizzle to\n # crash in the pipeline than it is to raise an exception for this obviously\n # bad data even though this is not the type of data you would wish to process\n # with Multidrizzle.\n #\n # Get the exposure time from the InputImage object\n #\n # MRD 19-May-2011\n # Changed exposureTimeList to take exposure time from img_exptime\n # variable instead of the image._exptime attribute, since\n # image._exptime was just giving 1.\n #\n exposureTimeList.append(img_exptime)\n\n # Use only \"commanded\" chips to extract subtractedSky and rdnoise:\n rdnoise = 0.0\n nchips = 0\n bsky = None # minimum sky across **used** chips\n\n for chip in image.returnAllChips(extname=image.scienceExt):\n # compute sky value as sky/pixel using the single_drz pixel scale\n if bsky is None or bsky > chip.subtractedSky:\n bsky = chip.subtractedSky * chip._conversionFactor\n\n # Extract the readnoise value for the chip\n rdnoise += (chip._rdnoise)**2\n nchips += 1\n\n if bsky is None:\n bsky = 0.0\n\n if nchips > 0:\n rdnoise = math.sqrt(rdnoise/nchips)\n\n backgroundValueList.append(bsky)\n readnoiseList.append(rdnoise)\n\n ## compute sky value as sky/pixel using the single_drz pixel scale\n #bsky = image._image[image.scienceExt,1].subtractedSky# * (image.outputValues['scale']**2)\n #backgroundValueList.append(bsky)\n\n ## Extract the readnoise value for the chip\n #sci_chip = image._image[image.scienceExt,1]\n #readnoiseList.append(sci_chip._rdnoise) #verify this is calculated correctly in the image object\n\n print(\"reference sky value for image \",image._filename,\" is \", backgroundValueList[-1])\n #\n # END Loop over input image list\n #\n\n # create an array for the median output image, use the size of the first image in the list\n medianImageArray = np.zeros(singleDrizList[0].shape,dtype=singleDrizList[0].type())\n\n if ( comb_type.lower() == \"minmed\") and not newmasks:\n # Issue a warning if minmed is being run with newmasks turned off.\n print('\\nWARNING: Creating median image without the application of bad pixel masks!\\n')\n\n # create the master list to be used by the image iterator\n masterList = []\n masterList.extend(singleDrizList)\n masterList.extend(singleWeightList)\n\n print('\\n')\n\n # Specify the location of the drz image sections\n startDrz = 0\n endDrz = len(singleDrizList)+startDrz\n\n # Specify the location of the wht image sections\n startWht = len(singleDrizList)+startDrz\n endWht = startWht + len(singleWeightList)\n _weight_mask_list = None\n\n # Fire up the image iterator\n #\n # The overlap value needs to be set to 2*grow in order to\n # avoid edge effects when scrolling down the image, and to\n # ensure that the last section returned from the iterator\n # has enough rows to span the kernel used in the boxcar method\n # within minmed.\n _overlap = 2*int(grow)\n\n #Start by computing the buffer size for the iterator\n _imgarr = masterList[0].data\n _bufsize = nimageiter.BUFSIZE\n if bufsizeMb is not None:\n _bufsize *= bufsizeMb\n _imgrows = _imgarr.shape[0]\n _nrows = nimageiter.computeBuffRows(_imgarr)\n# _overlaprows = _nrows - (_overlap+1)\n# _niter = int(_imgrows/_nrows)\n# _niter = 1 + int( (_imgrows - _overlaprows)/_nrows)\n niter = nimageiter.computeNumberBuff(_imgrows,_nrows,_overlap)\n #computeNumberBuff actually returns (niter,buffrows)\n _niter= niter[0]\n _nrows = niter[1]\n _lastrows = _imgrows - (_niter*(_nrows-_overlap))\n\n # check to see if this buffer size will leave enough rows 
for\n # the section returned on the last iteration\n if _lastrows < _overlap+1:\n _delta_rows = (_overlap+1 - _lastrows)//_niter\n if _delta_rows < 1 and _delta_rows >= 0: _delta_rows = 1\n _bufsize += (_imgarr.shape[1]*_imgarr.itemsize) * _delta_rows\n\n if not virtual:\n masterList[0].close()\n del _imgarr\n\n for imageSectionsList,prange in nimageiter.FileIter(masterList,overlap=_overlap,bufsize=_bufsize):\n\n if newmasks:\n \"\"\" Build new masks from single drizzled images. \"\"\"\n _weight_mask_list = []\n listIndex = 0\n for _weight_arr in imageSectionsList[startWht:endWht]:\n # Initialize an output mask array to ones\n # This array will be reused for every output weight image\n _weight_mask = np.zeros(_weight_arr.shape,dtype=np.uint8)\n\n \"\"\" Generate new pixel mask file for median step.\n This mask will be created from the single-drizzled\n weight image for this image.\n\n The mean of the weight array will be computed and all\n pixels with values less than 0.7 of the mean will be flagged\n as bad in this mask. This mask will then be used when\n creating the median image.\n \"\"\"\n # Compute image statistics\n _mean = _wht_mean[listIndex]\n\n # 0 means good, 1 means bad here...\n np.putmask(_weight_mask, np.less(_weight_arr,_mean), 1)\n #_weight_mask.info()\n _weight_mask_list.append(_weight_mask)\n listIndex += 1\n\n # Do MINMED\n if ( \"minmed\" in comb_type.lower()):\n if comb_type.lower()[0] == 'i':\n # set up use of 'imedian'/'imean' in minmed algorithm\n fillval = True\n else:\n fillval = False\n\n if (_weight_mask_list in [None,[]]):\n _weight_mask_list = None\n\n # Create the combined array object using the minmed algorithm\n result = minmed(imageSectionsList[startDrz:endDrz], # list of input data to be combined.\n imageSectionsList[startWht:endWht],# list of input data weight images to be combined.\n readnoiseList, # list of readnoise values to use for the input images.\n exposureTimeList, # list of exposure times to use for the input images.\n backgroundValueList, # list of image background values to use for the input images\n weightMaskList = _weight_mask_list, # list of input data weight masks to use for pixel rejection.\n combine_grow = grow, # Radius (pixels) for neighbor rejection\n combine_nsigma1 = nsigma1, # Significance for accepting minimum instead of median\n combine_nsigma2 = nsigma2, # Significance for accepting minimum instead of median\n fillval=fillval # turn on use of imedian/imean\n )\n# medianOutput[prange[0]:prange[1],:] = result.out_file1\n# minOutput[prange[0]:prange[1],:] = result.out_file2\n\n # DO NUMCOMBINE\n else:\n # Create the combined array object using the numcombine task\n result = numcombine.numCombine(imageSectionsList[startDrz:endDrz],\n numarrayMaskList=_weight_mask_list,\n combinationType=comb_type.lower(),\n nlow=nlow,\n nhigh=nhigh,\n upper=hthresh,\n lower=lthresh\n )\n\n # We need to account for any specified overlap when writing out\n # the processed image sections to the final output array.\n if prange[1] != _imgrows:\n medianImageArray[prange[0]:prange[1]-_overlap,:] = result.combArrObj[:-_overlap,:]\n else:\n medianImageArray[prange[0]:prange[1],:] = result.combArrObj\n\n\n del result\n del _weight_mask_list\n _weight_mask_list = None\n\n # Write out the combined image\n # use the header from the first single drizzled image in the list\n #header=fits.getheader(imageObjectList[0].outputNames[\"outSingle\"])\n _pf = _writeImage(medianImageArray, inputHeader=_single_hdr)\n\n if virtual:\n mediandict = {}\n mediandict[medianfile] = _pf\n for img in imageObjectList:\n img.saveVirtualOutputs(mediandict)\n else:\n try:\n print(\"Saving output median image to: \",medianfile)\n _pf.writeto(medianfile)\n except IOError:\n msg = \"Problem writing file: \"+medianfile\n print(msg)\n raise IOError(msg)\n\n del _pf\n\n # Always close any files opened to produce median image; namely,\n # single drizzle images and singly-drizzled weight images\n #\n\n for img in singleDrizList:\n if not virtual:\n img.close()\n singleDrizList = []\n\n # Close all singly drizzled weight images used to create median image.\n for img in singleWeightList:\n if not virtual:\n img.close()\n singleWeightList = []\n\n # If new median masks were turned on, close those files\n if _weight_mask_list:\n for arr in _weight_mask_list:\n del arr\n _weight_mask_list = None\n\n del masterList\n del medianImageArray\n\ndef _writeImage( dataArray=None, inputHeader=None):\n \"\"\" Writes out the result of the combination step.\n The header of the first 'outsingle' file in the\n association parlist is used as the header of the\n new image.\n\n Parameters\n ----------\n dataArray : arr\n Array of data to be written to a fits.PrimaryHDU object\n\n inputHeader : obj\n fits.header.Header object to use as basis for the PrimaryHDU header\n\n \"\"\"\n\n #_fname =inputFilename\n #_file = fits.open(_fname, mode='readonly')\n #_prihdu = fits.PrimaryHDU(header=_file[0].header,data=dataArray)\n\n _prihdu = fits.PrimaryHDU(data=dataArray, header=inputHeader)\n \"\"\"\n if inputHeader is None:\n #use a general primary HDU\n _prihdu = fits.PrimaryHDU(data=dataArray)\n\n else:\n _prihdu = inputHeader\n _prihdu.data=dataArray\n \"\"\"\n _pf = fits.HDUList()\n _pf.append(_prihdu)\n\n return _pf\n\n\ndef help(file=None):\n \"\"\"\n Print out syntax help for running astrodrizzle\n\n Parameters\n ----------\n file : str (Default = None)\n If given, write out help to the filename specified by this parameter\n Any previously existing file with this name will be deleted before\n writing out the help.\n\n \"\"\"\n helpstr = getHelpAsString(docstring=True, show_ver = True)\n if file is None:\n print(helpstr)\n else:\n if os.path.exists(file): os.remove(file)\n f = open(file, mode = 'w')\n f.write(helpstr)\n f.close()\n\n\ndef getHelpAsString(docstring = False, show_ver = True):\n \"\"\"\n return useful help from a file in the script directory called\n __taskname__.help\n\n \"\"\"\n install_dir = os.path.dirname(__file__)\n taskname = util.base_taskname(__taskname__, __package__)\n htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')\n helpfile = os.path.join(install_dir, taskname + '.help')\n\n if docstring or (not docstring and not os.path.exists(htmlfile)):\n if show_ver:\n helpString = os.linesep + \\\n ' '.join([__taskname__, 'Version', __version__,\n ' updated on ', __vdate__]) + 2*os.linesep\n else:\n helpString = ''\n if os.path.exists(helpfile):\n helpString += teal.getHelpFileAsString(taskname, __file__)\n else:\n if __doc__ is not None:\n helpString += __doc__ + os.linesep\n else:\n helpString = 'file://' + htmlfile\n\n return helpString\n\n\nmedian.__doc__ = getHelpAsString(docstring = True, show_ver = False)\n" },
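The pixel-rejection rule described in the `_median()` docstring above (flag any pixel whose singly-drizzled weight falls below `combine_maskpt` times the mean weight, then exclude it from the median) can be summarized in a short plain-numpy sketch. This is illustrative only, not code from the repository: `sci` and `wht` are hypothetical stand-ins for the singly-drizzled science and weight stacks, and `maskpt` plays the role of the `combine_maskpt` parameter.

import numpy as np

# Hypothetical inputs: a stack of singly-drizzled science images and
# their corresponding weight maps (same shape, one pair per exposure).
sci = [np.random.normal(100.0, 5.0, (64, 64)) for _ in range(3)]
wht = [np.random.uniform(0.5, 1.5, (64, 64)) for _ in range(3)]

maskpt = 0.7  # fraction of the mean weight below which a pixel is rejected

masked = []
for s, w in zip(sci, wht):
    # Mean of the weight image over near-nonzero pixels, scaled by maskpt;
    # this mirrors the ImageStats(..., lower=1e-8).mean * maskpt value that
    # _median() stores in _wht_mean for each weight image.
    thresh = maskpt * w[w > 1e-8].mean()
    # True marks a bad pixel, like the 1s written into the uint8 masks above.
    masked.append(np.ma.array(s, mask=(w < thresh)))

# Median-combine along the stack axis, ignoring the flagged pixels.
median_image = np.ma.median(np.ma.stack(masked), axis=0).filled(0.0)
print(median_image.shape)  # -> (64, 64)

In the actual step the same rejection masks are handed to either `minmed` or `numcombine.numCombine` depending on `combine_type`; this sketch corresponds to the plain `median` combination choice, with `maskpt` matching the 0.7 fraction quoted in the docstring.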
{ "alpha_fraction": 0.5667293071746826, "alphanum_fraction": 0.5708020329475403, "avg_line_length": 39.40506362915039, "blob_id": "a9f39f5b54c43d2fd516f4405147b6b9dd9c6e22", "content_id": "dbed0c3af957f62fe3a7a88a1d2b4f027a6fe927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 3192, "license_type": "no_license", "max_line_length": 78, "num_lines": 79, "path": "/lib/astrodither_setup.py", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "import glob\nimport os\nimport sys\n\n\n# BUILD should be 'debug', 'profile' or 'release'\n# TODO: How often is this actually mucked with? Would it be worth adding a\n# custom command that adds a command-line option for this?\nBUILD = 'release'\n\n\ndef setup_hook(config):\n # First get the pywcs includes path\n # This is the case for building as part of stsci_python\n pywcs_dir = os.path.abspath(os.path.join(os.path.pardir, 'pywcs'))\n if os.path.exists(pywcs_dir):\n pywcsincludes = [os.path.join(pywcs_dir, 'src')]\n wcslibs = glob.glob(os.path.join(pywcs_dir, 'wcslib*'))\n if len(wcslibs) == 1:\n pywcsincludes.append(os.path.join(pywcs_dir, wcslibs[0], 'C'))\n else:\n raise SystemExit('No suitable version of wcslib found in the '\n 'pywcs distribution at %s' % pywcs_dir)\n else:\n # If pywcs is otherwise already installed...\n # TODO: Maybe we can eventually make pywcs a setup requirement for\n # drizzlepac, so long as pywcs itself installs easily enough...\n try:\n from astropy import wcs as pywcs\n # TODO: It would be nice if pywcs had a get_includes() function a\n # la numpy\n pywcslib = pywcs.__path__[0]\n pywcsincludes = [os.path.join(pywcslib, 'include'),\n os.path.join(pywcslib, 'include', 'wcslib')]\n # TODO: Institute version check?\n except ImportError:\n raise ImportError('PyWCS was not found. It may not be installed '\n 'or it may not be on your PYTHONPATH.\\n'\n 'drizzlepac requires pywcs 1.4 or later.')\n\n # Add/remove macros and compile args based on the build type\n define_macros = []\n undef_macros = []\n extra_compile_args = []\n\n if BUILD.lower() == 'debug':\n define_macros.append(('DEBUG', None))\n undef_macros.append('NDEBUG')\n if not (sys.platform.startswith('sun') or sys.platform == 'win32'):\n extra_compile_args.extend(['-fno-inline', '-O0', '-g'])\n elif BUILD.lower() == 'profile':\n define_macros.append(('NDEBUG', None))\n undef_macros.append('DEBUG')\n if not (sys.platform.startswith('sun') or sys.platform == 'win32'):\n extra_compile_args.extend(['-O3', '-g'])\n elif BUILD.lower() == 'release':\n define_macros.append(('NDEBUG', None))\n undef_macros.append('DEBUG')\n else:\n raise ValueError(\"BUILD should be one of 'debug', 'profile', or \"\n \"'release'; got %s\" % BUILD)\n\n for idx, m in enumerate(define_macros):\n if m[1] is not None:\n define_macros[idx] = '%s = %s' % m\n else:\n define_macros[idx] = m[0]\n\n ext_opts = [('include_dirs', pywcsincludes),\n ('define_macros', define_macros),\n ('undef_macros', undef_macros),\n ('extra_compile_args', extra_compile_args)]\n\n ext = config['extension=drizzlepac.cdriz']\n for opt, value in ext_opts:\n if opt in ext:\n ext[opt] += '\\n' + '\\n'.join(value)\n else:\n ext[opt] = '\\n'.join(value)\n" }, { "alpha_fraction": 0.6033356189727783, "alphanum_fraction": 0.6130896806716919, "avg_line_length": 123.41011047363281, "blob_id": "f10534511ec1902051473ef54b382cc269609627", "content_id": "89044d66aa9b405c94880d841a796faae67165f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 66434, "license_type": "no_license", "max_line_length": 531, "num_lines": 534, "path": "/lib/drizzlepac/htmlhelp/_modules/drizzlepac/stisData.html", "repo_name": "stevenrjanssens/drizzlepac", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 
Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n \n <title>drizzlepac.stisData &#8212; DrizzlePac 2.1.16 (05-June-2017) documentation</title>\n \n <link rel=\"stylesheet\" href=\"../../_static/stsci_sphinx.css\" type=\"text/css\" />\n <link rel=\"stylesheet\" href=\"../../_static/pygments.css\" type=\"text/css\" />\n \n <script type=\"text/javascript\">\n var DOCUMENTATION_OPTIONS = {\n URL_ROOT: '../../',\n VERSION: '2.1.16 (05-June-2017)',\n COLLAPSE_INDEX: false,\n FILE_SUFFIX: '.html',\n HAS_SOURCE: true,\n SOURCELINK_SUFFIX: '.txt'\n };\n </script>\n <script type=\"text/javascript\" src=\"../../_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"../../_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"../../_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"../../genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"../../search.html\" /> \n </head>\n <body role=\"document\">\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"../../genindex.html\" title=\"General Index\"\n accesskey=\"I\">index</a></li>\n <li class=\"right\" >\n <a href=\"../../py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"../../index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"../index.html\" accesskey=\"U\">Module code</a> &#187;</li> \n </ul>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n <p class=\"logo\"><a href=\"../../index.html\">\n <img class=\"logo\" src=\"../../_static/stsci_logo.png\" alt=\"Logo\"/>\n </a></p>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3>Quick search</h3>\n <form class=\"search\" action=\"../../search.html\" method=\"get\">\n <div><input type=\"text\" name=\"q\" /></div>\n <div><input type=\"submit\" value=\"Go\" /></div>\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n </form>\n</div>\n<script type=\"text/javascript\">$('#searchbox').show(0);</script>\n </div>\n </div>\n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n <div class=\"body\" role=\"main\">\n \n <h1>Source code for drizzlepac.stisData</h1><div class=\"highlight\"><pre>\n<span></span><span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\">`stisData` module provides classes used to import STIS specific instrument data.</span>\n\n<span class=\"sd\">:Authors: Megan Sosey, Christopher Hanley</span>\n\n<span class=\"sd\">:License: `&lt;http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE&gt;`_</span>\n\n<span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"kn\">from</span> <span class=\"nn\">__future__</span> <span class=\"k\">import</span> <span class=\"n\">absolute_import</span><span class=\"p\">,</span> <span class=\"n\">division</span><span class=\"p\">,</span> <span class=\"n\">print_function</span> <span class=\"c1\"># confidence medium</span>\n\n<span class=\"kn\">from</span> <span class=\"nn\">stsci.tools</span> <span class=\"k\">import</span> <span 
class=\"n\">fileutil</span>\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"kn\">from</span> <span class=\"nn\">stsci.imagemanip</span> <span class=\"k\">import</span> <span class=\"n\">interp2d</span>\n<span class=\"kn\">from</span> <span class=\"nn\">.imageObject</span> <span class=\"k\">import</span> <span class=\"n\">imageObject</span>\n\n\n<div class=\"viewcode-block\" id=\"STISInputImage\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.STISInputImage\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">STISInputImage</span> <span class=\"p\">(</span><span class=\"n\">imageObject</span><span class=\"p\">):</span>\n\n <span class=\"n\">SEPARATOR</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;_&#39;</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"n\">group</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"n\">imageObject</span><span class=\"o\">.</span><span class=\"n\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"p\">,</span><span class=\"n\">group</span><span class=\"o\">=</span><span class=\"n\">group</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># define the cosmic ray bits value to use in the dq array</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">cr_bits_value</span> <span class=\"o\">=</span> <span class=\"mi\">8192</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_effGain</span> <span class=\"o\">=</span> <span class=\"mf\">1.</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_instrument</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s2\">&quot;INSTRUME&quot;</span><span class=\"p\">]</span> <span class=\"c1\">#this just shows instrument, not detector; detector assigned by subclass</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">native_units</span><span class=\"o\">=</span><span class=\"s1\">&#39;COUNTS&#39;</span>\n\n<div class=\"viewcode-block\" id=\"STISInputImage.getflat\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.STISInputImage.getflat\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getflat</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Method for retrieving a detector&#39;s flat field. 
For STIS there are three.</span>\n<span class=\"sd\"> This method will return an array the same shape as the image.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"n\">exten</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">errExt</span><span class=\"o\">+</span><span class=\"s1\">&#39;,&#39;</span><span class=\"o\">+</span><span class=\"nb\">str</span><span class=\"p\">(</span><span class=\"n\">chip</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># The keyword for STIS flat fields in the primary header of the flt</span>\n\n <span class=\"n\">lflatfile</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">osfn</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;LFLTFILE&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">pflatfile</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">osfn</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s1\">&#39;PFLTFILE&#39;</span><span class=\"p\">])</span>\n\n <span class=\"c1\"># Try to open the file in the location specified by LFLTFILE.</span>\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"n\">handle</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">openImage</span><span class=\"p\">(</span><span class=\"n\">lflatfile</span><span class=\"p\">,</span> <span class=\"n\">mode</span><span class=\"o\">=</span><span class=\"s1\">&#39;readonly&#39;</span><span class=\"p\">,</span> <span class=\"n\">memmap</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n <span class=\"n\">hdu</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">getExtn</span><span class=\"p\">(</span><span class=\"n\">handle</span><span class=\"p\">,</span><span class=\"n\">extn</span><span class=\"o\">=</span><span class=\"n\">exten</span><span class=\"p\">)</span>\n <span class=\"n\">lfltdata</span> <span class=\"o\">=</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">data</span>\n <span class=\"k\">if</span> <span class=\"n\">lfltdata</span><span class=\"o\">.</span><span class=\"n\">shape</span> <span class=\"o\">!=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">full_shape</span><span class=\"p\">:</span>\n <span class=\"n\">lfltdata</span> <span class=\"o\">=</span> <span class=\"n\">interp2d</span><span class=\"o\">.</span><span class=\"n\">expand2d</span><span 
class=\"p\">(</span><span class=\"n\">lfltdata</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">full_shape</span><span class=\"p\">)</span>\n <span class=\"k\">except</span> <span class=\"ne\">IOError</span><span class=\"p\">:</span>\n <span class=\"n\">lfltdata</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">full_shape</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">dtype</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s2\">&quot;Cannot find file &#39;</span><span class=\"si\">{:s}</span><span class=\"s2\">&#39;. Treating flatfield constant value &quot;</span>\n <span class=\"s2\">&quot;of &#39;1&#39;.</span><span class=\"se\">\\n</span><span class=\"s2\">&quot;</span><span class=\"o\">.</span><span class=\"n\">format</span><span class=\"p\">(</span><span class=\"n\">lflatfile</span><span class=\"p\">))</span>\n\n <span class=\"c1\"># Try to open the file in the location specified by PFLTFILE.</span>\n <span class=\"k\">try</span><span class=\"p\">:</span>\n <span class=\"n\">handle</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">openImage</span><span class=\"p\">(</span><span class=\"n\">pflatfile</span><span class=\"p\">,</span> <span class=\"n\">mode</span><span class=\"o\">=</span><span class=\"s1\">&#39;readonly&#39;</span><span class=\"p\">,</span> <span class=\"n\">memmap</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n <span class=\"n\">hdu</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">getExtn</span><span class=\"p\">(</span><span class=\"n\">handle</span><span class=\"p\">,</span><span class=\"n\">extn</span><span class=\"o\">=</span><span class=\"n\">exten</span><span class=\"p\">)</span>\n <span class=\"n\">pfltdata</span> <span class=\"o\">=</span> <span class=\"n\">hdu</span><span class=\"o\">.</span><span class=\"n\">data</span>\n <span class=\"k\">except</span> <span class=\"ne\">IOError</span><span class=\"p\">:</span>\n <span class=\"n\">pfltdata</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ones</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">full_shape</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">dtype</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s2\">&quot;Cannot find file &#39;</span><span class=\"si\">{:s}</span><span class=\"s2\">&#39;. 
Treating flatfield constant value &quot;</span>\n <span class=\"s2\">&quot;of &#39;1&#39;.</span><span class=\"se\">\\n</span><span class=\"s2\">&quot;</span><span class=\"o\">.</span><span class=\"n\">format</span><span class=\"p\">(</span><span class=\"n\">pflatfile</span><span class=\"p\">))</span>\n\n <span class=\"n\">flat</span> <span class=\"o\">=</span> <span class=\"n\">lfltdata</span> <span class=\"o\">*</span> <span class=\"n\">pfltdata</span>\n\n <span class=\"k\">return</span> <span class=\"n\">flat</span></div>\n\n<div class=\"viewcode-block\" id=\"STISInputImage.doUnitConversions\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.STISInputImage.doUnitConversions\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">doUnitConversions</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;Convert the data to electrons.</span>\n\n<span class=\"sd\"> This converts all science data extensions and saves</span>\n<span class=\"sd\"> the results back to disk. We need to make sure</span>\n<span class=\"sd\"> the data inside the chips already in memory is altered as well.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"c1\"># Image information</span>\n <span class=\"n\">_handle</span> <span class=\"o\">=</span> <span class=\"n\">fileutil</span><span class=\"o\">.</span><span class=\"n\">openImage</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_filename</span><span class=\"p\">,</span> <span class=\"n\">mode</span><span class=\"o\">=</span><span class=\"s1\">&#39;readonly&#39;</span><span class=\"p\">,</span> <span class=\"n\">memmap</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n\n <span class=\"k\">for</span> <span class=\"n\">det</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n\n <span class=\"n\">chip</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">det</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n\n <span class=\"n\">conversionFactor</span> <span class=\"o\">=</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_effGain</span> <span class=\"o\">=</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"c1\">#1.</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_conversionFactor</span> <span class=\"o\">=</span> <span class=\"n\">conversionFactor</span> <span class=\"c1\">#1.</span>\n\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">msg</span> <span 
class=\"o\">=</span> <span class=\"s2\">&quot;Invalid gain value for data, no conversion done&quot;</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"n\">msg</span><span class=\"p\">)</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span><span class=\"p\">(</span><span class=\"n\">msg</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># Close the files and clean-up</span>\n <span class=\"n\">_handle</span><span class=\"o\">.</span><span class=\"n\">close</span><span class=\"p\">()</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_effGain</span> <span class=\"o\">=</span> <span class=\"n\">conversionFactor</span> <span class=\"c1\"># 1.0</span></div>\n\n <span class=\"k\">def</span> <span class=\"nf\">_assignSignature</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">chip</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;Assign a unique signature for the image based</span>\n<span class=\"sd\"> on the instrument, detector, chip, and size</span>\n<span class=\"sd\"> this will be used to uniquely identify the appropriate</span>\n<span class=\"sd\"> static mask for the image.</span>\n\n<span class=\"sd\"> This also records the filename for the static mask to the outputNames dictionary.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">sci_chip</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span>\n <span class=\"n\">ny</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_naxis1</span>\n <span class=\"n\">nx</span><span class=\"o\">=</span><span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">_naxis2</span>\n <span class=\"n\">detnum</span> <span class=\"o\">=</span> <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">detnum</span>\n <span class=\"n\">instr</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_instrument</span>\n\n <span class=\"n\">sig</span><span class=\"o\">=</span><span class=\"p\">(</span><span class=\"n\">instr</span><span class=\"o\">+</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_detector</span><span class=\"p\">,(</span><span class=\"n\">nx</span><span class=\"p\">,</span><span class=\"n\">ny</span><span class=\"p\">),</span><span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">detnum</span><span class=\"p\">))</span> <span class=\"c1\">#signature is a tuple</span>\n <span class=\"n\">sci_chip</span><span class=\"o\">.</span><span class=\"n\">signature</span><span class=\"o\">=</span><span class=\"n\">sig</span> <span class=\"c1\">#signature is a tuple</span></div>\n\n\n\n<div class=\"viewcode-block\" id=\"CCDInputImage\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.CCDInputImage\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">CCDInputImage</span><span class=\"p\">(</span><span class=\"n\">STISInputImage</span><span class=\"p\">):</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">__init__</span><span class=\"p\">(</span><span 
class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"n\">group</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"n\">STISInputImage</span><span class=\"o\">.</span><span class=\"n\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"p\">,</span><span class=\"n\">group</span><span class=\"o\">=</span><span class=\"n\">group</span><span class=\"p\">)</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">full_shape</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"mi\">1024</span><span class=\"p\">,</span><span class=\"mi\">1024</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_detector</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s2\">&quot;DETECTOR&quot;</span><span class=\"p\">]</span>\n\n\n <span class=\"c1\">#if ( self.amp == &#39;D&#39; or self.amp == &#39;C&#39; ) : # cte direction depends on amp</span>\n <span class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">cte_dir</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">darkcurrent</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getdarkcurrent</span><span class=\"p\">()</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">cte_dir</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n <span class=\"c1\">#if ( self.amp == &#39;A&#39; or self.amp == &#39;B&#39; ) :</span>\n <span class=\"c1\"># self.cte_dir = -1</span>\n\n<div class=\"viewcode-block\" id=\"CCDInputImage.getdarkcurrent\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.CCDInputImage.getdarkcurrent\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getdarkcurrent</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Returns the dark current for the STIS CCD 
chip.</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> darkcurrent : float</span>\n<span class=\"sd\"> Dark current value in **units of electrons** (or counts, if proc_unit==&#39;native&#39;).</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">darkcurrent</span> <span class=\"o\">=</span> <span class=\"mf\">0.009</span> <span class=\"c1\">#electrons/sec</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">proc_unit</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;native&#39;</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"n\">darkcurrent</span> <span class=\"o\">/</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_gain</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">darkcurrent</span></div>\n\n<div class=\"viewcode-block\" id=\"CCDInputImage.getReadNoise\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.CCDInputImage.getReadNoise\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getReadNoise</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Method for returning the readnoise of a detector (in DN).</span>\n\n<span class=\"sd\"> :units: DN</span>\n\n<span class=\"sd\"> This is defined at the chip level to be consistent with other</span>\n<span class=\"sd\"> detector classes, where different chips can have different gains.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">proc_unit</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;native&#39;</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">/</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_gain</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span></div>\n\n<div class=\"viewcode-block\" id=\"CCDInputImage.setInstrumentParameters\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.CCDInputImage.setInstrumentParameters\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">setInstrumentParameters</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; This method overrides the superclass to set default values into</span>\n<span class=\"sd\"> the parameter dictionary, in case empty entries are provided.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n <span class=\"n\">pri_header</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span>\n\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gain&#39;</span><span 
class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;ATODGAIN&#39;</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rdnoise&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;READNSE&#39;</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;EXPTIME&#39;</span>\n\n <span class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">returnAllChips</span><span class=\"p\">(</span><span class=\"n\">extname</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">):</span>\n\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gain&#39;</span><span class=\"p\">],</span> <span class=\"n\">pri_header</span><span class=\"p\">,</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rdnoise&#39;</span><span class=\"p\">],</span> <span class=\"n\">pri_header</span><span class=\"p\">,</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">],</span> <span class=\"n\">chip</span><span 
class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">,</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">])</span>\n\n <span class=\"k\">if</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span> <span class=\"ow\">or</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span> <span class=\"ow\">or</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;ERROR: invalid instrument task parameter&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span>\n\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_effGain</span> <span class=\"o\">=</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_assignSignature</span><span class=\"p\">(</span><span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_chip</span><span class=\"p\">)</span> <span class=\"c1\">#this is used in the static mask</span>\n\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">doUnitConversions</span><span class=\"p\">()</span></div></div>\n\n\n<div class=\"viewcode-block\" id=\"NUVInputImage\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.NUVInputImage\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">NUVInputImage</span><span class=\"p\">(</span><span class=\"n\">STISInputImage</span><span class=\"p\">):</span>\n <span class=\"k\">def</span> <span class=\"nf\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">filename</span><span class=\"p\">,</span> <span class=\"n\">group</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span> <span class=\"o\">=</span> <span class=\"mf\">1.0</span>\n\n <span class=\"n\">STISInputImage</span><span class=\"o\">.</span><span class=\"n\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"p\">,</span> <span class=\"n\">group</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">)</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_detector</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s2\">&quot;DETECTOR&quot;</span><span class=\"p\">]</span>\n\n <span class=\"c1\"># no cte correction for STIS/NUV-MAMA so set cte_dir=0.</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;WARNING: No cte correction will be made for this STIS/NUV-MAMA data.&#39;</span><span class=\"p\">)</span>\n\n <span 
class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">cte_dir</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">darkcurrent</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getdarkcurrent</span><span class=\"p\">()</span>\n\n<div class=\"viewcode-block\" id=\"NUVInputImage.setInstrumentParameters\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.NUVInputImage.setInstrumentParameters\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">setInstrumentParameters</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; This method overrides the superclass to set default values into</span>\n<span class=\"sd\"> the parameter dictionary, in case empty entries are provided.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">pri_header</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span>\n\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gain&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rdnoise&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">if</span> <span 
class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;EXPTIME&#39;</span>\n\n <span class=\"c1\"># We need to determine if the user has used the default readnoise/gain value</span>\n <span class=\"c1\"># since if not, they will need to supply a gain/readnoise value as well</span>\n <span class=\"n\">usingDefaultGain</span> <span class=\"o\">=</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span>\n <span class=\"n\">usingDefaultReadnoise</span> <span class=\"o\">=</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span>\n\n <span class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">returnAllChips</span><span class=\"p\">(</span><span class=\"n\">extname</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">):</span>\n <span class=\"c1\">#pri_header=chip.header</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">cte_dir</span><span class=\"o\">=</span><span class=\"mi\">0</span>\n <span class=\"c1\"># We need to treat Read Noise and Gain as a special case since it is</span>\n <span class=\"c1\"># not populated in the STIS primary header for the MAMAs</span>\n <span class=\"k\">if</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rdnoise&#39;</span><span class=\"p\">],</span> <span class=\"n\">pri_header</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span>\n <span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n\n <span class=\"k\">if</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span 
class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gain&#39;</span><span class=\"p\">],</span> <span class=\"n\">pri_header</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span>\n <span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n\n <span class=\"c1\"># Set the default readnoise or gain values based upon the amount of user input given.</span>\n\n <span class=\"k\">if</span> <span class=\"n\">usingDefaultReadnoise</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span><span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_setMAMADefaultReadnoise</span><span class=\"p\">()</span>\n\n <span class=\"k\">if</span> <span class=\"n\">usingDefaultGain</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_setMAMADefaultGain</span><span class=\"p\">()</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_assignSignature</span><span class=\"p\">(</span><span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_chip</span><span class=\"p\">)</span> <span class=\"c1\">#this is used in the static mask</span>\n\n\n\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">],</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">,</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">])</span>\n\n <span class=\"k\">if</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;ERROR: invalid instrument task parameter&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span>\n <span class=\"c1\"># Convert the science data to electrons if specified by the user.</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">doUnitConversions</span><span class=\"p\">()</span></div>\n\n\n <span class=\"k\">def</span> <span class=\"nf\">_setMAMAchippars</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_setMAMADefaultGain</span><span class=\"p\">()</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_setMAMADefaultReadnoise</span><span class=\"p\">()</span>\n\n\n <span 
class=\"k\">def</span> <span class=\"nf\">_setMAMADefaultGain</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n <span class=\"k\">return</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_gain</span>\n\n\n <span class=\"k\">def</span> <span class=\"nf\">_setMAMADefaultReadnoise</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"k\">return</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span>\n\n\n<div class=\"viewcode-block\" id=\"NUVInputImage.getdarkcurrent\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.NUVInputImage.getdarkcurrent\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getdarkcurrent</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Returns the dark current for the STIS NUV detector.</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> darkcurrent : float</span>\n<span class=\"sd\"> Dark current value in **units of electrons** (or counts, if proc_unit==&#39;native&#39;).</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">darkcurrent</span> <span class=\"o\">=</span> <span class=\"mf\">0.0013</span> <span class=\"c1\">#electrons/sec</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">proc_unit</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;native&#39;</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"n\">darkcurrent</span> <span class=\"o\">/</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_gain</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">darkcurrent</span></div>\n\n<div class=\"viewcode-block\" id=\"NUVInputImage.doUnitConversions\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.NUVInputImage.doUnitConversions\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">doUnitConversions</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;Convert the data to electrons.</span>\n\n<span class=\"sd\"> This converts all science data extensions and saves</span>\n<span class=\"sd\"> the results back to disk. 
We need to make sure</span>\n<span class=\"sd\"> the data inside the chips already in memory is altered as well.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"k\">for</span> <span class=\"n\">det</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n\n <span class=\"n\">chip</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">det</span><span class=\"p\">]</span>\n\n <span class=\"n\">conversionFactor</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span> <span class=\"c1\">#1.</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">effGain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_conversionFactor</span> <span class=\"o\">=</span> <span class=\"n\">conversionFactor</span> <span class=\"c1\">#1.</span></div></div>\n\n<div class=\"viewcode-block\" id=\"FUVInputImage\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.FUVInputImage\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">FUVInputImage</span><span class=\"p\">(</span><span class=\"n\">STISInputImage</span><span class=\"p\">):</span>\n <span class=\"k\">def</span> <span class=\"nf\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">,</span><span class=\"n\">group</span><span class=\"o\">=</span><span class=\"kc\">None</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span><span class=\"o\">=</span><span class=\"mf\">1.0</span>\n\n <span class=\"n\">STISInputImage</span><span class=\"o\">.</span><span class=\"n\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span><span class=\"n\">filename</span><span class=\"p\">,</span><span class=\"n\">group</span><span class=\"o\">=</span><span class=\"n\">group</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_detector</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"s2\">&quot;PRIMARY&quot;</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">[</span><span class=\"s2\">&quot;DETECTOR&quot;</span><span class=\"p\">]</span>\n\n <span class=\"c1\"># no cte correction for STIS/FUV-MAMA so set cte_dir=0.</span>\n <span class=\"nb\">print</span><span 
class=\"p\">(</span><span class=\"s1\">&#39;WARNING: No cte correction will be made for this STIS/FUV-MAMA data.&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">cte_dir</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">chip</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">darkcurrent</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getdarkcurrent</span><span class=\"p\">()</span>\n\n<div class=\"viewcode-block\" id=\"FUVInputImage.setInstrumentParameters\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.FUVInputImage.setInstrumentParameters\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">setInstrumentParameters</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot; This method overrides the superclass to set default values into</span>\n<span class=\"sd\"> the parameter dictionary, in case empty entries are provided.</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">pri_header</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">header</span>\n <span class=\"n\">usingDefaultGain</span> <span class=\"o\">=</span> <span class=\"kc\">False</span>\n <span class=\"n\">usingDefaultReadnoise</span> <span class=\"o\">=</span> <span class=\"kc\">False</span>\n\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gain&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rdnoise&#39;</span><span class=\"p\">],</span> <span 
class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_isNotValid</span> <span class=\"p\">(</span><span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">],</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]):</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;EXPTIME&#39;</span>\n\n <span class=\"k\">for</span> <span class=\"n\">chip</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">returnAllChips</span><span class=\"p\">(</span><span class=\"n\">extname</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">):</span>\n <span class=\"c1\">#pri_header=chip.header #stis stores stuff in the science data header</span>\n\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">cte_dir</span><span class=\"o\">=</span><span class=\"mi\">0</span>\n\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;exptime&#39;</span><span class=\"p\">],</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">header</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;expkeyword&#39;</span><span class=\"p\">]</span>\n <span class=\"p\">)</span>\n\n <span class=\"k\">if</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;ERROR: invalid instrument task parameter&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span>\n\n <span class=\"k\">if</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rdnoise&#39;</span><span class=\"p\">],</span> <span class=\"n\">pri_header</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;rnkeyword&#39;</span><span class=\"p\">]</span>\n <span class=\"p\">)</span>\n <span 
class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"n\">usingDefaultReadnoise</span> <span class=\"o\">=</span> <span class=\"kc\">True</span>\n\n <span class=\"k\">if</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">getInstrParameter</span><span class=\"p\">(</span>\n <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gain&#39;</span><span class=\"p\">],</span> <span class=\"n\">pri_header</span><span class=\"p\">,</span> <span class=\"n\">instrpars</span><span class=\"p\">[</span><span class=\"s1\">&#39;gnkeyword&#39;</span><span class=\"p\">]</span>\n <span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"n\">usingDefaultGain</span> <span class=\"o\">=</span> <span class=\"kc\">True</span>\n\n <span class=\"k\">if</span> <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_exptime</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;ERROR: invalid instrument task parameter&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">raise</span> <span class=\"ne\">ValueError</span>\n\n <span class=\"c1\"># We need to determine if the user has used the default readnoise/gain value</span>\n <span class=\"c1\"># since if not, they will need to supply a gain/readnoise value as well</span>\n\n <span class=\"k\">if</span> <span class=\"n\">usingDefaultReadnoise</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_rdnoise</span><span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_setMAMADefaultReadnoise</span><span class=\"p\">()</span>\n\n <span class=\"k\">if</span> <span class=\"n\">usingDefaultGain</span><span class=\"p\">:</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_setMAMADefaultGain</span><span class=\"p\">()</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_assignSignature</span><span class=\"p\">(</span><span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_chip</span><span class=\"p\">)</span> <span class=\"c1\">#this is used in the static mask</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_effGain</span><span class=\"o\">=</span><span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span>\n\n <span class=\"c1\"># Convert the science data to electrons if specified by the user.</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">doUnitConversions</span><span 
class=\"p\">()</span></div>\n\n\n<div class=\"viewcode-block\" id=\"FUVInputImage.getdarkcurrent\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.FUVInputImage.getdarkcurrent\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">getdarkcurrent</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\"> Returns the dark current for the STIS FUV detector.</span>\n\n<span class=\"sd\"> Returns</span>\n<span class=\"sd\"> -------</span>\n<span class=\"sd\"> darkcurrent : float</span>\n<span class=\"sd\"> Dark current value in **units of electrons** (or counts, if proc_unit==&#39;native&#39;).</span>\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"n\">darkcurrent</span> <span class=\"o\">=</span> <span class=\"mf\">0.07</span> <span class=\"c1\">#electrons/sec</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">proc_unit</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;native&#39;</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"n\">darkcurrent</span> <span class=\"o\">/</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_gain</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">darkcurrent</span></div>\n\n\n <span class=\"k\">def</span> <span class=\"nf\">_setMAMADefaultGain</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"k\">return</span> <span class=\"mi\">1</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">_setMAMADefaultReadnoise</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"k\">return</span> <span class=\"mi\">0</span>\n\n<div class=\"viewcode-block\" id=\"FUVInputImage.doUnitConversions\"><a class=\"viewcode-back\" href=\"../../stisobjects.html#drizzlepac.stisData.FUVInputImage.doUnitConversions\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">doUnitConversions</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"sd\">&quot;&quot;&quot;Convert the data to electrons.</span>\n\n<span class=\"sd\"> This converts all science data extensions and saves</span>\n<span class=\"sd\"> the results back to disk. 
We need to make sure</span>\n<span class=\"sd\"> the data inside the chips already in memory is altered as well.</span>\n\n<span class=\"sd\"> &quot;&quot;&quot;</span>\n\n <span class=\"k\">for</span> <span class=\"n\">det</span> <span class=\"ow\">in</span> <span class=\"nb\">range</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_numchips</span><span class=\"o\">+</span><span class=\"mi\">1</span><span class=\"p\">,</span><span class=\"mi\">1</span><span class=\"p\">):</span>\n\n <span class=\"n\">chip</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_image</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">scienceExt</span><span class=\"p\">,</span><span class=\"n\">det</span><span class=\"p\">]</span>\n\n <span class=\"n\">conversionFactor</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_gain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span> <span class=\"c1\">#1.</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">effGain</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">effGain</span>\n <span class=\"n\">chip</span><span class=\"o\">.</span><span class=\"n\">_conversionFactor</span> <span class=\"o\">=</span> <span class=\"n\">conversionFactor</span> <span class=\"c1\">#1.</span></div></div>\n</pre></div>\n\n </div>\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"related\" role=\"navigation\" aria-label=\"related navigation\">\n <h3>Navigation</h3>\n <ul>\n <li class=\"right\" style=\"margin-right: 10px\">\n <a href=\"../../genindex.html\" title=\"General Index\"\n >index</a></li>\n <li class=\"right\" >\n <a href=\"../../py-modindex.html\" title=\"Python Module Index\"\n >modules</a> |</li>\n <li class=\"nav-item nav-item-0\"><a href=\"../../index.html\">DrizzlePac 2.1.16 (05-June-2017) documentation</a> &#187;</li>\n <li class=\"nav-item nav-item-1\"><a href=\"../index.html\" >Module code</a> &#187;</li> \n </ul>\n </div>\n <div class=\"footer\" role=\"contentinfo\">\n &#169; Copyright 2017, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara.\n Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.5.1.\n </div>\n </body>\n</html>" } ]
12
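The `setInstrumentParameters` overrides above all follow one fall-back pattern: if neither an explicit value nor a header keyword was supplied, substitute a default keyword (or `None` for the MAMA special case), then resolve the per-chip value from the header. The following is a minimal standalone sketch of that pattern; the helper names and the plain-dict "header" are stand-ins for drizzlepac's own objects, not its actual API.

def is_not_valid(value, keyword):
    # neither an explicit value nor a header keyword was given
    return value is None and keyword is None

def get_instr_parameter(value, header, keyword):
    # an explicit value wins; otherwise look the keyword up in the header
    if value is not None:
        return value
    return header.get(keyword) if keyword else None

header = {'EXPTIME': 300.0}                      # hypothetical primary-header contents
instrpars = {'exptime': None, 'expkeyword': None}

if is_not_valid(instrpars['exptime'], instrpars['expkeyword']):
    instrpars['expkeyword'] = 'EXPTIME'          # same default the STIS classes use

exptime = get_instr_parameter(instrpars['exptime'], header, instrpars['expkeyword'])
assert exptime == 300.0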
grigoz/sqlpractice
https://github.com/grigoz/sqlpractice
97a2e088ab48914dff9b73e0f24e413615f39e14
839e59a61349a848daf9543a5474ec353def4b38
d07fed922ad4af35fbd254fa0368eb6dded76b7c
refs/heads/master
2020-03-25T11:03:53.291242
2018-10-19T15:00:37
2018-10-19T15:00:37
143,717,798
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7193370461463928, "alphanum_fraction": 0.7309392094612122, "avg_line_length": 47.91891860961914, "blob_id": "14da9814009d847b2690ddf3fbac40c45d9fea13", "content_id": "cc4aba5d214f91cbacb4bc3a89bf350b7656edbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1810, "license_type": "no_license", "max_line_length": 187, "num_lines": 37, "path": "/finalsql.py", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom sqlalchemy import create_engine\n\nengine = create_engine('postgresql://postgres:@{}:{}'.format('127.0.0.1', '5431'))\ncharacteristics = pd.read_sql('select * from characteristics', engine)\nmedals = pd.read_sql('select* from medals', engine)\ngames = pd.read_sql('select*from games',engine)\n\nnum_people = pd.read_sql('select count( distinct id) from characteristics;',engine)\nnum_people_2 = characteristics.groupby(by=['id'])['id'].count().drop_duplicates\nprint(num_people)\nprint(num_people_2)\n\nfederer_games = pd.read_sql(\"select distinct games from public.games INNER JOIN public.characteristics On characteristics.id=games.id where name='Roger Federer' ;\",engine)\ngam_char = characteristics.merge(games, how='inner', left_on='id',right_on='id')\nfederer_games_2 = gam_char[(gam_char.name == 'Roger Federer')].games.unique()\nprint(federer_games)\nprint(federer_games_2)\n\ntop_tall = pd.read_sql(\"select DISTINCT id, name ,height from characteristics where height!='NA' ORDER BY height DESC limit 5;\",engine)\ntop_tall_2 = characteristics[(characteristics.height!='NA')].sort_values('height',ascending=False).head(5)\nprint(top_tall)\nprint(top_tall_2)\n\navg_age = pd.read_sql(\"select AVG(cast(age as integer)) from characteristics where age !='NA';\",engine)\navg_age_w = characteristics.agg({'age':['mean']})\nprint(avg_age)\nprint(avg_age_2)\n\n\navg_age_gold = pd.read_sql(\"select AVG(cast(age as integer)) from public.characteristics INNER JOIN public.medals On characteristics.id=medals.id where age!='NA'and medal='Gold';\",engine)\nchar_med = characteristics.merge(medals, how='inner', left_on='id',right_on='id')\navg_age_gold_2 = char_med[(char_med.medal=='Gold')].agg({'age':['mean']})\nprint(avg_age_gold)\nprint(avg_age_gold_2)\n\nexport_csv = top_tall.to_csv('/Users/alexanderzayonts/Documents/top_tall.csv')\n" }, { "alpha_fraction": 0.7354211807250977, "alphanum_fraction": 0.7656587362289429, "avg_line_length": 43.095237731933594, "blob_id": "f0e16c4772d44c5cea69f05426d90fc57dbb8ac6", "content_id": "4e00df59248d776f969cd540daff38236ab27fdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 167, "num_lines": 21, "path": "/hw1.sql", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "SELECT ('Григорий Зайонц');\n-- первый запрос\nSELECT * FROM ratings LIMIT 10;\n\n-- второй запрос\nSELECT * FROM (SELECT * FROM (SELECT * FROM links WHERE links.movieid<1000) links WHERE 100<links.movieid) links WHERE links.imdbid like'%42%' LIMIT 10;\n\n-- третий запрос\nSELECT * FROM public.links INNER JOIN public.ratings ON links.movieid=ratings.movieid WHERE ratings.rating = 5 LIMIT 10;\n\n--четвертый запрос\nselect COUNT(movieid) FROM ratings WHERE ratings.movieid IS NULL;\n\n--пятый запрос\nSELECT DISTINCT userid, AVG(rating) as avg_rating FROM ratings GROUP BY userid HAVING AVG(rating) > 3.5 LIMIT 10;\n\n--шестой запрос\nSELECT imdbid, AVG(rating) as 
avg_rating FROM public.links INNER JOIN public.ratings ON links.movieid=ratings.movieid GROUP BY imdbid HAVING AVG(rating)>3.5 LIMIT 10;\n\n--седьмой запрос\nSelect userId, AVG(rating) as avg_rating, COUNT(userId) as count from ratings GROUP BY userid HAVING COUNT(userId)>10 LIMIT 10;\n" }, { "alpha_fraction": 0.7606322765350342, "alphanum_fraction": 0.7854723334312439, "avg_line_length": 63.780487060546875, "blob_id": "491e97eef3f900c120a72830d060ae2c752aa481", "content_id": "3f6656f1aa1071957f5f5603ffc394ceac9c3e94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 3478, "license_type": "no_license", "max_line_length": 260, "num_lines": 41, "path": "/finalsql.sql", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "\n-- 1.\tНачнем с простого запроса и просто посчитаем сколько всего людей участвовало во всех официально задокументированных ОИ (кроме игр 2018 года).\nselect count( distinct id) from characteristics;\n--Ответ: 135571 человек.\n\n--2.\tПосчитаем средний возраст участников ОИ.\nselect AVG(cast(age as integer)) from characteristics where age !='NA';\n--Ответ:25.56 лет.\n\n--3.\tТеперь посчитаем средний возраст золотых медалистов участников ОИ.\nselect AVG(cast(age as integer)) from public.characteristics INNER JOIN public.medals On characteristics.id=medals.id where age!='NA'and medal='Gold'; Ответ: 26,625 лет, что говорит о том, что золотой медалист в среднем старше чем средний возраст участника ОИ.\n\n--4.\tПосчитаем так же средний рост золотых медалистов.\nselect AVG(cast(height as integer)) from public.characteristics INNER JOIN public.medals On characteristics.id=medals.id where height!='NA'and medal='Gold';\n--Ответ: 176.31см.\n\n--5.\tПосчитаем количество медалей за всю историю у сборной России.\nselect COUNT(medal) from public.nationality INNER JOIN public.medals On nationality.id=medals.id where medal!='NA' and team='Russia';\n--Ответ: 4816 медалей.\n\n--6.\tСколько из них золотых?\nselect COUNT(medal) from public.nationality INNER JOIN public.medals On nationality.id=medals.id where medal='Gold' and team='Russia';\n--Из них 1669 медалей золотые.\n\n--7.\tНайдем 5 самых высоких участников ОИ.\nselect DISTINCT id, name ,height from characteristics where height!='NA' ORDER BY height DESC limit 5;\n\n--8.\tСоздадим оконную функцию и посчитаем для каждого спортсмена в каком количестве соревнований он принимал участие и пронумеруем их.\nselect id,name, ROW_NUMBER() OVER (PARTITION BY id) as events_attended from characteristics order by id limit 5;\n\n--9.\tУзнаем на каких ОИ выступал Роджер Федерер.\nselect distinct games from public.games INNER JOIN public.characteristics On characteristics.id=games.id where name='Roger Federer' ;\n--Ответ: 2000, 2004, 2008 и 2012.\n\n--10.\tНайдем также и результаты выступлений Роджера на этих играх, для этого соединим три таблицы.\nselect distinct games, medal from public.games INNER JOIN public.characteristics On characteristics.id=games.id INNER JOIN public.medals ON games.id=medals.id where name='Roger Federer' ;\n\n--11.\tСоздадим представление достижений Федерера на ОИ.\nCREATE VIEW Roger_Federer AS select distinct games, medal from public.games INNER JOIN public.characteristics On characteristics.id=games.id INNER JOIN public.medals ON games.id=medals.id where name='Roger Federer' ;\n\n--12.\tСоздадим представление всех теннисистов завоевавших медали на ОИ.\nCREATE VIEW tennis AS select * from medals where sport='Tennis' and medal='NA';\n" }, { "alpha_fraction": 
0.5091185569763184, "alphanum_fraction": 0.5151975750923157, "avg_line_length": 27.60869598388672, "blob_id": "0cee3e98a6980d087028f64ec1b5ecfd1aa9255c", "content_id": "351e792e725ecbc42328739d8785f00259e77667", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 861, "license_type": "no_license", "max_line_length": 107, "num_lines": 23, "path": "/hw5.sh", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "/*\n Написать запрос, который выводит общее число тегов\n*/\nprint(\"tags count: \", db.tags.count());\n/*\n Добавляем фильтрацию: считаем только количество тегов woman\n*/\nprint(\"woman tags count: \", db.tags.count({'name': 'woman'}));\n/*\n Очень сложный запрос: используем группировку данных посчитать количество вхождений для каждого тега\n и напечатать top-3 самых популярных\n*/\n\nprintjson(\n db.tags.aggregate([\n {\"$group\": {\n _id: \"$name\",\n tags: {$sum:1}\n }\n },{$sort:{\"tags\": -1}},\n {$limit: 3}\n ])['_batch']\n);\n" }, { "alpha_fraction": 0.7382971048355103, "alphanum_fraction": 0.7663103342056274, "avg_line_length": 72.32432556152344, "blob_id": "1307a61dc8724427682944d7491b1217f59aad4f", "content_id": "6dddfe24f79b8fe1221226f0030b4aff416b8292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 3571, "license_type": "no_license", "max_line_length": 348, "num_lines": 37, "path": "/hw4.sql", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "SELECT ('ФИО: Зайонц Григорий');\n\n-- спользуя функцию определения размера таблицы, вывести top-5 самых больших таблиц базы\n\nSELECT table_name from information_schema.tables order by pg_relation_size('' || table_schema || '.' || table_name || '') DESC limit 5;\n\n-- array_agg: собрать в массив все фильмы, просмотренные пользователем (без повторов)\nSELECT userID, array_agg(movieId) as user_views FROM ratings group by userID;\n\n-- таблица user_movies_agg, в которую сохраните результат предыдущего запроса\nSELECT userID, user_views INTO public.user_movies_agg FROM (SELECT userID, array_agg(movieId) as user_views FROM ratings group by userID) as userr;\n\n\n-- Используя следующий синтаксис, создайте функцию cross_arr оторая принимает на вход два массива arr1 и arr2.\n-- Функциия возвращает массив, который представляет собой пересечение контента из обоих списков.\n-- Примечание - по именам к аргументам обращаться не получится, придётся делать через $1 и $2.\n\nCREATE OR REPLACE FUNCTION cross_arr (int[], int[]) RETURNS int[] language sql as $FUNCTION$ select array((SELECT UNNEST($1)) INTERSECT (SELECT UNNEST($2))); ; $FUNCTION$;\n\n-- Сформируйте запрос следующего вида: достать из таблицы всевозможные наборы u1, r1, u2, r2.\n-- u1 и u2 - это id пользователей, r1 и r2 - соответствующие массивы рейтингов\n-- ПОДСКАЗКА: используйте CROSS JOIN\nSELECT w1.userid as u1, w2.userid as u2, w1.user_views as ar1, w2.user_views as ar2 from public.user_movies_agg w1 cross join public.user_movies_agg w2 where w1.userid<>w2.userid;\n\n-- Оберните запрос в CTE и примените к парам <ar1, ar2> функцию CROSS_ARR, которую вы создали\n-- вы получите триплеты u1, u2, crossed_arr\n-- созхраните результат в таблицу common_user_views\nDROP TABLE IF EXISTS common_user_views;\nWITH user_pairs as (SELECT w1.userid as u1, w2.userid as u2, w1.user_views as ar1, w2.user_views as ar2 from public.user_movies_agg w1 cross join public.user_movies_agg w2 where w1.userid<>w2.userid) SELECT u1, u2, cross_arr(ar1, ar2) INTO 
common_user_views FROM user_pairs order by array_length(cross_arr(ar1, ar2),1) limit 10 ;\n\n\n\n-- Создайте по аналогии с cross_arr функцию diff_arr, которая вычитает один массив из другого.\n-- Подсказка: используйте оператор SQL EXCEPT.\nWITH user_pairs as (SELECT w1.userid as u1, w2.userid as u2, w1.user_views as ar1, w2.user_views as ar2 from public.user_movies_agg w1 cross join public.user_movies_agg w2 where w1.userid<>w2.userid) SELECT u1, u2, cross_arr(ar1, ar2), diff_arr(ar1,ar2) INTO common_user_views FROM user_pairs order by array_length(cross_arr(ar1, ar2),1) limit 10 ;\n\n-- в последнем запросе немного зачитерил, но получилось даже интереснее рекомендации от пользователей у которых больше всего совпадений\n" }, { "alpha_fraction": 0.7166528701782227, "alphanum_fraction": 0.7282518744468689, "avg_line_length": 49.29166793823242, "blob_id": "c64417504cd364cff39ce4520bb5135baec46906", "content_id": "63f100dc6ad3e6198cd9a3a775024039095b347a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 112, "num_lines": 24, "path": "/hw3.sql", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "SELECT('Григорий Зайонц');\n\n--первое задание\nSELECT DISTINCT userId,movieid,\n (rating - MIN(rating) OVER (PARTITION BY userId))/((MAX(rating) OVER (PARTITION BY userId)) - \n (MIN(rating) OVER (PARTITION BY userid)))normed_rating, AVG(rating) OVER (PARTITION BY userId) avg_rating\n from ratings\n ORDER BY userId LIMIT 30;\n \n \n --второе задание\n psql --host $APP_POSTGRES_HOST -U postgres -c \"CREATE TABLE IF NOT EXISTS keywords(id INT, tags VARCHAR)\"\n psql --host $APP_POSTGRES_HOST -U postgres -c \"\\\\copy keywords FROM '/data/k\neywords.csv' DELIMITER ',' CSV HEADER\"\n\nWITH top_rated as (SELECT movieid, AVG(rating) as avg_rating, COUNT(userid) as count from ratings\nGROUP BY movieid HAVING COUNT(userid)>50 ORDER BY movieid ASC, avg_rating DESC LIMIT 150) \nSELECT * FROM top_rated JOIN keywords ON top_rated.movieid=keywords.id LIMIT 50;\n\nWITH top_rated as (SELECT movieid, AVG(rating) as avg_rating, COUNT(userid) as count from ratings \nGROUP BY movieid HAVING COUNT(userid)>50 ORDER BY movieid ASC, avg_rating DESC LIMIT 150) \nSELECT movieid, avg_rating into top_rated_tags from top_rated;\n\n\\copy (select * from top_rated_tags) to '/data/ratings_file' with CSV header delimiter as E'\\t';\n" }, { "alpha_fraction": 0.7906976938247681, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 13.333333015441895, "blob_id": "f79725659f5f94da8dd261c3fa4ae011be5ec8f3", "content_id": "6170ee9300a48f872c46bb4c2059e26f4c544151", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/README.md", "repo_name": "grigoz/sqlpractice", "src_encoding": "UTF-8", "text": "# pythonpractice\n# MLpractice\n# MLpractice\n" } ]
7
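hw3.sql's first task normalizes each user's ratings with window functions; finalsql.py pairs every SQL query with a pandas twin, and the same pairing can be shown for that query too. The sketch below assumes only a DataFrame with 'userId' and 'rating' columns (the sample values are illustrative, not from the repo).

import pandas as pd

ratings = pd.DataFrame({'userId': [1, 1, 1, 2, 2],
                        'rating': [2.0, 4.0, 5.0, 3.0, 5.0]})

grouped = ratings.groupby('userId')['rating']
# AVG(rating) OVER (PARTITION BY userId)
ratings['avg_rating'] = grouped.transform('mean')
# (rating - MIN ...) / (MAX ... - MIN ...) per user, i.e. min-max normalization
ratings['normed_rating'] = (ratings['rating'] - grouped.transform('min')) / (
    grouped.transform('max') - grouped.transform('min'))
print(ratings)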
yanioaioan/BookletStructureGenerator
https://github.com/yanioaioan/BookletStructureGenerator
47056a90dbddd750ae1e100453e404b3c0c71a7e
584ce792b6a017581f74b930527658e448ca1b0f
20d890c3d5238ed69a10c471db9db17a14cf4f35
refs/heads/master
2021-06-06T09:15:12.545263
2019-06-13T08:55:50
2019-06-13T08:55:50
95,005,066
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6663568615913391, "alphanum_fraction": 0.6728624701499939, "avg_line_length": 31.606060028076172, "blob_id": "e493c6d2bb3ae471ea11e2d2dbf288c7a85c1fc8", "content_id": "c519093c85fbceb51992d10edb8889241ea0bdf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8608, "license_type": "no_license", "max_line_length": 189, "num_lines": 264, "path": "/2017/readXLSBookletStructureGenerator.py", "repo_name": "yanioaioan/BookletStructureGenerator", "src_encoding": "UTF-8", "text": "try:\n\timport pandas,os,re\nexcept:\n\tprint 'The following python libraries need to be installed, so if you have admin rights under linux \\n \\\n\topen a terminal and execute the following commands:'\n\tprint 'sudo easy_install pip'\n\tprint 'sudo pip install pandas'\t\n\tprint 'sudo pip install xlrd'\n\texit(0)\n\nimport shutil\n\n#! /usr/bin/bash\n#title :readXLSBookletStructureGenerator\n#description :This script is a booklet folder structure generator, used for undergraduate and postgraduate degree showreel booklets\n#author\t\t :Ioannis Ioannidis\n#date :21/06/2017\n#version :1.0\n#usage :python readXLSBookletStructureGenerator\n# : you need a .xlsl file to read which is provided from the user\n#date modified :--/--/----\n#==============================================================================\n\n\n#How to use the `readXLSBookletStructureGenerator script`:\n#\n#1)open bash profile: geany ~/.bashrc\n#2)append this line to the end: export PATH=$PATH:/public/bin/yanScripts\n#3)Save and Close bash\n#4)Source bash: source ~/.bashrc\n#5)make sure there's an excel file to read from (usually this is retrieved from Google Drive after students have submitted their information)\n#6)run the following command: `./readXLSBookletStructureGenerator`\n\n\n\ndef isNaN(num):\n return num != num\n\ndef assure_path_exists(path):\n dir = os.path.dirname(path)\n print dir\n if not os.path.exists(path):#if not there - create it\n os.makedirs(path)\n return True\n if os.path.exists(path):#if there - delete first and then create it\n print \"PATH already exists: %s \"%(path)\n shutil.rmtree(path)\n print \"PATH deleted so as to be recreated: %s \"%(dir)\n #os.makedirs(path)\n\n print path\n print dir\n\n\n\n\nexcelFile=raw_input(\"Please Give the absolute path to the excel file: such as '/public/bin/yanScripts/excel/DegreeShow2017.xlsx'\")\nphotosPath=raw_input(\"Please Give the absolute path to the photos: such as '/public/bin/yanScripts/photos'\")\n\n#excelFile=\"/public/bin/yanScripts/excel/DegreeShow2017.xlsx\"\n#photosPath=\"/public/bin/yanScripts/photos\"\n\ndf = pandas.read_excel(open(excelFile,'rb'), sheetname='Sheet1')\n#print the column names\n#print df.columns\ncols=df.columns\n\nsumbissions=[]\ntimestamp,name,inumber,email,phone,url,affiliation,projectname,description,skills,software = \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n\n'''\nfor j in df.index:#get the values for a given\n #print \"ROW\",j\n for i in cols:#get the values for a given column\n #print \"COLUMN:\",i,df[i][j]\n sumbissions.append(df[i][j])\n'''\n\n#get rows number\n#print df.count\n\n'''\nrow=df.loc[0][:]#1st row all elements EXCLUDING numbering on the left hand side\nprint len(row)\nprint row\nprint row[0]\nprint row[10]\n'''\n\n#add all row into the list submissions\nfor i in df.index:\n row=df.loc[i][:]\n sumbissions.append(row)\n #print i\n\n#create outter folder named \"studentsFolders\"\ncwd = 
os.getcwd()\noutterStudentsFolder=os.path.join(cwd,\"studentsFolder\")\nassure_path_exists(outterStudentsFolder)\n\ngroupsStudentsFolder=os.path.join(outterStudentsFolder,\"_____GROUPS_____\")\nassure_path_exists(groupsStudentsFolder)\nprint groupsStudentsFolder\n\nindividualsStudentsFolder=os.path.join(outterStudentsFolder,\"_____INDIVIDUALS_____\")\nassure_path_exists(individualsStudentsFolder)\nprint individualsStudentsFolder\n\n\nimport unicodedata\n\nfor row in sumbissions:#each row\n #for rowElement in row:#individual elements of each row\n timestamp,name,inumber,email,phone,url,affiliation,projectname,description,skills,software = row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10]\n\n\n #encoding\n timestamp = ((timestamp)).encode('utf-8')\n if not isNaN(name):\n name = ((name)).encode('utf-8')\n if not isNaN(inumber):\n inumber = ((inumber)).encode('utf-8')\n if not isNaN(email):\n email = ((email)).encode('utf-8')\n if not isNaN(phone):\n phone = (str(phone)).encode('utf-8')\n if not isNaN(url):\n url = (str(url)).encode('utf-8')\n if not isNaN(affiliation):\n affiliation = ((affiliation)).encode('utf-8')\n if not isNaN(projectname):\n projectname = ((projectname)).encode('utf-8')\n if not isNaN(description):\n #description = (str(description)).encode('utf-8')\n\n print \"DESCRIPTION=\",description\n\n #It's important to notice that using the ignore option is dangerous because it silently drops any unicode(and internationalization) support from the code that uses it, as seen here:\n description=unicodedata.normalize('NFKD', description).encode('ascii','ignore')\n\n if not isNaN(skills):\n skills = (str(skills)).encode('utf-8')\n if not isNaN(software):\n software = ((software)).encode('utf-8')\n\n print timestamp,name,inumber,email,phone,url,affiliation,projectname,description,skills,software\n #name,inumber,email,phone,url,affiliation,projectname,description,skills,software = \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n\n #now for this row create a folder in the following format (\"name - projectname\") if it's an individual project otherwise,\n #create a folder in the following format (\"projectname\") and under this one create a folder in the following format (\"name\") for each of the members of the group\n studentFoldername=\"\"\n #cwd = os.getcwd()\n\n #dive into either Groups or Individual Folder depending on the specified affiliation\n affiliationFolderChosen=\"\"\n if not isNaN(affiliation):\n affiliation = ((affiliation)).encode('utf-8')\n\n if affiliation == \"group\":\n os.chdir(groupsStudentsFolder)\n affiliationFolderChosen = os.getcwd()\n\n #list individual folders each group AND in there.. 
for each student\n groupProjectFoldername=\"%s\"%(projectname)\n studentFoldername=\"%s\"%(name)\n studentFoldername=os.path.join(groupProjectFoldername,studentFoldername)\n\n print studentFoldername\n\n\n elif affiliation == \"individual\":\n\n #list individual folders for each student\n studentFoldername=\"%s-%s\"%(name,projectname)\n studentFoldername=str(studentFoldername)\n print studentFoldername\n\n os.chdir(individualsStudentsFolder)\n affiliationFolderChosen = os.getcwd()\n\n print os.path.join(affiliationFolderChosen,studentFoldername)\n\n #update studentFoldername\n studentFoldername=os.path.join(affiliationFolderChosen,studentFoldername)#outterStudentsFolder+studentFoldername\n print studentFoldername\n\n studnetFolderCreatedSucessfully=assure_path_exists(studentFoldername)#studentFoldername\n\n #then for this foldername create a description txt file of the following format (\"name - inumber\")\n if studnetFolderCreatedSucessfully:\n\n\n\n #dive into studentFoldername\n os.chdir(studentFoldername)\n cwd = os.getcwd()\n print cwd\n\n #create local images under each person\n localImages=os.path.join(studentFoldername,\"images\")\n assure_path_exists(localImages)\n #print localImages\n\n #search, find & copy inumber-related image to pre-created local folder named 'images'\n filesMatched = [f for f in os.listdir(photosPath) if re.match(r'.*'+str(inumber)+'.*', f)]\n print filesMatched\n\n for file in filesMatched:\n\n #print localImages\n imagepath=dir = os.path.join(os.path.abspath(photosPath),file)\n\n print imagepath\n shutil.copy2(imagepath, localImages)\n\n\n\n\n studentFileDescriptionName=\"%s-%s.txt\"%(name,inumber)\n\n file = open(studentFileDescriptionName,\"w\")\n\n\n if not isNaN(name) and not isNaN(inumber):\n file.write(\"name: %r\\t%r\\n\\n\"%(name,inumber))\n if not isNaN(email):\n file.write(\"email: %r\\n\"%(email))\n if not isNaN(phone):\n file.write(\"phone: %r\\n\\n\"%(phone))\n if not isNaN(url):\n file.write(\"url: %r\\n\\n\"%(url))\n if not isNaN(affiliation):\n file.write(\"affiliation: %r\\n\"%(affiliation))\n if not isNaN(projectname):\n file.write(\"projectname: %r\\n\\n\"%(projectname))\n if not isNaN(description):\n file.write(\"description: \\n%r\\n\\n\"%(description))\n if not isNaN(skills):\n file.write(\"skills :%r\\n\"%(skills))\n if not isNaN(software):\n file.write(\"software: %r\\n\"%(software))\n file.close()\n\n\n\n\n\n\n\n\n\n\n\n\n#for s in sumbissions:#list holding all submission\n# print s\n# break\n\n\n\n#values = df['Arm_id'].values\n#get a data frame with selected columns\n#FORMAT = ['Arm_id', 'DSPName', 'Pincode']\n#df_selected = df[FORMAT]\n" }, { "alpha_fraction": 0.6747084259986877, "alphanum_fraction": 0.6814395189285278, "avg_line_length": 36.70100402832031, "blob_id": "f9901133308688dff5ca9566583704d26e91d916", "content_id": "6fabc3d26c361cf04e046fc199bc0d33b463335b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15005, "license_type": "no_license", "max_line_length": 308, "num_lines": 398, "path": "/2018/readXLSBookletStructureGenerator2018Test/readXLSBookletStructureGenerator.py", "repo_name": "yanioaioan/BookletStructureGenerator", "src_encoding": "UTF-8", "text": "try:\n\timport pandas,os,re\nexcept:\n\tprint 'The following python libraries need to be installed, so if you have admin rights under linux \\n \\\n\topen a terminal and execute the following commands:'\n\tprint 'sudo easy_install pip'\n\tprint 'sudo pip install pandas'\t\n\tprint 'sudo pip install 
xlrd'\n\texit(0)\n\nimport shutil\n\n#! /usr/bin/bash\n#title :readXLSBookletStructureGenerator\n#description :This script is a booklet folder structure generator, used for undergraduate and postgraduate degree showreel booklets\n#author\t\t :Ioannis Ioannidis\n#date :21/06/2017\n#version :1.0\n#usage :python readXLSBookletStructureGenerator\n# : you need a .xlsl file to read which is provided from the user\n#date modified :--/--/----\n#==============================================================================\n\n\n#How to use the `readXLSBookletStructureGenerator script`:\n#\n#1)open bash profile: geany ~/.bashrc\n#2)append this line to the end: export PATH=$PATH:/public/bin/yanScripts\n#3)Save and Close bash\n#4)Source bash: source ~/.bashrc\n#5)make sure there's an excel file to read from (usually this is retrieved from Google Drive after students have submitted their information)\n#6)run the following command: `./readXLSBookletStructureGenerator`\n\n\n\ndef isNaN(num):\n return num != num\n\ndef assure_path_exists(path):\n dir = os.path.dirname(path)\n print dir\n if not os.path.exists(path):#if not there - create it\n os.makedirs(path)\n return True\n if os.path.exists(path):#if there - delete first and then create it\n print \"PATH already exists: %s \"%(path)\n shutil.rmtree(path)\n print \"PATH deleted so as to be recreated: %s \"%(dir)\n #os.makedirs(path)\n\n print path\n print dir\n\n\n\n\nexcelFile=raw_input(\"Please Give the absolute path to the excel file: such as '/public/bin/yanScripts/excel/DegreeShow2017.xlsx'\")\nphotosPath=raw_input(\"Please Give the absolute path to the photos: such as '/public/bin/yanScripts/photos'\")\n\n#excelFile=\"/public/bin/yanScripts/excel/DegreeShow2017.xlsx\"\n#photosPath=\"/public/bin/yanScripts/photos\"\n\n#excelFile=\"/home/yioannidis/Downloads/BookletStructureGenerator/2018/readXLSBookletStructureGenerator2018Test/excel/Booklet2018-Downloaded.xlsx\"\n#photosPath=\"/home/yioannidis/Downloads/BookletStructureGenerator/2018/readXLSBookletStructureGenerator2018Test/photos/Work_Collector\"\n\ndf = pandas.read_excel(open(excelFile,'rb'), sheetname='Sheet1')\n#print the column names\n#print df.columns\ncols=df.columns\n\nsumbissions=[]\ntimestamp,name,inumber,email,phone,url,affiliation,projectname,description,skills,software = \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n\n'''\nfor j in df.index:#get the values for a given\n #print \"ROW\",j\n for i in cols:#get the values for a given column\n #print \"COLUMN:\",i,df[i][j]\n sumbissions.append(df[i][j])\n'''\n\n#get rows number\n#print df.count\n\n'''\nrow=df.loc[0][:]#1st row all elements EXCLUDING numbering on the left hand side\nprint len(row)\nprint row\nprint row[0]\nprint row[10]\n'''\n\n#add all row into the list submissions\nfor i in df.index:\n row=df.loc[i][:]\n sumbissions.append(row)\n #print i\n\n\n#create outter folder named \"studentsFolders\"\ncwd = os.getcwd()\noutterStudentsFolder=os.path.join(cwd,\"studentsFolder\")\nassure_path_exists(outterStudentsFolder)\n\ngroupsStudentsFolder=os.path.join(outterStudentsFolder,\"_____GROUPS_____\")\nassure_path_exists(groupsStudentsFolder)\nprint groupsStudentsFolder\n\n\ndef recursive_glob(treeroot, pattern):\n results = []\n for base, dirs, files in os.walk(treeroot):\n goodfiles = fnmatch.filter(files, pattern)\n results.extend(os.path.join(base, f) for f in goodfiles)\n return 
results\n\nindividualsStudentsFolder=os.path.join(outterStudentsFolder,\"_____INDIVIDUALS_____\")\nassure_path_exists(individualsStudentsFolder)\nprint individualsStudentsFolder\n\n\nimport unicodedata\n\n#Collect inumbers to check whether there are inumbers in the project photos folder submitted,\n#that don't correspond to the submission inumbers on the booklet information excel sheet\n#all inumbers submtitted in the excel\nexcellInumbers=[]\nfor row in sumbissions:\n timestamp,name,inumber,email,phone,url,affiliation,projectname,description,skills,software = row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10]\n excellInumbers.append(((inumber)).encode('utf-8'))\nprint 'excellInumbers',excellInumbers\n\nt=[f for f in os.listdir(photosPath) if re.match(r'.*'+str('i')+'.*_.*', f)]\ninumbersDetectedInPhotosPath = []\nfor i in t:\n print i\n inumbersDetectedInPhotosPath.append(i.split('_')[0])\n\nprint '\\ninumbersDetectedInPhotosPath\\n',inumbersDetectedInPhotosPath\n\nsuspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo=[]\n#detect the inumbers that have been correctly submitted as part of the name of the project photos BUT NOT correctly submitted as part of the booklet info submission\nfor inum in inumbersDetectedInPhotosPath:\n if (inum not in excellInumbers) and (inum not in suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo):#i number not detected in the booklet sumbission excel sheet AND not already in suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo list, then add it for further investigation\n suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo.append(inum)\n\ngroupMembersThaHaveNotSubmittedAnyProjectFilesThemselves=[]\n#detect the inumbers that have NOT been correctly submitted as part of the name of the project photos BUT correctly submitted as part of the booklet info submission\nfor inum in excellInumbers:\n if (inum not in inumbersDetectedInPhotosPath) and (inum not in groupMembersThaHaveNotSubmittedAnyProjectFilesThemselves):#i number not detected in the booklet sumbission excel sheet AND not already in suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo list, then add it for further investigation\n print inum,(inum not in inumbersDetectedInPhotosPath)\n groupMembersThaHaveNotSubmittedAnyProjectFilesThemselves.append(inum)\n\n\n#if not empty\nif suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo:\n print '\\nAttention possible WRONG inumbers sumbitted\\n as part of the excel booklet info that don\\'t match the project photos inumbers.\\nPlease, investigate further inumbers sumbitted'\n print 'Please investigate submissions of the following inumbers\\n\\n'\n\n print '----------------------------------------------------------------------------------------------'\n print '----------------------------------------------------------------------------------------------'\n print ' INUMBER ERRORS IN BOOKLET SUBMISSION - INVESTIGATE THE FOLLOWING \\'%d\\' INUMBER SUBMISSIONS'%(len(suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo))\n print '----------------------------------------------------------------------------------------------'\n print '----------------------------------------------------------------------------------------------'\n counter = 1\n for i in suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo:\n print counter,') !!!!!-Investigate booklet submission of -->',i,'-!!!!!\\n'\n counter+=1\n print '\\nAlso check groupMembersThaHaveNotSubmittedAnyProjectFilesThemselves:\\n'\n 
print 'double check booklet submissions of the follwoing inumbers too are indeed members of a groups that haven\\'t submitted project photos because another group member did!'\n print groupMembersThaHaveNotSubmittedAnyProjectFilesThemselves\n\n\n file = open(outterStudentsFolder+str('/ATTENTION.txt'),\"w\")\n for i in suspiciousInumberMisMatchBetweenProjectPhotosAndBookletInfo:\n file.write('!!!!!-Investigate booklet submission of -->%s-!!!!!, inumber possibly mistyped\\n'%(i))\n file.close()\n\n\n'''\nfor inumber in excellInumbers:\n print 'Testing inumber-->..',str(inumber)\n t=[f for f in os.listdir(photosPath) if re.match(r'.*'+str(inumber)+'.*', f)]\n print 'matched',t\nexit()\n'''\n\nfor row in sumbissions:#each row\n #for rowElement in row:#individual elements of each row\n timestamp,name,inumber,email,phone,url,affiliation,projectname,description,skills,software = row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10]\n\n\n #encoding\n timestamp = ((timestamp)).encode('utf-8')\n if not isNaN(name):\n name = ((name)).encode('utf-8')\n if not isNaN(inumber):\n inumber = ((inumber)).encode('utf-8')\n if not isNaN(email):\n email = ((email)).encode('utf-8')\n if not isNaN(phone):\n phone = (str(phone)).encode('utf-8')\n if not isNaN(url):\n url = (str(url)).encode('utf-8')\n if not isNaN(affiliation):\n affiliation = ((affiliation)).encode('utf-8')\n if not isNaN(projectname):\n projectname = ((projectname)).encode('utf-8')\n if not isNaN(description):\n #description = (str(description)).encode('utf-8')\n\n print \"DESCRIPTION=\",description\n\n #It's important to notice that using the ignore option is dangerous because it silently drops any unicode(and internationalization) support from the code that uses it, as seen here:\n description=unicodedata.normalize('NFKD', description).encode('ascii','ignore')\n\n if not isNaN(skills):\n skills = (str(skills)).encode('utf-8')\n if not isNaN(software):\n software = ((software)).encode('utf-8')\n\n print timestamp\n print name \n print inumber\n print email\n print phone\n print url\n print affiliation\n print projectname\n print description\n print skills\n print software\n #name,inumber,email,phone,url,affiliation,projectname,description,skills,software = \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n\n #now for this row create a folder in the following format (\"name - projectname\") if it's an individual project otherwise,\n #create a folder in the following format (\"projectname\") and under this one create a folder in the following format (\"name\") for each of the members of the group\n studentFoldername=\"\"\n #cwd = os.getcwd()\n\n #dive into either Groups or Individual Folder depending on the specified affiliation\n affiliationFolderChosen=\"\"\n if affiliation:\n affiliation = ((affiliation)).encode('utf-8')\n\n if affiliation == \"As part of a group\":\n os.chdir(groupsStudentsFolder)\n print 'group affiliation'\n affiliationFolderChosen = os.getcwd()\n\n #list individual folders each group AND in there.. 
for each student\n groupProjectFoldername=\"%s\"%(projectname)\n studentFoldername=\"%s\"%(name)\n studentFoldername=os.path.join(groupProjectFoldername,studentFoldername)\n\n print studentFoldername\n\n\n elif affiliation == \"As an individual\":\n\n #list individual folders for each student\n print 'individual affiliation'\n studentFoldername=\"%s-%s\"%(name,projectname)\n studentFoldername=str(studentFoldername)\n print studentFoldername\n\n os.chdir(individualsStudentsFolder)\n affiliationFolderChosen = os.getcwd()\n\n print os.path.join(affiliationFolderChosen,studentFoldername)\n\n #update studentFoldername\n studentFoldername=os.path.join(affiliationFolderChosen,studentFoldername)#outterStudentsFolder+studentFoldername\n print \"studentFoldername %r\"%(studentFoldername)\n\n studnetFolderCreatedSucessfully=assure_path_exists(studentFoldername)#studentFoldername\n\n #then for this foldername create a description txt file of the following format (\"name - inumber\")\n if studnetFolderCreatedSucessfully:\n\n\n\n #dive into studentFoldername\n os.chdir(studentFoldername)\n cwd = os.getcwd()\n print cwd\n\n #create local images under each person\n localImages=os.path.join(studentFoldername,\"images\")\n assure_path_exists(localImages)\n #print localImages\n\n #search, find & copy inumber-related image to pre-created local folder named 'images'\n filesMatched = [f for f in os.listdir(photosPath) if re.match(r'.*'+str(inumber)+'.*', f)]\n print filesMatched\n\n for file in filesMatched:\n\n #print localImages\n imagepath=dir = os.path.join(os.path.abspath(photosPath),file)\n\n print imagepath\n shutil.copy2(imagepath, localImages)\n\n\n studentFileDescriptionName=\"%s-%s.txt\"%(name,inumber)\n\n file = open(studentFileDescriptionName,\"w\")\n\n\n if not isNaN(name) and not isNaN(inumber):\n file.write(\"name: %r\\t%r\\n\\n\"%(name,inumber))\n if not isNaN(email):\n file.write(\"email: %r\\n\"%(email))\n if not isNaN(phone):\n file.write(\"phone: %r\\n\\n\"%(phone))\n if not isNaN(url):\n file.write(\"url: %r\\n\\n\"%(url))\n if not isNaN(affiliation):\n file.write(\"affiliation: %r\\n\"%(affiliation))\n if not isNaN(projectname):\n file.write(\"projectname: %r\\n\\n\"%(projectname))\n if not isNaN(description):\n file.write(\"description: \\n%r\\n\\n\"%(description))\n if not isNaN(skills):\n file.write(\"skills :%r\\n\"%(skills))\n if not isNaN(software):\n file.write(\"software: %r\\n\"%(software))\n file.close()\n\n\n#Check folders not having images\nimport os,glob\n\nempty_dirs = []\nfor root, dirs, files in os.walk(outterStudentsFolder):\n newpath = ''\n noImagesAtAllInProject = 1\n\n if not len(dirs) and not len(files):\n print '\\nroot',root\n groupProjectPath=root.split('/')\n newpath = ''\n for i in range(len(groupProjectPath)-2):\n newpath += str(groupProjectPath[i])+'/'\n print '\\nnewpath',newpath\n\n\n noImagesAtAllInProject=1\n import fnmatch\n matches = []\n\n matchesjpg = recursive_glob(newpath,'*.jpg')\n matchespng = recursive_glob(newpath,'*.png')\n matchesJPG = recursive_glob(newpath,'*.JPG')\n matchesPNG = recursive_glob(newpath,'*.PNG')\n matchesjpeg = recursive_glob(newpath,'*.jpeg')\n matchesJPEG = recursive_glob(newpath,'*.JPEG')\n\n if matchesjpg:\n noImagesAtAllInProject=0\n print i\n\n elif matchespng:\n noImagesAtAllInProject=0\n print i\n\n elif matchesJPG:\n noImagesAtAllInProject=0\n print i\n\n elif matchesPNG:\n noImagesAtAllInProject=0\n print i\n elif matchesjpeg:\n noImagesAtAllInProject=0\n print i\n\n elif matchesJPEG:\n noImagesAtAllInProject=0\n 
print i\n\n    else:\n        print '\\nNo images in this project at all -->%s\\n'%(newpath)\n        file = open(outterStudentsFolder+str('/ATTENTION.txt'),\"a\")\n        file.write('\\nInvestigate Project %s, as there are no images at all there (e.g. mistyped project title); email the corresponding students\\n'%(newpath))\n        file.close()\n\n\n#for s in sumbissions:#list holding all submission\n#    print s\n#    break\n\n\n\n#values = df['Arm_id'].values\n#get a data frame with selected columns\n#FORMAT = ['Arm_id', 'DSPName', 'Pincode']\n#df_selected = df[FORMAT]\n" }, { "alpha_fraction": 0.8867924809455872, "alphanum_fraction": 0.8867924809455872, "avg_line_length": 52, "blob_id": "fb47cb7ba34d2a1e3871ce07846fff4cbadada8b", "content_id": "0b29ee3f6387c1fbfbeadd11b89353f48db7335f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/2019/readXLSBookletStructureGenerator-06-2019/readXLSBookletStructureGenerator2019/README.md", "repo_name": "yanioaioan/BookletStructureGenerator", "src_encoding": "UTF-8", "text": "RUN with: python readXLSBookletStructureGenerator.py\n" } ]
3
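The booklet-generator script in the record above cross-checks the i-numbers typed into the Excel sheet against the file names in the submitted-photos folder. A minimal Python 3 sketch of that cross-check, assuming a flat photos directory; the function names are illustrative, not taken from the repo:

import os
import re

def files_for_inumber(photos_dir, inumber):
    # Any file whose name contains the student's i-number counts as a submission.
    pattern = re.compile(r'.*' + re.escape(str(inumber)) + r'.*')
    return [f for f in os.listdir(photos_dir) if pattern.match(f)]

def missing_submissions(photos_dir, inumbers):
    # i-numbers from the sheet with no matching photo file at all.
    return [i for i in inumbers if not files_for_inumber(photos_dir, i)]

The script's own loops build the same lists with an inline r'.*'+str(inumber)+'.*' pattern; re.escape is safer if an i-number could ever contain regex metacharacters.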
jssandh2/DPA_GA
https://github.com/jssandh2/DPA_GA
dc734f2061f938e0a0d7dcf32b117f9153410033
885c9123a95b4a35dcf810e1e00b06845723435a
8e2a144d5ccd04be1156e378828ffe1f432a6af5
refs/heads/master
2016-08-08T06:49:20.565157
2015-04-24T19:22:11
2015-04-24T19:22:11
33,282,048
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46862348914146423, "alphanum_fraction": 0.4746963679790497, "avg_line_length": 25.70270347595215, "blob_id": "52a05ba8346de96a7fdc43b006bd7621cad38d66", "content_id": "89ffe21058bb9f31a0a7cb51432246e3f290e18d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "permissive", "max_line_length": 57, "num_lines": 37, "path": "/change.py", "repo_name": "jssandh2/DPA_GA", "src_encoding": "UTF-8", "text": "__author__ = 'Jus'\n\n\ndef change(denom, amount):\n hash_table = {}\n denom.sort()\n for i in range(len(denom)):\n hash_table[len(denom) - i - 1] = denom[i]\n a = []\n for i in range(len(denom)):\n array = []\n for j in range(len(denom)):\n array.append(0)\n a.append(array)\n print(a)\n for i in range(len(denom)):\n amount_needed = amount\n for j in range(i, len(denom)):\n a[i][j] = int(amount_needed/hash_table[j])\n amount_needed = amount_needed % hash_table[j]\n if amount_needed == 0:\n break\n min_coins = 0\n print(a)\n for i in range(len(a)):\n min_curr = 0\n for j in range(len(a[i])):\n min_curr += a[i][j]\n if min_curr < min_coins:\n min_coins = min_curr\n else:\n if i == 0:\n min_coins = min_curr\n else:\n continue\n print(min_coins, min_curr)\n return min_coins\n" } ]
1
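change.py above tries a greedy pass starting from each denomination in turn and keeps the smallest coin count. Even with an exact-change check, that scan is not optimal for every denomination set: for denom=[1, 4, 5] and amount=13 every greedy pass uses at least 4 coins, while 5+4+4 uses 3. A standard bottom-up dynamic-programming sketch, shown only as a contrast and not part of the repo:

def min_coins_dp(denom, amount):
    # best[a] = fewest coins summing exactly to a, or None if unreachable.
    best = [0] + [None] * amount
    for a in range(1, amount + 1):
        fits = [best[a - c] for c in denom if c <= a and best[a - c] is not None]
        if fits:
            best[a] = min(fits) + 1
    return best[amount]

print(min_coins_dp([1, 4, 5], 13))  # 3, via 5 + 4 + 4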
mauser7x63/fastAPI_socnet
https://github.com/mauser7x63/fastAPI_socnet
114cdb9bb194fc43617686aba8a070168a160efa
6a4ffdebaca7ecb3be58eaba15b9e22f90ebeedb
e66c282af1134f006e8c91a29d136058b3f57bee
refs/heads/main
2023-05-10T13:44:04.467547
2021-05-23T15:26:04
2021-05-23T15:26:04
368,251,091
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 26.5, "blob_id": "80f0438070edb44440ccbdf5cbcdb9da93614323", "content_id": "43f38f4ed17bcffeaf88bb93be7ebf61dc758d18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "# fastAPI_socnet\ntest FastApi project. SQLAlchemy, JWT\n" }, { "alpha_fraction": 0.6776878833770752, "alphanum_fraction": 0.6853179335594177, "avg_line_length": 38.327274322509766, "blob_id": "f8c628b67fe22a4c0a7c7eade479fc45c467317d", "content_id": "afc5e4e27aa6803e6d8c25cea17136d1b2698319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4325, "license_type": "no_license", "max_line_length": 128, "num_lines": 110, "path": "/main.py", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "from typing import List\nfrom fastapi import Depends, FastAPI, HTTPException, Security\nfrom fastapi.security import HTTPAuthorizationCredentials, HTTPBearer\nfrom sqlalchemy.orm import Session\nimport crud, models, schemas\nfrom database import SessionLocal, engine\nfrom auth import Auth\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\nsecurity = HTTPBearer()\nauth_handler = Auth()\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\[email protected]('/', response_model=List[schemas.Post])\ndef get_all_posts(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n all_posts = crud.get_posts(db, skip=skip, limit=limit)\n return all_posts\n\[email protected]('/post/{post_id}', response_model=schemas.Post)\ndef get_post(post_id: int, db: Session=Depends(get_db)):\n db_post = crud.get_post(db, post_id)\n return db_post\n\[email protected]('/post/{post_id}/like', response_model=schemas.Post, dependencies=[Depends(auth_handler.decode_token)])\ndef like_post(post_id: int, db: Session=Depends(get_db)):\n return crud.update_post(db, post_id, like=1)\n\[email protected]('/post/{post_id}/dislike', response_model=schemas.Post)\ndef dislike_post(post_id: int, db: Session=Depends(get_db), token: str=Depends(auth_handler.decode_token)):\n print('there is!')\n return crud.update_post(db, post_id, like=-1)\n\[email protected]('/users/', response_model=schemas.User)\ndef create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_email(db, email=user.email)\n if db_user:\n raise HTTPException(status_code=400, detail=\"Email already registered.\")\n return crud.create_user(db=db, user=user)\n\n\[email protected]('/users/', response_model=List[schemas.User])\ndef get_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n users = crud.get_users(db, skip=skip, limit=limit)\n return users\n\n\[email protected]('/users/{user_id}', response_model=schemas.User)\ndef get_user(user_id: int, db: Session = Depends(get_db)):\n db_user = crud.get_user(db, user_id=user_id)\n if not db_user:\n raise HTTPException(status_code=404, detail=\"User not found.\")\n return db_user\n\n\[email protected]('/newpost/', response_model=schemas.Post)\ndef create_user_post(content:schemas.PostCreate, db: Session = Depends(get_db), token: str=Depends(auth_handler.decode_token)):\n return crud.create_user_post(db=db, user_id=content.user_id, 
content=content.content)\n\n\[email protected]('/users/{user_id}/posts/', response_model=List[schemas.Post])\ndef get_user_posts(user_id: int, db: Session = Depends(get_db)):\n posts = crud.get_posts_by_user(db, user_id=user_id)\n if not posts:\n raise HTTPException(status_code=404, \n detail=f'There is no posts by {user_id}')\n return posts\n\n####auth futures#######\[email protected]('/signup')\ndef signup(user_details: schemas.AuthModel, db: Session = Depends(get_db)):\n if crud.get_user_by_email(db, email=user_details.username) != None:\n return 'Account already exists'\n try:\n hashed_password = auth_handler.encode_password(user_details.password)\n user = {'key': user_details.username, 'password': hashed_password}\n return crud.create_user(db, user=user)\n except:\n error_msg = 'Failed to signup user'\n return error_msg \n\[email protected]('/login')\ndef login(user_details: schemas.AuthModel, db: Session = Depends(get_db)):\n user = crud.get_user_by_email(db, email=user_details.username)\n if (user is None):\n return HTTPException(status_code=401, detail='Invalid username')\n if (not auth_handler.verify_password(user_details.password, user.hashed_password)):\n return HTTPException(status_code=401, detail='Invalid password')\n\n access_token = auth_handler.encode_token(user.email)\n refresh_token = auth_handler.encode_refresh_token(user.email)\n return {'access_token': access_token, 'refresh_token': refresh_token}\n\[email protected]('/refresh_token')\ndef refresh_token(credentials: HTTPAuthorizationCredentials = Security(security)):\n refresh_token = credentials.credentials\n new_token = auth_handler.refresh_token(refresh_token)\n return {'access_token': new_token}\n###################################################################\nif __name__ == \"__main__\":\n import uvicorn\n uvicorn.run(app, host=\"0.0.0.0\", port=9000)" }, { "alpha_fraction": 0.47636815905570984, "alphanum_fraction": 0.6890547275543213, "avg_line_length": 15.408163070678711, "blob_id": "21db875905b1ec59494a9b86765ea5be6e1b429e", "content_id": "2fe0311b2a44f35f587c7a1868fd2c94887fe914", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 804, "license_type": "no_license", "max_line_length": 27, "num_lines": 49, "path": "/requirements.txt", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "aiofiles==0.5.0\naniso8601==7.0.0\nasync-exit-stack==1.0.1\nasync-generator==1.10\nbcrypt==3.2.0\ncertifi==2020.12.5\ncffi==1.14.5\nchardet==4.0.0\nclick==7.1.2\ncryptography==3.4.7\ndnspython==2.1.0\necdsa==0.14.1\nemail-validator==1.1.2\nfastapi==0.64.0\ngraphene==2.1.8\ngraphql-core==2.3.2\ngraphql-relay==2.0.1\ngreenlet==1.1.0\nh11==0.12.0\nhttptools==0.1.2\nidna==3.1\nitsdangerous==1.1.0\nJinja2==2.11.3\njwt==1.2.0\nMarkupSafe==1.1.1\norjson==3.5.2\npasslib==1.7.4\npromise==2.3\npyasn1==0.4.8\npycparser==2.20\npydantic==1.8.1\nPyJWT==2.1.0\npython-dotenv==0.17.1\npython-jose==3.2.0\npython-multipart==0.0.5\nPyYAML==5.4.1\nrequests==2.25.1\nrsa==4.7.2\nRx==1.6.1\nsix==1.16.0\nSQLAlchemy==1.4.14\nstarlette==0.13.6\ntyping-extensions==3.10.0.0\nujson==3.2.0\nurllib3==1.26.4\nuvicorn==0.13.4\nuvloop==0.15.2\nwatchgod==0.7\nwebsockets==8.1\n" }, { "alpha_fraction": 0.5524511337280273, "alphanum_fraction": 0.5588011145591736, "avg_line_length": 30.503999710083008, "blob_id": "a6fba1150ccf847c827f5a229940e2fbc1675e0b", "content_id": "7db861c60bd491425cd6ef4b32afce75f527dcc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 4051, "license_type": "no_license", "max_line_length": 98, "num_lines": 125, "path": "/bot.py", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "import requests\nimport json\nfrom faker import Faker\nfrom requests.api import post\n\nfake = Faker()\nFaker.seed(3)\nendpoint = 'http://127.0.0.1:8000/'\nheaders = {\n 'accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n\ndef loadFromJSNON(fileName):\n '''\n загрузить из JSON. Принимает строку с именем файла \n '''\n with open(fileName) as f:\n fileStuff = f.read()\n loadedStructure = json.loads(fileStuff)\n return loadedStructure\n \ndef saveToJSON(fileName, data):\n '''\n сохранить структуру в JSON-формате. Принимает строку с именем файла и структуру для сохранения\n '''\n with open(fileName, 'w') as outfile:\n json.dump(data, outfile, indent=4, sort_keys=True)\n return\n\ndef loadBots():\n bots = loadFromJSNON('fastAPI_socnet/bots.json')\n if rules['number_of_users']>len(bots):\n for user in range(0, rules['number_of_users']-len(bots)):\n bot = Bot(name=fake.first_name_nonbinary(), passwd=fake.pystr())\n print(f'user: {bot.username} with passwd: {bot.password}')\n bots.update({\n bot.username: bot.password\n })\n saveToJSON(fileName='fastAPI_socnet/bots.json', data=bots)\n print(len(bots), ' bots has been saved')\n return bots\n\ndef prepearBots(botsDict):\n for user in botsDict:\n user = Bot(name=user, passwd=botsDict[user])\n print(user.username)\n \n\nclass Bot():\n def __init__(self, name, passwd):\n self.username = name\n self.password = passwd\n self.token = self.login()\n\n def signUp(self):\n url = endpoint+'signup'\n body = {\n 'username': self.username,\n 'password': self.password \n }\n res = requests.post(url=url, headers=headers, json=body)\n if res.status_code == 200:\n print(f'request to {url} is ok')\n return res.json()\n else:\n print('something goes wrong, error: ', res.status_code)\n return res.status_code \n\n def login(self):\n url = endpoint+'login'\n body = {\n 'username': self.username,\n 'password': self.password \n }\n res = requests.post(url=url, headers=headers, json=body)\n if res.status_code == 200:\n print(f'request to {url} is ok')\n token = res.json().get('access_token')\n if not token:\n print('login failed. 
try to signup')\n self.signUp()\n token = self.login()\n return token\n else:\n print('something goes wrong, error: ', res.status_code)\n return res.status_code \n \n def createPost(self, user_id, text):\n url = f'{endpoint}newpost/?token={self.token}'\n print(url)\n body = {\n \"content\": text,\n \"user_id\": user_id,\n \"token\": self.token\n }\n res = requests.post(url=url, headers=headers, json=body)\n if res.status_code == 200:\n return res.json()\n else:\n print(\"failed to post\", res.status_code)\n return res.status_code\n\n def ratePost(self, post_id, like=True):\n if like: \n rate = 'like'\n else:\n rate = 'dislike'\n url = f'{endpoint}post/{post_id}/{rate}?token={self.token}'\n print('request to URL:', url)\n res = requests.post(url=url, headers=headers, data={})\n if res.status_code == 200:\n pprint(res.json())\n return {'message': f'post id={post_id} was liked'}\n else:\n print('something goes wrong, error: ', res.status_code)\n return res.status_code \n\nif __name__==\"__main__\":\n from pprint import pprint\n print('bot standalone runned')\n rules = loadFromJSNON('fastAPI_socnet/bots_config.json')\n pprint(rules)\n users = loadBots()\n prepearBots(users)" }, { "alpha_fraction": 0.6511784791946411, "alphanum_fraction": 0.6579124331474304, "avg_line_length": 27.037734985351562, "blob_id": "453d1e813a2561d576ae9f1ca8c12e92bc4f32e0", "content_id": "1ea40b457166f3d54ed92b3b0e3f14b3be1d645d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 73, "num_lines": 53, "path": "/crud.py", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "from sqlalchemy.orm import Session\nfrom models import User, Post\nimport schemas\n\n\ndef get_user(db: Session, user_id: int):\n return db.query(User).filter(User.id == user_id).first()\n\ndef get_post(db: Session, post_id: int):\n return db.query(Post).filter(Post.id == post_id).first()\n\ndef get_user_by_email(db: Session, email: str):\n return db.query(User).filter(User.email == email).first()\n\n\ndef get_users(db: Session, skip: int = 0, limit: int = 100):\n return db.query(User).offset(skip).limit(limit).all()\n\n\ndef create_user(db: Session, user: schemas.UserCreate):\n db_user = User(email=user['key'], hashed_password=user['password'])\n print('ok')\n db.add(db_user)\n db.commit()\n db.refresh(db_user)\n return db_user\n\n\n\ndef get_posts(db: Session, skip: int = 0, limit: int = 100):\n return db.query(Post).offset(skip).limit(limit).all()\n\n\ndef create_user_post(db: Session, user_id: int, content: str):\n db_post = Post(author_id=user_id, content=content)\n db.add(db_post)\n db.commit()\n db.refresh(db_post)\n return db_post\n\ndef get_posts_by_user(db: Session, user_id: int):\n print('*'*50)\n print(db.query(Post).filter(Post.author_id == user_id).all())\n return db.query(Post).filter(Post.author_id == user_id).all()\n\ndef update_post(db:Session, post_id:int, like):\n db_post = db.query(Post).get(post_id)\n print(db_post)\n db_post.likes+=like\n print(db_post.likes)\n db.commit()\n db.refresh(db_post)\n return db_post" }, { "alpha_fraction": 0.6937500238418579, "alphanum_fraction": 0.6968749761581421, "avg_line_length": 30.950000762939453, "blob_id": "b9631e918601e4543dcf842689b6388bbcf478ed", "content_id": "fed8a9fcc75180105d799951adce02e2a987e0e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", 
"max_line_length": 67, "num_lines": 20, "path": "/models.py", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "from sqlalchemy import Boolean, Column, ForeignKey, Integer, String\n#from sqlalchemy.orm import relationship\nfrom database import Base\n\n\nclass User(Base):\n __tablename__ = 'user'\n id = Column(Integer, primary_key=True, index=True)\n email = Column(String, unique=True, index=True)\n hashed_password = Column(String)\n is_active = Column(Boolean, default=True)\n\n\nclass Post(Base):\n __tablename__ = 'Post'\n id = Column(Integer, primary_key=True, index=True)\n content = Column(String)\n likes = Column(Integer, default=0)\n dislikes = Column(Integer, default=0)\n author_id = Column(Integer, ForeignKey('user.id'))\n\n" }, { "alpha_fraction": 0.6460980176925659, "alphanum_fraction": 0.6497277617454529, "avg_line_length": 15.727272987365723, "blob_id": "c0cb5bd53d4cab7d599ca33f77dd3baab6cb9a8f", "content_id": "3551b3a431fab51ddb5da34b3d92f81c6928aeed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 33, "num_lines": 33, "path": "/schemas.py", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "from typing import List, Optional\nfrom pydantic import BaseModel\n\n\nclass Post(BaseModel):\n id: int\n content: str\n author_id: int\n likes: int = 0\n dislikes: int = 0\n class Config:\n orm_mode = True\n\nclass PostCreate(BaseModel):\n content: str\n user_id: int\n token: str\n\nclass UserBase(BaseModel):\n email: str\n\nclass UserCreate(UserBase):\n password: str\n\nclass User(UserBase):\n id: int\n is_active: bool\n class Config:\n orm_mode = True\n\nclass AuthModel(BaseModel):\n username: str\n password: str" }, { "alpha_fraction": 0.5707195997238159, "alphanum_fraction": 0.5856079459190369, "avg_line_length": 35.65151596069336, "blob_id": "97d15940783ba3eae4f8a430b7f6fc9e39143879", "content_id": "4fa4d06f009d698714ccfdd8c7b2c5e77fd00e28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2418, "license_type": "no_license", "max_line_length": 91, "num_lines": 66, "path": "/auth.py", "repo_name": "mauser7x63/fastAPI_socnet", "src_encoding": "UTF-8", "text": "import os\nimport jwt\nfrom fastapi import HTTPException\nfrom passlib.context import CryptContext\nfrom datetime import datetime, timedelta\n\nclass Auth():\n hasher = CryptContext(schemes=['bcrypt'])\n #secret = os.getenv(\"APP_SECRET_STRING\")\n secret = \"1b0b1cd761525c45be721743ce1a0cf9b3d053e04f7976ffdc4ff8e2e3279634\"\n\n def encode_password(self, password):\n return self.hasher.hash(password)\n \n def verify_password(self, password, encoded_password):\n return self.hasher.verify(password, encoded_password)\n \n def encode_token(self, username):\n payload = {\n 'exp' : datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat' : datetime.utcnow(),\n 'scope' : 'access_token',\n 'sub' : username\n }\n return jwt.encode(\n payload,\n self.secret,\n algorithm = 'HS256'\n )\n\n def decode_token(self, token):\n try:\n payload = jwt.decode(token, self.secret, algorithms=['HS256'])\n if (payload['scope'] == 'access_token'):\n return payload['sub']\n raise HTTPException(status_code=401, detail = 'Scope for the token is invalid')\n except jwt.ExpiredSignatureError:\n raise HTTPException(status_code=401, detail = 'Token expired')\n except jwt.InvalidTokenError:\n raise HTTPException(status_code=401, detail='Token 
is invalid')\n    \n    def encode_refresh_token(self, username):\n        payload = {\n            'exp' : datetime.utcnow() + timedelta(days=0, minutes=30),\n            'iat' : datetime.utcnow(),\n            'scope' : 'refresh_token',\n            'sub' : username\n        }\n        print(payload)\n        return jwt.encode(\n            payload,\n            self.secret,\n            algorithm = 'HS256'\n        )\n    def refresh_token(self, refresh_token):\n        try:\n            payload = jwt.decode(refresh_token, self.secret, algorithms=['HS256'])\n            if (payload['scope'] == 'refresh_token'):\n                username = payload['sub']\n                new_token = self.encode_token(username)\n                return new_token\n            raise HTTPException(status_code=401, detail='Invalid scope for token')\n        except jwt.ExpiredSignatureError:\n            raise HTTPException(status_code=401, detail='Refresh token expired')\n        except jwt.InvalidTokenError:\n            raise HTTPException(status_code=401, detail='Invalid refresh token')" } ]
8
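The Auth class in the fastAPI_socnet record above wraps PyJWT and passlib. A minimal sketch of the same access-token round trip using PyJWT directly, with a placeholder secret rather than the repo's hard-coded one; jwt.decode() validates the exp claim automatically and raises ExpiredSignatureError once the token lapses:

import jwt
from datetime import datetime, timedelta

SECRET = 'change-me'  # placeholder, not the repo's real secret

token = jwt.encode(
    {'sub': '[email protected]', 'scope': 'access_token',
     'exp': datetime.utcnow() + timedelta(minutes=30)},
    SECRET, algorithm='HS256')

claims = jwt.decode(token, SECRET, algorithms=['HS256'])
print(claims['sub'])  # [email protected]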
AnasNadeem/job_search
https://github.com/AnasNadeem/job_search
00a6205d3f173136fbb2f9c399a782adfb9b3c58
4a18238e91e20a60a1a3fc87a9493cf450815422
41b3ec5bf3bc187fc40ff00a9a73c50a198b2c9a
refs/heads/main
2023-07-29T12:40:55.175314
2021-09-10T04:49:10
2021-09-10T04:49:10
404,701,421
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6316981315612793, "alphanum_fraction": 0.65132075548172, "avg_line_length": 44.72413635253906, "blob_id": "35bdfcc0f16936dad51293a8bcc31f60bb2b7617", "content_id": "68deaaa08c8c1963e8860e5defb61da0102fb500", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1325, "license_type": "no_license", "max_line_length": 122, "num_lines": 29, "path": "/linkedin_jobs.py", "repo_name": "AnasNadeem/job_search", "src_encoding": "UTF-8", "text": "# https://in.linkedin.com/jobs/search?keywords=Website%20development\n# &location=India&geoId=102713980&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nparam_query = {'keywords':'website',\n 'location':'India',\n # 'geoId':102713980,\n 'trk':'public_jobs_jobs-search-bar_search-submit',\n 'pageNum':0\n }\nr = requests.get('https://in.linkedin.com/jobs/search', params=param_query).text\nsoup = BeautifulSoup(r, 'lxml')\n\nfor i in soup.find_all('div', class_='base-card base-card--link base-search-card base-search-card--link job-search-card'):\n # Getting link of job \n # all_job_link.append(f\"https://in.indeed.com/{i.attrs['href']}\")\n job_link = i.find('a', class_=\"base-card__full-link\").attrs['href']\n # Getting all the other data\n job_card = i.find('div', class_='base-search-card__info')\n # Job Title \n job_title = job_card.find('h3', class_='base-search-card__title').text\n # Company Title\n job_comp_title = job_card.find('h4', class_='base-search-card__subtitle').text\n # Location Title\n job_comp_location = job_card.find('span', class_='job-search-card__location').text\n\n print(job_title.strip(), job_comp_title.strip(), job_comp_location.strip())" }, { "alpha_fraction": 0.5882511734962463, "alphanum_fraction": 0.5908170342445374, "avg_line_length": 50.075862884521484, "blob_id": "e0a71b6c78918abaf147a71624f5c3acb4fadd94", "content_id": "ecd40f170b5e55d41a2c6dbe9c0870bbbf86b3f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7405, "license_type": "no_license", "max_line_length": 156, "num_lines": 145, "path": "/main_app.py", "repo_name": "AnasNadeem/job_search", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request\nimport requests\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\n\[email protected]('/')\ndef home_page():\n return render_template('index.html', title='Home')\n\[email protected]('/search-job/', methods=['GET', 'POST'])\ndef search_job():\n if request.method=='POST':\n job_position = request.form.get('job_position')\n job_location = request.form.get('job_location')\n # List Structure: [comp_title, job_pos, job_loc, job_salary, job_link, job_website, job_skills]\n all_jobs_list = []\n all_jobs_list.append(indeed_jobs(job_position, job_location))\n all_jobs_list.append(linkedin_jobs(job_position, job_location))\n all_jobs_list.append(shine_jobs(job_position, job_location))\n # all_jobs_list.append(times_jobs(job_position, job_location))\n return render_template('resultscard.html',results_list = all_jobs_list)\n\ndef indeed_jobs(job_position, job_loc):\n indeed_jobs_list = []\n try:\n # Indeed Queries \n indeed_param_query = {'q':f'{job_position}','l':f'{job_loc}'}\n indeed_request = requests.get('https://in.indeed.com/jobs', params=indeed_param_query).text\n indeed_soup = BeautifulSoup(indeed_request, 'lxml')\n for i in indeed_soup.find_all('a', class_='tapItem'):\n # Getting link of job \n 
indeed_job_link = f\"https://in.indeed.com{i.attrs['href']}\"\n # Getting all the other data\n indeed_job_card = i.find('div', class_='job_seen_beacon')\n # Job Title \n indeed_job_pos = indeed_job_card.find('h2', class_='jobTitle')\n indeed_job_pos_text = indeed_job_pos.find('span', title=True).text\n # Company Title\n indeed_job_comp_title = indeed_job_card.find('span', class_='companyName').text\n # Location Title\n indeed_job_comp_location = indeed_job_card.find('div', class_='companyLocation').text\n # Salary Title\n try:\n indeed_job_salary = indeed_job_card.find('span', class_='salary-snippet').text\n except:\n indeed_job_salary= ''\n # Job Skills \n indeed_job_skills = indeed_job_card.find('div', class_=\"job-snippet\")\n indeed_job_skills_list = []\n for skill in indeed_job_skills.find_all('li'):\n indeed_job_skills_list.append(skill.text)\n indeed_job_list = [indeed_job_comp_title, indeed_job_pos_text, indeed_job_comp_location, indeed_job_salary, indeed_job_link, 'Indeed']\n indeed_jobs_list.append(indeed_job_list)\n except Exception as e:\n print(f'Error occured {e}')\n return indeed_jobs_list\n\ndef linkedin_jobs(job_position, job_loc):\n linkedin_jobs_list = []\n try:\n # Linkedin Queries \n linkedin_param_query = {'keywords':f'{job_position}',\n 'location':f'{job_loc}',\n 'trk':'public_jobs_jobs-search-bar_search-submit',\n 'pageNum':0\n }\n linkedin_request = requests.get('https://in.linkedin.com/jobs/search', params=linkedin_param_query).text\n linkedin_soup = BeautifulSoup(linkedin_request, 'lxml')\n for i in linkedin_soup.find_all('div', class_='base-card base-card--link base-search-card base-search-card--link job-search-card'):\n # Getting link of job \n linkedin_job_link = i.find('a', class_=\"base-card__full-link\").attrs['href']\n # Getting all the other data\n linkedin_job_card = i.find('div', class_='base-search-card__info')\n # Job Title \n linkedin_job_position = linkedin_job_card.find('h3', class_='base-search-card__title').text\n # Company Title\n linkedin_job_comp_title = linkedin_job_card.find('h4', class_='base-search-card__subtitle').text\n # Location Title\n linkedin_job_comp_location = linkedin_job_card.find('span', class_='job-search-card__location').text \n linkedin_job_list = [linkedin_job_comp_title, linkedin_job_position, linkedin_job_comp_location, 'Not Mentioned', linkedin_job_link, 'Linkedin']\n linkedin_jobs_list.append(linkedin_job_list)\n except Exception as e:\n print(f'Error occured {e}')\n return linkedin_jobs_list\n\ndef shine_jobs(job_position, job_loc):\n shine_jobs_list = []\n try:\n # https://www.shine.com/job-search/website-developer-jobs-in-mumbai\n job_position_text = job_position.replace(' ', '-')\n if job_loc:\n shine_request = requests.get(f'https://www.shine.com/job-search/{job_position_text}-jobs-in-{job_loc}').text\n shine_soup = BeautifulSoup(shine_request, 'lxml')\n for i in shine_soup.find_all('li', class_='result-display__profile'):\n shine_job_card = i.find('div', class_='w-90 ml-25')\n # Job Title \n shine_job_title = shine_job_card.ul.li.h2.text\n # Job Link\n shine_job_link = f\"https://www.shine.com{shine_job_card.ul.li.h2.a['href']}\"\n # Company Title\n shine_job_comp_title = shine_job_card.find('span', class_='result-display__profile__company-name').text\n shine_year_title = shine_job_card.find_all('li', class_=\"result-display__profile__years\")\n shine_location = shine_year_title[1].text\n shine_job_list = [shine_job_comp_title, shine_job_title, shine_location, 'Not Mentioned', shine_job_link, 'Shine']\n 
shine_jobs_list.append(shine_job_list)\n else:\n shine_request = requests.get(f'https://www.shine.com/job-search/{job_position_text}-jobs').text\n shine_soup = BeautifulSoup(shine_request, 'lxml')\n for i in shine_soup.find_all('li', class_='result-display__profile'):\n shine_job_card = i.find('div', class_='w-90 ml-25')\n # Job Title \n shine_job_title = shine_job_card.ul.li.h2.text\n # Job Link\n shine_job_link = f\"https://www.shine.com{shine_job_card.ul.li.h2.a['href']}\"\n # Company Title\n shine_job_comp_title = shine_job_card.find('span', class_='result-display__profile__company-name').text\n shine_year_title = shine_job_card.find_all('li', class_=\"result-display__profile__years\")\n shine_location = shine_year_title[1].text\n shine_job_list = [shine_job_comp_title, shine_job_title, shine_location, 'Not Mentioned', shine_job_link, 'Shine']\n shine_jobs_list.append(shine_job_list)\n except Exception as e:\n print(f'Error occured {e}')\n return shine_jobs_list\n\n# def times_jobs(job_position, job_loc):\n# times_jobs_list = []\n# try:\n# tj_param_query = {\n# \"searchType\":\"personalizedSearch\",\n# \"from\":\"submit\",\n# \"txtKeywords\":f\"{job_position}\",\n# \"txtLocation\":f\"{job_loc}\"\n# }\n# tj_request = requests.get(f'https://www.timesjobs.com/candidate/job-search.html', params=tj_param_query).text\n# tj_soup = BeautifulSoup(tj_request, 'lxml')\n# print(tj_soup.title)\n\n# except Exception as e:\n# print(f'Error occured {e}')\n# return times_jobs_list\n\n\nif __name__ == '__main__':\n app.run()" }, { "alpha_fraction": 0.6200153231620789, "alphanum_fraction": 0.6211656332015991, "avg_line_length": 40.31745910644531, "blob_id": "617193ec746fb5734b62fe8228a6fc2245643e76", "content_id": "6e44e4b560e3f933460880cdaa76f5144fbec581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2608, "license_type": "no_license", "max_line_length": 142, "num_lines": 63, "path": "/shine_jobs.py", "repo_name": "AnasNadeem/job_search", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\n# position_title = 'website'\n# r = requests.get(f'https://www.shine.com/job-search/{position_title}-jobs').text\n# soup = BeautifulSoup(r, 'lxml')\n# print(soup.title)\n\ntry:\n indeed_param_query = {'q':'website','l':''}\n indeed_request = requests.get('https://in.indeed.com/jobs', params=indeed_param_query).text\n indeed_soup = BeautifulSoup(indeed_request, 'lxml')\n indeed_jobs_list = []\n for i in indeed_soup.find_all('a', class_='tapItem'):\n # Getting link of job \n indeed_job_link = f\"https://in.indeed.com{i.attrs['href']}\"\n # Getting all the other data\n indeed_job_card = i.find('div', class_='job_seen_beacon')\n # Job Title \n indeed_job_pos = indeed_job_card.find('h2', class_='jobTitle')\n indeed_job_pos_text = indeed_job_pos.find('span', title=True).text\n # Company Title\n indeed_job_comp_title = indeed_job_card.find('span', class_='companyName').text\n # Location Title\n indeed_job_comp_location = indeed_job_card.find('div', class_='companyLocation').text\n\n indeed_job_skills_list = []\n indeed_job_skills = indeed_job_card.find('div', class_=\"job-snippet\")\n for skill in indeed_job_skills.find_all('li'):\n indeed_job_skills_list.append(skill.text)\n print(indeed_job_skills_list)\n\n # Salary Title\n try:\n indeed_job_salary = indeed_job_card.find('span', class_='salary-snippet').text\n except:\n indeed_job_salary= ''\n indeed_job_list = [indeed_job_comp_title, indeed_job_pos_text, indeed_job_comp_location, 
indeed_job_salary, indeed_job_link, 'Indeed']\n indeed_jobs_list.append(indeed_job_list)\n # all_jobs_list.append(indeed_jobs_list)\n\nexcept Exception as e:\n print(f'Error occured {e}')\n# # Get all the joblist\n# all_job_link = []\n# for i in soup.find_all('a', class_='tapItem'):\n# # Getting link of job \n# # all_job_link.append(f\"https://in.indeed.com/{i.attrs['href']}\")\n\n# # Getting all the other data\n# job_card = i.find('div', class_='job_seen_beacon')\n\n# # Job Title \n# job_title = job_card.find('h2', class_='jobTitle')\n# job_title_text = job_title.find('span', title=True).text\n# # Company Title\n# job_comp_title = job_card.find('span', class_='companyName').text\n# # Location Title\n# job_comp_location = job_card.find('div', class_='companyLocation').text\n# # Salary Title\n# try:\n# job_salary = job_card.find('span', class_='salary-snippet').text\n# except:\n# job_salary= ''\n \n" }, { "alpha_fraction": 0.8141592741012573, "alphanum_fraction": 0.8141592741012573, "avg_line_length": 55.5, "blob_id": "edd950a38e718c7200529bc36bb21762eee71864", "content_id": "e3d25e6644b0f1d72b88350df1438f12e39f012d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 113, "license_type": "no_license", "max_line_length": 99, "num_lines": 2, "path": "/README.md", "repo_name": "AnasNadeem/job_search", "src_encoding": "UTF-8", "text": "# job_search\nA flask based web application that fetches the job data through scraping the job providing website.\n" }, { "alpha_fraction": 0.7483296394348145, "alphanum_fraction": 0.7505567669868469, "avg_line_length": 36.5, "blob_id": "18b41319500b45ab4da9a8f4e1115b5ab8374c30", "content_id": "fa30167cd3375fb839cab43ddd533bf8ad4c8066", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "no_license", "max_line_length": 146, "num_lines": 12, "path": "/timesjob.py", "repo_name": "AnasNadeem/job_search", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nposition_title = 'website'\nurl = \"https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=Website+Development&txtLocation=\"\nparam_query = {\n \"searchType\":\"personalizedSearch\",\n \"from\":\"submit\",\n \"txtKeywords\":\"\"\n}\nr = requests.get(f'https://www.shine.com/job-search/{position_title}-jobs').text\nsoup = BeautifulSoup(r, 'lxml')\nprint(soup.title)" } ]
5
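Every scraper in the job_search record above follows the same requests plus BeautifulSoup pattern: fetch the results page with query parameters, parse it with the lxml backend, then pull job cards out by CSS class. A stripped-down sketch of that pattern; the URL and the 'job-card' class name are hypothetical stand-ins, since real job boards change their markup often:

import requests
from bs4 import BeautifulSoup

resp = requests.get('https://example.com/jobs',
                    params={'q': 'python developer', 'l': 'remote'})
soup = BeautifulSoup(resp.text, 'lxml')
for card in soup.find_all('div', class_='job-card'):  # hypothetical class name
    title = card.find('h2')
    if title:
        print(title.get_text(strip=True))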
le114195/Machine_Learning
https://github.com/le114195/Machine_Learning
6a0d9a4e1dc5c304a969661a8958ab54fbe66151
efd308bf69363e7d70c1574caed866ed767755a9
152e62cd78bc2e3618018e05e464f23a64bd3e1c
refs/heads/master
2020-06-16T03:08:28.656289
2016-12-01T09:42:02
2016-12-01T09:42:02
75,250,300
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5996326804161072, "alphanum_fraction": 0.6161616444587708, "avg_line_length": 28.432432174682617, "blob_id": "10a16107ca7c27fd3e3836cff57b372f50beab1c", "content_id": "2580be88d98a018babc9c75d0d487e0ae21decbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1089, "license_type": "no_license", "max_line_length": 78, "num_lines": 37, "path": "/python/PythonProject/algorithm/logistics/logistics.py", "repo_name": "le114195/Machine_Learning", "src_encoding": "UTF-8", "text": "import numpy\nimport math\n\ndef openfile():\n dataMat = []; labelMat = []\n fr = open('testSet3.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\ndef sigmoid(inX):\n return 1.0/(1 + math.exp(-inX))\n\ndef gradAscent(dataMatIn, classLabels):\n dataMatrix = numpy.mat(dataMatIn)\n labelMat = numpy.mat(classLabels).transpose() #convert to NumPy matrix\n m,n = numpy.shape(dataMatrix)\n alpha = 0.01\n maxCycles = 50\n weights = numpy.ones((n,1))\n h = numpy.ones((m, 1))\n for k in range(maxCycles): #heavy on matrix operations\n xx = dataMatrix*weights\n for hk in range(m):\n h[hk, 0] = sigmoid(xx[hk, 0])\n\n error = (labelMat - h) #vector subtraction\n weights = weights + alpha * dataMatrix.transpose()* error #matrix mult\n return weights\n\ndataMatIn, classLabels = openfile()\n\nhhh = gradAscent(dataMatIn, classLabels)\n\nprint('hhh = ', hhh)\n" }, { "alpha_fraction": 0.5433149933815002, "alphanum_fraction": 0.5658487677574158, "avg_line_length": 16.490739822387695, "blob_id": "76f9399ec1fb844b7e3e1d19321780de1e9a4a75", "content_id": "dd8f14a932bc449c838a29870fed17ec902f9fd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4178, "license_type": "no_license", "max_line_length": 54, "num_lines": 216, "path": "/c_c++/svm/main.cpp", "repo_name": "le114195/Machine_Learning", "src_encoding": "GB18030", "text": "#include \"svm.h\"\r\n#include <iostream>\r\n#include <list>\r\n#include <iterator>\r\n#include <vector>\r\n#include <string>\r\n#include <ctime>\r\n\r\nusing namespace std;\r\n\r\n#ifdef WIN32\r\n#pragma warning (disable: 4514 4786)\r\n#endif\r\n\r\n\r\n\r\nsvm_parameter param;\r\nsvm_problem prob;\r\nsvm_model *svmModel;\r\nlist<svm_node*> xList;\r\nlist<double> yList ;\r\nconst int MAX=10;\r\nconst int nTstTimes=10;\r\nint nodeNum;\r\nvector<int> predictvalue;\r\nvector<int> realvalue;\r\nint trainNum=0;\r\nvoid setParam()\r\n{\r\n param.svm_type = C_SVC;\r\n\tparam.kernel_type = RBF;\r\n\tparam.degree = 3;\r\n\tparam.gamma = 0.5;\r\n\tparam.coef0 = 0;\r\n\tparam.nu = 0.5;\r\n\tparam.cache_size = 40;\r\n\tparam.C = 500;\r\n\tparam.eps = 1e-3;\r\n\tparam.p = 0.1;\r\n\tparam.shrinking = 1;\r\n\t// param.probability = 0;\r\n\tparam.nr_weight = 0;\r\n\tparam.weight = NULL;\r\n param.weight_label =NULL;\r\n}\r\nvoid train(char *filePath)\r\n{\r\n\t\r\n\tFILE *fp;\r\n\tint k;\r\n\tint line=0;\r\n\tint temp;\r\n \r\n\tif((fp=fopen(filePath,\"rt\"))==NULL)\r\n\t\treturn ;\r\n\twhile(1)\r\n\t{\r\n\t\t svm_node* features = new svm_node[nodeNum+1];\r\n\t\t \r\n\t\t for(k=0;k<nodeNum;k++)\r\n\t\t {\r\n\t\t \tfscanf(fp,\"%d\",&temp);\r\n \r\n\t\t\t\t\r\n\t \r\n\t\t\tfeatures[k].index = k + 1;\r\n\t\t\tfeatures[k].value = temp/(MAX*1.0) ;\r\n\t\t}\r\n\t\t\t\r\n\t\tfeatures[nodeNum].index = 
-1;\r\n\r\n\r\n\t\tfscanf(fp,\"%d\",&temp);\r\n\t\txList.push_back(features);\r\n\t\tyList.push_back(temp);\r\n \r\n\t\tline++;\r\n\t\ttrainNum=line;\r\n\t\tif(feof(fp)) \r\n\t\t\tbreak; \r\n\t}\r\n\r\n\r\n \t\r\n \r\n    setParam();\r\n\tprob.l=line;\r\n\tprob.x=new svm_node *[prob.l]; //the corresponding feature vectors\r\n\tprob.y = new double[prob.l];  //holds the target values\r\n\tint index=0;\t\r\n\twhile(!xList.empty())\r\n\t{\r\n\t\tprob.x[index]=xList.front();\r\n\t\tprob.y[index]=yList.front();\r\n\t\txList.pop_front();\r\n\t\tyList.pop_front();\r\n\t\tindex++;\r\n\t}\r\n\t//std::cout<<prob.l<<\"list end\\n\";\r\n\tsvmModel=svm_train(&prob, &param);\r\n\r\n\t//std::cout<<\"\\n\"<<\"over\\n\";\r\n\t//save the model\r\n\tsvm_save_model(\"model.txt\",svmModel);\r\n\r\n\t//free the memory\r\n\tdelete prob.y;\r\n\tdelete [] prob.x;\r\n\tsvm_free_and_destroy_model(&svmModel);\r\n}\r\nvoid predict(char *filePath)\r\n{\r\n    svm_model *svmModel = svm_load_model(\"model.txt\");\r\n\r\n \tFILE *fp;\r\n\tint line=0;\r\n\tint temp;\r\n\r\n\tif((fp=fopen(filePath,\"rt\"))==NULL)\r\n\t\treturn ;\r\n\t\r\n\r\n\twhile(1)\r\n\t{\r\n\t\t svm_node* input = new svm_node[nodeNum+1];\r\n\t\t for(int k=0;k<nodeNum;k++)\r\n\t\t {\r\n\t\t \tfscanf(fp,\"%d\",&temp);\r\n\t\t\tinput[k].index = k + 1;\r\n\t\t\tinput[k].value = temp/(MAX*1.0);\r\n\t\t}\r\n\t\tinput[nodeNum].index = -1;\r\n\r\n\r\n\t\t\r\n    \tint predictValue=svm_predict(svmModel, input);\r\n\t\tpredictvalue.push_back(predictValue);\r\n\r\n\t\tcout<<predictValue<<endl;\r\n\t\tif(feof(fp)) \r\n\t\t\tbreak; \r\n\t}\r\n\r\n}\r\nvoid writeValue(vector<int> &v,string filePath)\r\n{\r\n    \r\n \tFILE *pfile=fopen(\"result.txt\",\"wb\");\r\n\r\n\tvector<int>::iterator iter=v.begin();\r\n\tfor(;iter!=v.end();++iter)\r\n\t{\r\n        fprintf(pfile, \"%d\\n\", *iter);\r\n\t}\r\n\tfclose(pfile);\r\n}\r\nbool getRealValue()\r\n{\r\n    FILE *fp;\r\n\tint temp;\r\n\r\n\tif((fp=fopen(\"tictgts2000.txt\",\"rt\"))==NULL)\r\n\t\treturn false;\r\n\twhile(1)\r\n\t{\r\n\t\t\r\n\t\tfscanf(fp,\"%d\",&temp);\r\n\t\trealvalue.push_back(temp); \r\n\t\tif(feof(fp)) \r\n\t\t\tbreak; \r\n\t}\r\n\treturn true;\r\n}\r\ndouble getAccuracy()\r\n{\r\n    if(!getRealValue())\r\n\t\treturn 0.0;\r\n\tint counter=0;\r\n\tint counter1=0;\r\n\tfor(int i=0;i<realvalue.size();i++)\r\n\t{\r\n\t\tif(realvalue.at(i)==predictvalue.at(i))\r\n\t\t{\r\n\t\t\tcounter++;     //number of correct predictions\r\n\t    \tif(realvalue.at(i)==1)\r\n\t\t\t    counter1++;\r\n\t\t}\r\n\t}\r\n    //cout<<realvalue.size()<<endl;    //number of correctly predicted records whose target value is 1\r\n\treturn counter*1.0/realvalue.size();\r\n}\r\nint main()\r\n{\r\n//    nodeNum = 85;\r\n    nodeNum = 2;\r\n    \r\n    \r\n    clock_t t1,t2,t3;\r\n\t\r\n\tcout<<\"Please wait...\"<<endl;\r\n\tt1=clock();\r\n\ttrain(\"ticdata1.txt\");   //train\r\n    t2=clock();\r\n\t\r\n    predict(\"predict1.txt\");   //predict\r\n\tt3=clock();\r\n\twriteValue(predictvalue,\"result.txt\");  //write the predictions to a file\r\n\tdouble accuracy=getAccuracy();    //compute the accuracy\r\n\tcout<<\"Training data: \"<<trainNum<<\" records\"<<endl;\r\n\tcout<<\"Test data: \"<<realvalue.size()<<\" records\"<<endl;\r\n\tcout<<\"Training time: \"<<1.0*(t2-t1)/nTstTimes<<\"ms\"<<endl;\r\n\tcout<<\"Prediction time: \"<<1.0*(t3-t2)/nTstTimes<<\"ms\"<<endl;\r\n    cout<<\"Test accuracy: \"<<accuracy*100<<\"%\"<<endl;\r\n    \r\n\treturn 0;\r\n}\r\n" }, { "alpha_fraction": 0.6305732727050781, "alphanum_fraction": 0.6305732727050781, "avg_line_length": 16.44444465637207, "blob_id": "a82873910c3e010c650e08fa9f40f155c7c4173c", "content_id": "6935f158e97b74b78accf19e81aadbd7b32e581c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 157, "license_type": "no_license",
"max_line_length": 26, "num_lines": 9, "path": "/c_c++/svm/makefile", "repo_name": "le114195/Machine_Learning", "src_encoding": "UTF-8", "text": "demo:main.o svm.o\n\tg++ -o demo main.o svm.o\nmain.o:main.cpp svm.cpp\n\tg++ -c -o main.o main.cpp\ntest.o:svm.cpp svm.h\n\tgcc -c -o svm.o svm.cpp\n\nclean:\n\trm *.o\n" }, { "alpha_fraction": 0.27413588762283325, "alphanum_fraction": 0.33373063802719116, "avg_line_length": 17.2391300201416, "blob_id": "b7378dd7dc7a34db7ff8ad6b53c288093b9b6cac", "content_id": "2816171b1721038c4fcafc55bb328d32cc1f0e5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 871, "license_type": "no_license", "max_line_length": 52, "num_lines": 46, "path": "/c_c++/svm/createData/createData.cpp", "repo_name": "le114195/Machine_Learning", "src_encoding": "UTF-8", "text": "#include<iostream>\n\n\n\nint main()\n{\n int x, y;\n \n FILE *pfile=fopen(\"ticdata1.txt\",\"wb\");\n \n //第一象限\n for (int i = 0; i < 1000; i++) {\n x = rand() % 100;\n y = rand() % 100;\n \n fprintf(pfile, \"%d %d 1\\n\", x, y);\n }\n \n //第二象限\n for (int i = 0; i < 1000; i++) {\n x = -rand() % 100;\n y = rand() % 100;\n \n fprintf(pfile, \"%d %d 2\\n\", x, y);\n }\n \n //第三象限\n for (int i = 0; i < 1000; i++) {\n x = -rand() % 100;\n y = -rand() % 100;\n \n fprintf(pfile, \"%d %d 3\\n\", x, y);\n }\n \n //第四象限\n for (int i = 0; i < 1000; i++) {\n x = rand() % 100;\n y = -rand() % 100;\n \n fprintf(pfile, \"%d %d 4\\n\", x, y);\n }\n \n \n fclose(pfile);\n return 0;\n}\n" }, { "alpha_fraction": 0.5373134613037109, "alphanum_fraction": 0.5522388219833374, "avg_line_length": 7.800000190734863, "blob_id": "ce916dc30f5ec3fd4c31f29299a908e6ca142de0", "content_id": "7e3406fb6a34763b3a15471a564164a59a5d500c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 22, "num_lines": 15, "path": "/python/PythonProject/hello/hello.py", "repo_name": "le114195/Machine_Learning", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport math\nimport numpy\n\n\nprint('hello world!')\n\n\na = 2\nb = 3\nc = a**b\n\nc = math.sqrt(c)\n\nprint('c = ', c, 'ff')\n\n\n" } ]
5
usmansafdar28/kvdb
https://github.com/usmansafdar28/kvdb
f7949614a99bdc3860eca2fbc7652be50c24ae12
e88594f194400ac4e7ed7f992b08b5b1dab0e85c
15101e316518a53a0be39550d43a3509daadd884
refs/heads/master
2020-04-04T21:18:26.104553
2018-11-05T21:48:08
2018-11-05T21:48:08
156,281,337
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6854051351547241, "alphanum_fraction": 0.6983373165130615, "avg_line_length": 71.82691955566406, "blob_id": "f457440c317c883ce2850f98c384a72a97d51d71", "content_id": "eae991c7d9814ab49d84b6be57607e4a52906d1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3789, "license_type": "no_license", "max_line_length": 179, "num_lines": 52, "path": "/README.md", "repo_name": "usmansafdar28/kvdb", "src_encoding": "UTF-8", "text": "# kvdb\nKVDB's REST API testing using PYTHON\n\n# Python Modules Used:\n 1- json (Sending Json data in POST requests)\n 2- unittest (Testing Framework which gives the capability for compiling, asserting and executing the Test Suits)\n 3- urllib (This Module is being used for sending HTTP's POST, GET, UPDATE and DELETE REST reuests and receive responses)\n# Python Version used for Development:\n 3.7.1\n# How to RUN the Tests\n Open the \"kvdbApiTest\" file in python IDE and Press \"F5\"\n# Main Class:\n \"KvdbApiTesting\" is main class which extends the \"unittest.TestCase\" class\n - All the Testcases are written in the form of Methods which executes in alphabetical order.\n - If All the testcases Pass no error appears, result Appear as:\n - Ran XX tests in XXX seconds\n - \"OK\" appears at the bottom\n# Methods in the Class:\n Here are eight methods created to cover the API testing scope which executes in Alphabetical order. \n Each method starts with \"test\" keywword which is unittest module naming convention requirement for methods to be considered for auto execution:\n # 1- test_aa_createSimpleBucket(self):\n This function creates a Bucket, Asserion is applied on Bucket String length, if its is equal to 22 characters this case considered as pass\n # 2- test_bb_updateBucketSecretKey(self):\n This function perform three jobs:\n 1- update the Bucket with Secret key which then required to fetch the Bucket Keys.\n 2- Send the JSON Data (after encoding in UTF-8) in the POST request.\n 3- Assertion: After POST request, if HTTP status Code is 200 then this Testcase is pass\n # 3- test_dd_retrieveWithoutSecretKey(self):\n This Function verifies that Key/Values cannot be retrieved without providing Secret key.\n Assertion: If HTTP Status code is 404 then this Testcase is pass as URL cannot be found without providing secret key\n # 4- test_cc_getKeyValues(self):\n This Function verifies that Key/Values can be retrieved when Secret Key is provided.\n Assertion: is applied on Response Received of the GET request and on the Data which we POSTED for this Key\n\n # 5- test_ee_updateBucketSecretAndWriteKeys(self):\n This function perform following multiple jobs:\n 1- Updates the Bucket with Secret key which then required to fetch the Bucket Keys.\n 2- Updates the Bucket with Write key which then required to write in the Bucket.\n 3- Send the JSON Data (after encoding in UTF-8) in the POST request.\n 4- Assertion 1: After POST request, if HTTP status Code is 200 then this Testcase is pass\n 5- Assertion 2: \"test_cc_getKeyValues\" function is called to verify data is successfully posted and can be retrieved after updating the Keys.\n # 6- test_ff_updateBucketDefaultTtl(self):\n 1- This Function update the default_ttl value for the Bucket\n 2- Assertion 1: After POST request, if HTTP status Code is 200 then this Testcase is passed\n 3- Assertion 2: \"test_cc_getKeyValues\" function is called to verify data is successfully posted and can be retrieved after updating the default_ttl.\n # 7- 
\n" }, { "alpha_fraction": 0.6227935552597046, "alphanum_fraction": 0.6359682679176331, "avg_line_length": 47.1698112487793, "blob_id": "fabd22e11e36e887411ef2d38cdcc7efd9a37345", "content_id": "c21c403a2b08e4b6b419259a072b3ee990989a07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7818, "license_type": "no_license", "max_line_length": 214, "num_lines": 159, "path": "/kvdbApiTest.py", "repo_name": "usmansafdar28/kvdb", "src_encoding": "UTF-8", "text": "import json\r\nimport unittest\r\nimport urllib.request as urllib2\r\nimport urllib\r\n\r\nclass KvdbApiTesting(unittest.TestCase):\r\n\r\n    \"\"\"\r\n    All the test cases are written as methods, which execute in alphabetical order.\r\n    If all the test cases pass, no error appears:\r\n    Ran XX tests in XXX seconds\r\n    \"OK\" appears at the bottom\r\n    \"\"\"\r\n\r\n    ## Class-level attributes holding the global values shared throughout this test suite\r\n    \r\n    bucket=''\r\n    values = [1, 2, 3]\r\n    data = {}\r\n    url = 'https://kvdb.io'\r\n    keyName = 'mykey'\r\n    secretKey = 'mysecret'\r\n    writeKey = 'myknock'\r\n    default_ttl = 3600\r\n    testCaseCounter=0\r\n    \r\n    def setUp(self):\r\n        \r\n        KvdbApiTesting.testCaseCounter = KvdbApiTesting.testCaseCounter+1\r\n        print(\"TestCase #: \",KvdbApiTesting.testCaseCounter,\" Execution Started!\")\r\n        \r\n    \r\n    def test_aa_createSimpleBucket(self):\r\n        '''\r\n        This function creates a bucket; if the bucket string length is equal to 22 characters, the test passes\r\n        '''\r\n        req = urllib2.Request(KvdbApiTesting.url, KvdbApiTesting.data,{'Content-Type': 'application/json'})\r\n        f = urllib2.urlopen(req) ## Creating a new bucket\r\n        for x in f:\r\n            KvdbApiTesting.bucket=x.decode('utf-8')\r\n        self.assertEqual(len(KvdbApiTesting.bucket),22) ### Verify the bucket is created as per specs (string length is 22 characters)\r\n\r\n    def test_bb_updateBucketSecretKey(self):\r\n        '''\r\n        This function performs three jobs:\r\n        1- Updates the bucket with a secret key, which is then required to fetch the bucket keys.\r\n        2- Sends the JSON data (after encoding in UTF-8) in the POST request.\r\n        3- After the POST request, if the HTTP status code is 200, the test passes\r\n        '''\r\n        \r\n        KvdbApiTesting.url = KvdbApiTesting.url + \"/\" + KvdbApiTesting.bucket + \"/KEY?key=\" + KvdbApiTesting.secretKey + \"/\" + KvdbApiTesting.keyName\r\n        KvdbApiTesting.data = json.dumps(KvdbApiTesting.values).encode(\"utf-8\")\r\n        req = urllib2.Request(KvdbApiTesting.url, KvdbApiTesting.data,{'Content-Type': 'application/json'})\r\n        f = urllib2.urlopen(req)\r\n        self.assertEqual(f.getcode(),200) ### Verify the API response returns success code 200\r\n\r\n    def test_dd_retrieveWithoutSecretKey(self):\r\n        '''\r\n        This function verifies that key/values cannot be retrieved without providing the secret key.\r\n        If the HTTP status code is 404, the test passes, as the URL cannot be found without the secret key\r\n        '''\r\n        urlWithoutKey = 'https://kvdb.io/' +KvdbApiTesting.bucket+ \"/\" + KvdbApiTesting.keyName\r\n        req = urllib2.Request(urlWithoutKey) ## GET Request\r\n
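        ## No secret key is supplied in this URL, so the server should answer 404 and urlopen should raise HTTPError\r\n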
        try:\r\n            f = urllib2.urlopen(req)\r\n            for x1 in f:\r\n                fetchedResponse=x1.decode('utf-8')\r\n        except urllib.error.HTTPError as errh:\r\n            print(\"Status Code--- retrieveWithoutSecretKey: \",errh.code)\r\n            self.assertEqual(errh.code,404) \r\n        \r\n    def test_cc_getKeyValues(self):\r\n        '''\r\n        This function verifies that key/values can be retrieved when the secret/write keys are provided.\r\n        The GET response is compared with the data we already POSTed for this key\r\n        '''\r\n        fetchedResponse = ''\r\n        req = urllib2.Request(KvdbApiTesting.url) ## GET Request\r\n        f = urllib2.urlopen(req)\r\n        for x1 in f:\r\n            fetchedResponse1=x1.decode('utf-8')\r\n            fetchedResponse=x1\r\n        self.assertEqual(fetchedResponse, KvdbApiTesting.data) \r\n\r\n    def test_ee_updateBucketSecretAndWriteKeys(self):\r\n        '''\r\n        This function performs four jobs:\r\n        1- Updates the bucket with a secret key, which is then required to fetch the bucket keys.\r\n        2- Updates the bucket with a write key, which is then required to write to the bucket.\r\n        3- Sends the JSON data (after encoding in UTF-8) in the POST request.\r\n        4- The \"test_cc_getKeyValues\" function is called to verify data is successfully posted and can be retrieved after updating the keys.\r\n        '''\r\n        KvdbApiTesting.url = 'https://kvdb.io/' + KvdbApiTesting.bucket +\"/KEY?key=\" + KvdbApiTesting.secretKey + \"/\" + KvdbApiTesting.writeKey + \"/\" + KvdbApiTesting.keyName\r\n        KvdbApiTesting.data = json.dumps(KvdbApiTesting.values).encode(\"utf-8\")\r\n        req = urllib2.Request(KvdbApiTesting.url, KvdbApiTesting.data,{'Content-Type': 'application/json'})\r\n        f = urllib2.urlopen(req)\r\n        self.assertEqual(f.getcode(),200) ### Verify the API response returns a success code\r\n        ###\r\n        ### Now calling \"test_cc_getKeyValues\" to verify that data can be retrieved with the updated keys\r\n        ###\r\n        KvdbApiTesting.test_cc_getKeyValues(self)\r\n        \r\n    def test_ff_updateBucketDefaultTtl(self):\r\n        '''\r\n        1- This function updates the default_ttl value for the bucket\r\n        2- After the POST request, if the HTTP status code is 200, the test passes\r\n        3- The \"test_cc_getKeyValues\" function is called to verify data is successfully posted and can be retrieved after updating the default_ttl.\r\n        '''\r\n        \r\n        KvdbApiTesting.url = 'https://kvdb.io/' + KvdbApiTesting.bucket +\"/KEY?key=\" + KvdbApiTesting.secretKey + \"/\" + KvdbApiTesting.writeKey + \"/\" + str(KvdbApiTesting.default_ttl) + \"/\" + KvdbApiTesting.keyName\r\n        KvdbApiTesting.data = json.dumps(KvdbApiTesting.values).encode(\"utf-8\")\r\n        req = urllib2.Request(KvdbApiTesting.url, KvdbApiTesting.data,{'Content-Type': 'application/json'})\r\n        f = urllib2.urlopen(req)\r\n        self.assertEqual(f.getcode(),200) ### Verify the API response returns a success code\r\n        ###\r\n        ### Now calling \"test_cc_getKeyValues\" to verify that data can be retrieved with the updated default_ttl\r\n        ###\r\n        KvdbApiTesting.test_cc_getKeyValues(self)\r\n        \r\n    def test_gg_getKeysList(self):\r\n        '''\r\n        This function lists the key/values in the bucket (the secret key is required to access the data)\r\n        The GET response is compared with the data we POSTed for this key\r\n        '''\r\n        fetchedResponse = ''\r\n        listKeysUrl = 'https://kvdb.io/' + KvdbApiTesting.bucket +\"/KEY?key=\" + KvdbApiTesting.secretKey +\"/?values=true&format=json\"\r\n        req = urllib2.Request(listKeysUrl) ## GET Request for Listing All Keys\r\n        f = urllib2.urlopen(req)\r\n        for x1 in f:\r\n            fetchedResponse1=x1.decode('utf-8')\r\n            fetchedResponse=x1\r\n
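        ## The raw bytes returned should equal the JSON payload stored earlier\r\n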
        self.assertEqual(fetchedResponse, KvdbApiTesting.data)\r\n        \r\n    def test_hh_deleteBucket(self):\r\n        '''\r\n        The bucket is deleted in this function, and then a GET request is made to verify it; if a 404 status code\r\n        is returned, the test passes.\r\n        '''\r\n        fetchedResponse = ''\r\n        req = urllib2.Request(KvdbApiTesting.url) ## Request to the bucket URL; the method is overridden to DELETE below\r\n        req.get_method = lambda: 'DELETE' ## Delete Method\r\n        f = urllib2.urlopen(req)\r\n        for x1 in f:\r\n            fetchedResponse1=x1.decode('utf-8')\r\n            fetchedResponse=x1\r\n\r\n        req = urllib2.Request(KvdbApiTesting.url) ## GET Request After Deleting Bucket\r\n        try:\r\n            f = urllib2.urlopen(req)\r\n            for x1 in f:\r\n                fetchedResponse=x1.decode('utf-8')\r\n        except urllib.error.HTTPError as errh:\r\n            print(\"Status Code--- deleteBucket: \",errh.code)\r\n            self.assertEqual(errh.code,404) ### Verify the API response returns status code 404 (Not Found)\r\n        \r\n    def tearDown(self):\r\n        print(\"TestCase#: \", KvdbApiTesting.testCaseCounter,\" Executed\")\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main()\r\n" } ]
2
rickyzhangca/temp-gatsby-portfolio
https://github.com/rickyzhangca/temp-gatsby-portfolio
d374a693b0d8bfbc5a3e47bedea57468c2ca8ce8
8cae5d25782d997b33ff32de87c153421f3711b9
e31187f17c9dfcbb604f524a876cc7a8b0bbbe2c
refs/heads/main
2022-12-31T14:02:35.223225
2020-10-16T17:05:11
2020-10-16T17:05:11
304,673,437
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4892984628677368, "alphanum_fraction": 0.49167656898498535, "avg_line_length": 29.035715103149414, "blob_id": "d52255031a717c777361644d2aac270a447cc42c", "content_id": "d2a2aa55e5de3126e47f0a205889b4ccb9a53cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1682, "license_type": "no_license", "max_line_length": 85, "num_lines": 56, "path": "/src/components/Navbar.js", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "import React, { useState } from 'react'\nimport { Link, withPrefix } from 'gatsby'\n\nexport const Navbar = React.memo(() => {\n const [isBurgerActive, setBurgerActive] = useState(false)\n\n return (\n <nav className='navbar' role='navigation' aria-label='main navigation'>\n <div className='container'>\n <div className='navbar-brand'>\n <a className='navbar-item' href='https://www.rickyzhang.me' title='Ricky ZHang'>\n <img\n src={withPrefix('/img/favicon/favicon.png')}\n width='28'\n height='28'\n />\n </a>\n\n <a\n role='button'\n className={`navbar-burger burger${\n isBurgerActive ? ' is-active' : ''\n }`}\n aria-label='menu'\n aria-expanded='false'\n onClick={() => setBurgerActive(prevIsActive => !prevIsActive)}\n >\n <span aria-hidden='true' />\n <span aria-hidden='true' />\n <span aria-hidden='true' />\n </a>\n </div>\n\n <div className={`navbar-menu${isBurgerActive ? ' is-active' : ''}`}>\n <div className='navbar-start has-text-centered'>\n <Link className='navbar-item' to='/'>\n Home\n </Link>\n <Link className='navbar-item' to='/archives'>\n Projects\n </Link>\n <Link className='navbar-item' to='/about'>\n About\n </Link>\n </div>\n <div className='navbar-end'>\n <div className='is-flex is-justified-center'>\n </div>\n </div>\n </div>\n </div>\n </nav>\n )\n})\n\nexport default Navbar\n" }, { "alpha_fraction": 0.7592592835426331, "alphanum_fraction": 0.7592592835426331, "avg_line_length": 52, "blob_id": "20efa62a34e2f5753a0dc7f0d60a44328defa887", "content_id": "2ad2e64ce9421734ef1978adbc5c548b4588c59c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 162, "license_type": "no_license", "max_line_length": 95, "num_lines": 3, "path": "/README.md", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "Uploaded for demonstration purpose. The live site is at rickyzhang.me. 
Please ignore this repo.\r\n\r\nTo build the project, use `npm install` and then `npm start`.\r\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6894736886024475, "avg_line_length": 11.699999809265137, "blob_id": "b8ed40221f9d5ccb5f0686a3b0ba2eb2da4a47a4", "content_id": "3ad1fd4495d158466429bf04b568100c5a1f0a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "no_license", "max_line_length": 59, "num_lines": 30, "path": "/src/pages/blog/2019-04-29-Post5.md", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "---\nlayout: blog-post\ndraft: true\ndate: 2019-05-01T00:00:00.005Z\ntitle: Project 5\ndescription: Project Description\nquote:\n author: Author\n content: >-\n Quote Content\n source: ''\ntags:\n - Project 5\n---\n\n# h1\n\nText Text Text Text Text Text\n\n## h2\n\nText Text Text Text Text Text Text Text Text\n\n### h3\n\nText Text Text Text Text Text Text Text Text Text Text Text\n\n<br>\n\n`code`" }, { "alpha_fraction": 0.6301020383834839, "alphanum_fraction": 0.6862244606018066, "avg_line_length": 11.677419662475586, "blob_id": "9c046794b46f15bd576bd7181c79cf0c027cb603", "content_id": "153035c037b6521800b75307b5c99d4bfa227166", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 392, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/src/pages/blog/2019-04-26-Post2.md", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "---\nlayout: blog-post\ndraft: false\ndate: 2019-02-01T00:00:00.002Z\ntitle: Project 2\ndescription: Project Description\nquote:\n author: Author\n content: >-\n Quote Content\n source: ''\ntags:\n - Project 2\n - dwdwdw\n---\n\n# h1\n\nText Text Text Text Text Text\n\n## h2\n\nText Text Text Text Text Text Text Text Text\n\n### h3\n\nText Text Text Text Text Text Text Text Text Text Text Text\n\n<br>\n\n`code`" }, { "alpha_fraction": 0.6265356540679932, "alphanum_fraction": 0.6805896759033203, "avg_line_length": 11.75, "blob_id": "9b2c977eaf7797e4972e21a1c333bc8abecea7a5", "content_id": "46b11382cfd03dd2afba6bd6e865624a31710c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 411, "license_type": "no_license", "max_line_length": 59, "num_lines": 32, "path": "/src/pages/blog/2019-04-22-Post1.md", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "---\nlayout: blog-post\ndraft: false\ndate: 2019-01-01T00:00:00.001Z\ntitle: Project 1\ndescription: Project Description\nquote:\n author: Author\n content: >-\n “Quote Content”\n source: ''\ntags:\n - Project 1\n - dwdwd\n - Tadwdwddw\n---\n\n# h1\n\nText Text Text Text Text Text\n\n## h2\n\nText Text Text Text Text Text Text Text Text\n\n### h3\n\nText Text Text Text Text Text Text Text Text Text Text Text\n\n<br>\n\n`code`" }, { "alpha_fraction": 0.5592972040176392, "alphanum_fraction": 0.5907759666442871, "avg_line_length": 28.673913955688477, "blob_id": "8a2f74b4e646e2dbfa0aeb360b0650b292fbf9cc", "content_id": "f5d15d8297f5bed4ffb528bb000dd0052c28e566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 63, "num_lines": 46, "path": "/static/img/post/font-cover-catch-in-python/front_catch.py", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", 
"text": "# -*- coding: utf-8 -*-\nimport os,sys,re,string\nimport json,urllib2,cookielib\nimport eyed3\n\ndef mp3_find(source_path):\n\tmp3_list = []\n\tfor root, dirs, files in os.walk(source_path):\n\t\tmap(lambda x: mp3_list.append((root, x[:-4])),\n\t\t\tfilter(lambda x: x[-4:] == '.mp3', files))\n\treturn mp3_list\n\ndef mp3_load(mp3_path, mp3_name):\n\ttry:\n\t\tid3 = eyed3.load(os.path.join(mp3_path, mp3_name+'.mp3'))\n\t\treturn id3\n\texcept IOError:\n\t\tprint ' '.join([mp3_name,'read error.'])\n\t\tsys.exit(1)\n\ndef mp3_burn(id3, img):\n id3.tag.images.set(type=3,\n img_url=None,\n img_data=urllib2.urlopen(img).read(),\n mime_type='image/jpeg',\n description=u\"Front cover\")\n id3.tag.save(version = (2, 3, 0), encoding = 'latin1')\n\ndef mp3_process(mp3_list):\n\tfaild_list = []\n\tfor path, name in mp3_list:\n\t\tprint 'Writing %s...' %name\n\t\tid3 = mp3_load(path, name)\n\t\timg = search(' '.join(map(str,filter(lambda x: x,\n\t\t\t[id3.tag.title,id3.tag.artist,id3.tag.album]))))\n\t\tif not img: img = search(name)\n\t\tif img: mp3_burn(id3, img)\n\t\telse: faild_list.append(name)\n\treturn faild_list\n\nif __name__ == '__main__':\n\tfaild_list = mp3_process(mp3_find(r'test'))\n\tif faild_list: \n\t\tprint str(len(faild_list)) + ' file(s) faild.'\n\t\tfor i in faild_list: print '-- ' + i\n\telse: print 'Success!'\n\t" }, { "alpha_fraction": 0.5545023679733276, "alphanum_fraction": 0.649289071559906, "avg_line_length": 11.470588684082031, "blob_id": "ab467d382c902569922540c461e488f283d4277c", "content_id": "a770029a362b9db3288c55ec501f079606acf3b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 211, "license_type": "no_license", "max_line_length": 30, "num_lines": 17, "path": "/src/pages/about/index.md", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "---\nlayout: 'about-page'\npath: /about\ndate: 2019-04-13T12:00:00.000Z\ntitle: About Page\n---\n\nThis is the about page \n\n<style>\nsvg.social {\n width: 1em;\n height: 1em;\n position: relative;\n top: 2px;\n}\n</style>" }, { "alpha_fraction": 0.48621830344200134, "alphanum_fraction": 0.4906284511089325, "avg_line_length": 20.11627960205078, "blob_id": "9ce4b2ff374322da205fa872ac6979e5c2fbf9e2", "content_id": "f41b3c3307f3f061ce689ca492902d8d73efeabd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 908, "license_type": "no_license", "max_line_length": 60, "num_lines": 43, "path": "/src/components/Footer.js", "repo_name": "rickyzhangca/temp-gatsby-portfolio", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport { graphql, withPrefix, useStaticQuery } from 'gatsby'\n\nconst Footer = React.memo(() => {\n const data = useStaticQuery(graphql`\n query FooterQuery {\n site {\n siteMetadata {\n social {\n github {\n url\n }\n }\n }\n }\n }\n `)\n const { social } = data.site.siteMetadata\n\n return (\n <footer className='site-footer'>\n <div className='site-description'>\n <a\n className='has-text-info'\n href={social.github.url}\n target='_blank'\n rel='nofollow noreferrer noopener'\n >\n Github\n </a>\n {' | '}\n <a className='has-text-info' href={withPrefix('/')}>\n Portfolio\n </a>\n </div>\n <div className='copyright'>\n ©2019 Ricky Zhang \n </div>\n </footer>\n )\n})\n\nexport default Footer" } ]
8
yongzhengqi/EMERITUS
https://github.com/yongzhengqi/EMERITUS
e9fe8b1787ec6ec4e26b7fe7c86b050fd5404331
13fe68e42b2e8b971069ed0be3346b51a242f6f0
3e9841e2bd0f9301c1b9ac1ad20b558caccb6fea
refs/heads/master
2020-04-04T16:20:26.613174
2018-11-18T02:22:15
2018-11-18T02:22:15
156,073,553
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5954979658126831, "alphanum_fraction": 0.5975443124771118, "avg_line_length": 40.88571548461914, "blob_id": "dd646003d1ed09d7b147252e0bae057e28055ad9", "content_id": "84caeaf0ad3c03caa98270f4cbe43c0c3cd7977c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2932, "license_type": "no_license", "max_line_length": 119, "num_lines": 70, "path": "/train.py", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport torch\nimport torch.utils.data as Data\nfrom tensorboardX import SummaryWriter\nfrom time import localtime, strftime\n\nfrom dataset import DataProvider\nfrom utils import *\nfrom model import Net\n\nif __name__ == '__main__':\n print(\"basic settings:\\ninput file name: {}\\nwindow size: {}\\ndimensionality: {}\".format(config.input_filename,\n config.window,\n config.dim))\n\n # initialize tensorboard\n tb_log_dir = 'logs/' + strftime(\"%Y-%m-%d-%H:%M:%S\", localtime())\n tb_writer = SummaryWriter(tb_log_dir)\n\n # initialize dataset\n data_provider = DataProvider(config.input_filename)\n data_loader = data_provider.get_training_set(config.dataset_size)\n loader_itr = iter(data_loader)\n\n # initialize model\n net = Net(data_provider.get_voc_size(), config.dim)\n net = net.cuda()\n net_multi_gpu = nn.DataParallel(net)\n gpu_num = torch.cuda.device_count()\n\n # specifying optimizing method\n criterion = nn.MSELoss()\n optimizer = optim.Adam(net_multi_gpu.parameters())\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=config.lr_adj_pat, min_lr=config.lr_min)\n\n # training\n for cur_epoch in tqdm(range(config.max_epoch), desc='training on {} GPUs...'.format(gpu_num)):\n try:\n mini_batch = next(loader_itr)\n except StopIteration:\n loader_itr = iter(data_loader)\n mini_batch = next(loader_itr)\n\n batched_x, batched_y = mini_batch\n batched_x, batched_y = batched_x.cuda(), batched_y.cuda()\n optimizer.zero_grad()\n output = net_multi_gpu(batched_x)\n loss = criterion(output, batched_y)\n loss.backward()\n optimizer.step()\n\n if (cur_epoch % config.tb_upd_gap) == 0:\n loss_var = loss.data.cpu().numpy()\n print('training loss: {}'.format(loss_var))\n tb_writer.add_scalar('training loss', loss_var, cur_epoch)\n if (cur_epoch % config.ckpt_save_gap) == 0:\n print('saving check point...')\n embed_vec = net_multi_gpu.module.fe.weight.detach().cpu().numpy()\n save_model(data_provider.get_voc(), embed_vec, './results/{}-epoch.ckpt'.format(cur_epoch))\n if (cur_epoch % config.latest_upd_gap) == 0:\n print('updating latest model...')\n embed_vec = net_multi_gpu.module.fe.weight.detach().cpu().numpy()\n save_features(embed_vec, cur_epoch, tb_log_dir, data_provider.word2idx)\n save_model(data_provider.get_voc(), embed_vec, config.latest_ckpt_dir)\n cur_epoch += 1\n scheduler.step(loss)\n\n embed_vec = net_multi_gpu.module.fe.weight.detach().cpu().numpy()\n save_model(data_provider.get_voc(), embed_vec, config.output_filename)\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 13.05555534362793, "blob_id": "d77d41297130da18e4d54a9787b0221cc43df652", "content_id": "d76a4563a4004a16355a03b804b496a659ea6910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 252, "license_type": "no_license", "max_line_length": 88, "num_lines": 18, "path": "/Makefile", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "LOG_DIR = 
./logs\nDATA_DIR = ./data\nDATA_URL = https://ml.qizy.tech/wp-content/uploads/2018/11/quora_questions_gbk_fixed.txt\n\nall:\n\n\ntrain:\n\t./train.py\n\ntb:\n\ttensorboard --logdir $(LOG_DIR)\n\neval:\n\t./tester.py\n\nget data:\n\twget -P $(DATA_DIR) $(DATA_URL)" }, { "alpha_fraction": 0.762499988079071, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 25.66666603088379, "blob_id": "194c4572f9b1e2a5821c9005aad74c3fe66f4327", "content_id": "d45b6a97a749506867992f3545cb7061cb7e2265", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 80, "license_type": "no_license", "max_line_length": 58, "num_lines": 3, "path": "/evaluation/ITC/eval.sh", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\npython3 eval_wordsim.py word_list.txt matrix wordsim_quora\n" }, { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.6255012154579163, "avg_line_length": 30.174999237060547, "blob_id": "d718014adfbc5d23f0eb764882d7f81fb77b3ef7", "content_id": "92796d913e9137423f39a0883dcf59e965024459", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1247, "license_type": "no_license", "max_line_length": 80, "num_lines": 40, "path": "/utils.py", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "import torch.nn as nn\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom common import config\nimport json\n\n\ndef save_model(words, features, filename):\n output_file = open(filename, 'w', encoding='utf-8')\n\n for word, feature in zip(words, features):\n output_file.write('{} '.format(word))\n\n for num in feature:\n output_file.write('{} '.format(num))\n output_file.write('\\n')\n\n output_file.close()\n print('result saved to {}'.format(filename))\n\n\ndef save_features(feats, cur_epoch, tb_log_dir, word2idx):\n word_list_file_name = 'evaluation/ITC/word_list.txt'\n word_list = []\n for idx, line in enumerate(open(word_list_file_name, 'r')):\n word_list.append(line.strip())\n\n feat_lst = []\n for idx, word in enumerate(word_list):\n if word in word2idx.keys():\n feat_lst.append(feats[word2idx[word]])\n else:\n print('word \\'{}\\' not found...'.format(word))\n feat_lst.append(np.random.randint(low=-1, high=1, size=config.dim))\n feat_lst = [feat.tolist() for feat in feat_lst]\n\n check_point = {'feats': feat_lst, 'epoch': cur_epoch, 'log_dir': tb_log_dir}\n json.dump(check_point, open(config.valida_ckpt_dir, 'w'))\n" }, { "alpha_fraction": 0.6364124417304993, "alphanum_fraction": 0.6729323267936707, "avg_line_length": 27.212121963500977, "blob_id": "b38877d30e994b0d243c2064a66e1882edec10e4", "content_id": "da284460b10453149db30ed9a3d99d2fc9d29e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1862, "license_type": "no_license", "max_line_length": 103, "num_lines": 66, "path": "/common.py", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "class Config:\n # the size of windows of skip-gram\n window = 3\n\n # the number of dimensions of features\n dim = 300\n\n # where you saved your corpus\n input_filename = './data/quora_questions_gbk_fixed.txt'\n\n # where you want to save the representation of your words\n output_filename = './results/output.window={}.dim={}'.format(window, dim)\n\n # if a word appears less than word_min_cnt times, it will be replaced\n word_min_cnt = 30\n\n # the max number of sentence used for 
training\n # set to None if you want to ignore this limit\n dataset_size = None\n\n # batch size of SGD\n batch_size = 2048\n\n # parameter in Negative sampling\n # see more at https://arxiv.org/abs/1301.3781\n ng_pow = 0.75\n\n # parameter in Negative sampling\n # see more at https://arxiv.org/abs/1301.3781\n ng_table_sz = 100000000\n\n # parameter in Negative sampling\n # see more at https://arxiv.org/abs/1301.3781\n ng_k = 5\n\n # if to lazy load the training set\n saved_training_set = None # 'data/training_set.json'\n\n # run how many mini-batches between two updates on tensorboard\n tb_upd_gap = 500\n\n # run how many mini-batches between updates on saved models\n latest_upd_gap = 5000\n\n # the gap between check points\n ckpt_save_gap = 5000\n\n # max mini-batch to train\n max_epoch = 300000\n\n # where to save check latest models\n latest_ckpt_dir = './results/latest'\n\n # where to save file for testing on validation set\n valida_ckpt_dir = './results/latest.json'\n\n # hyper-parameter on optimizing\n # see more at https://pytorch.org/docs/stable/optim.html#torch.optim.lr_scheduler.ReduceLROnPlateau\n lr_adj_pat = 1e4\n\n # min learning rate\n # see more at https://pytorch.org/docs/stable/optim.html#torch.optim.lr_scheduler.ReduceLROnPlateau\n lr_min = 1e-5\n\n\nconfig = Config()\n" }, { "alpha_fraction": 0.5979255437850952, "alphanum_fraction": 0.6116534471511841, "avg_line_length": 28.258928298950195, "blob_id": "129a9cfc9fa7035eb3dd5a07d63ecab712a5f911", "content_id": "68dc629dad0904d257106e2d1d40201110ee526d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3278, "license_type": "no_license", "max_line_length": 103, "num_lines": 112, "path": "/tester.py", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import print_function\n\nword_num = 3000\n\nimport numpy as np\nfrom common import config\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport json\nimport time\n\n\ndef read_word_list(fin):\n word_list = []\n word2idx = {}\n for idx, line in enumerate(fin):\n word_list.append(line.strip())\n word2idx[line.strip()] = idx\n fin.close()\n return word_list, word2idx\n\n\ndef read_gold_standard(fin, word2idx):\n gold_standard = []\n for line in fin:\n word1, word2, sim = line.strip().split()\n if word1 not in word2idx or word2 not in word2idx:\n continue\n gold_standard.append((word2idx[word1], word2idx[word2], float(sim)))\n fin.close()\n return gold_standard\n\n\ndef eval_ITC(gold_standard, matrix):\n rs = 0\n my_similarity = []\n for wid1, wid2, _ in gold_standard:\n my_similarity.append(matrix[wid1][wid2])\n n = len(my_similarity)\n\n my_similarity_rank = {item[1]: item[0] for item in\n enumerate(sorted(range(len(my_similarity)), key=lambda k: my_similarity[k]))}\n gold_similarity_rank = sorted(enumerate(gold_standard), key=lambda x: x[1][2])\n for rkg in range(len(gold_similarity_rank)):\n pair_id = gold_similarity_rank[rkg][0]\n rkm = my_similarity_rank[pair_id]\n rs += (rkg - rkm) ** 2\n rs = 1 - 6 * (rs) / n / (n * n - 1)\n return rs\n\n\ndef get_norm(a):\n return (a ** 2).sum() ** 0.5\n\n\ndef similarity(vec_a, vec_b):\n vec_b, vec_a = np.array(vec_a), np.array(vec_b)\n dot = np.dot(vec_a, vec_b)\n cos_dis = dot / get_norm(vec_a) / get_norm(vec_b)\n return cos_dis\n\n\ndef get_matrix(feat_lst):\n dis_matrix = []\n for i in tqdm(range(word_num), desc='creating 
distance matrix'):\n dis_matrix.append([])\n for j in range(word_num):\n dis = similarity(feat_lst[i], feat_lst[j])\n dis_matrix[-1].append(dis)\n\n return dis_matrix\n\n\nif __name__ == \"__main__\":\n word_list_file_name = 'evaluation/ITC/word_list.txt'\n gold_standard_file_name = 'evaluation/ITC/wordsim_quora'\n\n word_list, word2idx = read_word_list(open(word_list_file_name))\n gold_standard = read_gold_standard(open(gold_standard_file_name), word2idx)\n\n last_epoch = -1\n\n while last_epoch < config.max_epoch - 1:\n data_pack = json.load(open(config.valida_ckpt_dir, 'r'))\n feats = data_pack['feats']\n epoch = data_pack['epoch']\n log_dir = data_pack['log_dir']\n\n tb_writer = SummaryWriter(log_dir)\n\n if epoch == last_epoch:\n print('latest model is the same, sleep for 30s')\n time.sleep(30)\n continue\n\n avg_norm = np.array([get_norm(np.array(feat)) for feat in feats]).mean()\n print('features\\' average norm: {}'.format(avg_norm))\n tb_writer.add_scalar('average norm', avg_norm, epoch)\n\n last_epoch = epoch\n\n print('evaluating epoch {}...'.format(epoch))\n\n matrix = get_matrix(feats)\n validation_var = eval_ITC(gold_standard, matrix)\n tb_writer.add_scalar('validation score', validation_var, epoch)\n\n print('evaluation done, score = {}'.format(validation_var))\n\n" }, { "alpha_fraction": 0.5193701982498169, "alphanum_fraction": 0.5266211032867432, "avg_line_length": 35.29323196411133, "blob_id": "1c2f8fd5a55a3eeb104cdd8c3c28d061db9a5161", "content_id": "6fa7cc68c54345fab259fc5fcd4d8ca54e263ba9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4827, "license_type": "no_license", "max_line_length": 108, "num_lines": 133, "path": "/dataset.py", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "from tqdm import tqdm\nimport torch\nimport math\nimport numpy as np\nimport json\nimport torch.utils.data as Data\n\nfrom common import config\n\n\nclass DataProvider:\n class NegativeTable:\n def __init__(self, vocab, word2idx):\n prob_sum = 0.0\n for word, cnt in vocab.items():\n if cnt >= config.word_min_cnt:\n prob_sum += math.pow(cnt, config.ng_pow)\n\n neg_table = []\n for word, cnt in tqdm(vocab.items(), desc='Initializing Negative Table'):\n if cnt >= config.word_min_cnt:\n ins = math.pow(cnt, config.ng_pow) / prob_sum * config.ng_table_sz\n id = word2idx[word]\n for i in range(int(ins)):\n neg_table.append(id)\n\n while len(neg_table) < config.ng_table_sz:\n neg_table.append(0)\n\n self.neg_table = neg_table\n\n def sample(self, x):\n idxs = np.random.randint(low=0, high=config.ng_table_sz, size=x)\n return [self.neg_table[idx] for idx in idxs]\n\n def __init__(self, input_file):\n self.input_file = input_file\n text_file = open(input_file, 'r', encoding='utf-8')\n self.input_file_sz = 0\n\n vocab = {}\n\n for idx, line in tqdm(enumerate(text_file), desc='Reading corpus'):\n self.input_file_sz += 1\n line_words = line.split()\n for word in line_words:\n if self.is_word(word):\n if word not in vocab.keys():\n vocab[word] = 0\n vocab[word] = vocab[word] + 1\n\n vocab_lst = []\n for word, word_cnt in vocab.items():\n if word_cnt >= config.word_min_cnt:\n vocab_lst.append(word)\n print('{} words valid'.format(len(vocab_lst)))\n\n word2idx = {}\n for idx, word in enumerate(vocab_lst):\n word2idx[word] = idx\n\n for word, word_cnt in vocab.items():\n if word_cnt < config.word_min_cnt:\n word2idx[word] = -1\n\n self.word2idx = word2idx\n self.vocab = vocab_lst\n self.ntable = self.NegativeTable(vocab, 
word2idx)\n\n    def get_training_set(self, set_size):\n        training_set = []\n\n        if config.saved_training_set is not None:\n            print('loading saved training set: {}'.format(config.saved_training_set))\n            training_set = json.load(open(config.saved_training_set, 'r', encoding='utf-8'))\n            print('using saved training set: {}'.format(config.saved_training_set))\n        else:\n            text_file = open(self.input_file, 'r', encoding='utf-8')\n\n            for idx, line in tqdm(enumerate(text_file), desc='preparing dataset', total=self.input_file_sz):\n                line_words = line.split()\n                line_words = [self.word2idx[word] if self.is_word(word) else -1 for word in line_words]\n                # build skip-gram pairs: in-window words become positives (label 1), sampled words negatives (label 0)\n                for idx, word in enumerate(line_words):\n                    anchor = line_words[idx]\n                    if anchor >= 0:\n                        negative_samples = self.ntable.sample(config.ng_k * config.window)\n                        for negative_sample in negative_samples:\n                            training_set.append([[anchor, negative_sample], 0])\n\n                        beg = max(0, idx - config.window)\n                        end = min(len(line_words) - 1, idx + config.window) + 1\n                        for pos_idx in range(beg, end):\n                            positive = line_words[pos_idx]\n                            if pos_idx != idx and positive >= 0:\n                                training_set.append([[anchor, positive], 1])\n\n                if set_size is not None and len(training_set) > set_size:\n                    break\n\n        # json.dump(training_set, open('./data/training_set.json', 'w', encoding='utf-8'))\n\n        if set_size is not None:\n            training_set = training_set[:set_size]\n\n        print('{} pairs ready...'.format(len(training_set)))\n        x = torch.LongTensor([pair[0] for pair in training_set])\n        y = torch.Tensor([pair[1] for pair in training_set])\n        dataset_combined = torch.utils.data.TensorDataset(x, y)\n\n        gpu_num = torch.cuda.device_count()\n\n        dataset_dataloader = Data.DataLoader(\n            dataset=dataset_combined,\n            batch_size=config.batch_size,  # * gpu_num,\n            shuffle=True,\n            num_workers=1,\n        )\n\n        print('DataLoader ready...')\n\n        return dataset_dataloader\n\n    def get_voc(self):\n        return self.vocab\n\n    def get_voc_size(self):\n        return len(self.vocab)\n\n    def is_word(self, _word):\n        for ch in _word:\n            if (ch < '0' or ch > '9') and (ch < 'a' or ch > 'z'):\n                return False\n        return True\n" }, { "alpha_fraction": 0.6776556968688965, "alphanum_fraction": 0.7124541997909546, "avg_line_length": 23.840909957885742, "blob_id": "c082d74733f2dcdcd1bed852c40b6d6f772992fb", "content_id": "3bc41faf0627600c20ebeaba0aa4833aacc0d3c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1092, "license_type": "no_license", "max_line_length": 154, "num_lines": 44, "path": "/README.md", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", "text": "# EMERITUS: MID-TERM HOMEWORK OF INTRODUCTION TO COMPUTER SCIENCE\n\n## 0. Report\n\nPlease visit [https://ml.qizy.tech/wp-content/uploads/2018/11/EMERITUS.pdf](https://ml.qizy.tech/wp-content/uploads/2018/11/EMERITUS.pdf) for the full report.\n![https://ml.qizy.tech/wp-content/uploads/2018/11/EMERITUS_preview.png](https://ml.qizy.tech/wp-content/uploads/2018/11/EMERITUS_preview.png)\n\n## 1. TL;DR.\n\n### 1.0 Before training\nView `common.py` and make sure the basic settings of this project fit your needs.\n\n### 1.1 Get training data\n```bash\nmake get data\n```\n\n### 1.2 Start training\n```bash\nmake train\n```\n\n### 1.3 Start tensorboard\n```bash\nmake tb\n```\n\n### 1.4 Testing your models\n```bash\nmake eval\n``` \n\n## 2. Structure of this project\n\n* Hyper-parameters are stored in `common.py`.\n* The model itself is defined in `model.py`.\n* Modules to construct training data are in `dataset.py`.\n* The evaluation module is defined in `tester.py`.\n* The training function is stored in `train.py`.\n* Other functions are stored in `utils.py` (see the sketch below for how they fit together).\n\n
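A rough sketch of how these pieces connect (mirroring `train.py`; not a drop-in script):\n\n```python\nfrom common import config\nfrom dataset import DataProvider\nfrom model import Net\n\nprovider = DataProvider(config.input_filename)           # builds the vocabulary and skip-gram pairs\nloader = provider.get_training_set(config.dataset_size)  # DataLoader of (anchor, context) pairs with 0/1 labels\nnet = Net(provider.get_voc_size(), config.dim)           # one embedding row per vocabulary word\n```\n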
\n## 3. Further Questions\nKindly contact [Yong Zhengqi](mailto:[email protected])." }, { "alpha_fraction": 0.5136541128158569, "alphanum_fraction": 0.5305591821670532, "avg_line_length": 26.464284896850586, "blob_id": "09f52b3030224abdd3a523b70c52d58201fc096b", "content_id": "ff5c72f3cdc8d1890b5ad152e5dadf58af136250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 769, "license_type": "no_license", "max_line_length": 91, "num_lines": 28, "path": "/model.py", "repo_name": "yongzhengqi/EMERITUS", "src_encoding": "UTF-8", 
9
ncuriale/basic-neural-net
https://github.com/ncuriale/basic-neural-net
c9673f7722a16d4f0d8f54c8889c60caa4af702f
a9aaadc7398865ca537e4264370e794e48d94e3f
767f2cf77e32ab49afc5f9a3d374f38df599cbae
refs/heads/master
2021-01-19T01:48:35.061242
2017-04-05T02:01:44
2017-04-05T02:01:44
87,254,552
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 27, "blob_id": "306b0c3759fc81ce4d0cbe2799cf2b6379e23755", "content_id": "838e410afe9a66c889639d144adad16d160dde78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 56, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/README.md", "repo_name": "ncuriale/basic-neural-net", "src_encoding": "UTF-8", "text": "# basic-neural-net\nTesting of neural network generation\n" }, { "alpha_fraction": 0.5020242929458618, "alphanum_fraction": 0.5870445370674133, "avg_line_length": 23.579999923706055, "blob_id": "2d7fd8f2dd513c3da155d2a78e4d6d7b9cd93a97", "content_id": "6b8b7f700b1c4be74896ade30d7cb955448caab8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 74, "num_lines": 50, "path": "/basicneuralnet.py", "repo_name": "ncuriale/basic-neural-net", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef nonlin(x,deriv=0):\n if(deriv==1):\n return x*(1-x)\n \n return 1/(1+np.exp(-x))\n\n\nX = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])\ny = np.array([[0,1,1,0]]).T\ninp=len(X[0])\nh1=100\nh2=70\noutput=1\n\nnp.random.seed(1)\n# randomly initialize our weights with mean 0\nsyn0 = 2*np.random.random((inp,h1)) - 1\nsyn1 = 2*np.random.random((h1,h2)) - 1\nsyn2 = 2*np.random.random((h2,output)) - 1\n\nfor j in range(100000):\n # Feed forward through layers 0, 1, and 2\n l0 = X\n l1 = nonlin(np.dot(l0,syn0))\n l2 = nonlin(np.dot(l1,syn1))\n l3 = nonlin(np.dot(l2,syn2))\n \n # how much did we miss the target value?\n l3_error = y - l3\n \n if (j% 1000) == 0:\n print (\"Error:\" + str(np.mean(np.abs(l3_error))))\n \n #back-propagation through layers\n l3_delta = l3_error*nonlin(l3,deriv=1)\n l2_error = l3_delta.dot(syn2.T) \n l2_delta = l2_error*nonlin(l2,deriv=1)\n l1_error = l2_delta.dot(syn1.T)\n l1_delta = l1_error * nonlin(l1,deriv=1)\n\n syn2 += l2.T.dot(l3_delta)\n syn1 += l1.T.dot(l2_delta)\n syn0 += l0.T.dot(l1_delta)\n\n#test neural network\ntestX=[1, 0, 0]\nres= nonlin(np.dot(nonlin(np.dot( nonlin(np.dot(testX,syn0)),syn1)),syn2))\nprint (res)\n\n\n\n\n\n\n" } ]
2
S1538745720/hello_python
https://github.com/S1538745720/hello_python
19ec798519acebb42561c5722be1addd43a5a543
3198e158ed62f129760f0a7243c1e9da83777c4e
9d6104234ed59dd411dd627ce5c57e16ad477c24
refs/heads/master
2020-08-16T20:55:46.195083
2019-10-16T13:14:45
2019-10-16T13:14:45
215,540,806
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47623127698898315, "alphanum_fraction": 0.5019271969795227, "avg_line_length": 26.654321670532227, "blob_id": "331231463984c434ed6dde6f76017cd2cc6cdda6", "content_id": "03370cc4cb23d0da8a6f24d790a9e971670a6c96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2461, "license_type": "no_license", "max_line_length": 67, "num_lines": 81, "path": "/身份证验证系统/CheckIDC.py", "repo_name": "S1538745720/hello_python", "src_encoding": "UTF-8", "text": "import datetime,time\r\nimport tkinter.messagebox as messbox\r\n\r\nclass CheckIDC:\r\n def __init__(self,id_number):\r\n self.id_number=id_number\r\n # 切片身份号码\r\n self.area=id_number[:6]\r\n self.birthday=id_number[6:14]\r\n self.gender=id_number[14:17]\r\n self.exits=id_number[17:]\r\n\r\n self.lists=[]\r\n self.lists.append(self.check_birthday(self.birthday))\r\n self.lists.append(self.check_gender())\r\n self.lists.append(self.check_area())\r\n self.lists.append(self.check_number())\r\n\r\n def results(self):\r\n return self.lists\r\n\r\n def check_gender(self):\r\n if int(self.gender)%2==0:\r\n return '女'\r\n else:\r\n return '男'\r\n #获取时间\r\n def check_birthday(self,birthday):\r\n try:\r\n old_date=datetime.datetime(1970,1,2)\r\n old_time=time.mktime(old_date.timetuple())\r\n now_time=time.time()\r\n\r\n year=birthday[:4]\r\n month=birthday[4:6]\r\n day=birthday[6:]\r\n ymd=datetime.datetime(int(year),int(month),int(day))\r\n\r\n yearmd=time.mktime(ymd.timetuple())\r\n\r\n if old_time<yearmd and yearmd<now_time:\r\n return ymd.strftime(\"%Y-%m-%d\")\r\n else:\r\n return False\r\n except:\r\n messbox.showinfo('消息',\"您输入的日期不对,请重新输入\")\r\n\r\n # 验证归属地\r\n def check_area(self):\r\n # 获取到所有的归属地\r\n f=open(file='身份证归属地.txt',mode='r',encoding='utf-8')\r\n all_area=f.readlines()\r\n res_area=''\r\n for item in all_area:\r\n if self.area==item[:6]:\r\n res_area=item[6:-1]\r\n if res_area=='':\r\n return False\r\n else:\r\n return res_area\r\n\r\n # 获取验证\r\n def get_check_number(self):\r\n number=self.id_number[:17]\r\n # 系数\r\n xi_list=[7,9,10,5,8,4,2,1,6,3,7,9,10,5,8,4,2]\r\n check_number= ['1','0','X','9','8','7','6','5','4','3','2']\r\n # 验证\r\n of_num=0\r\n for index in range(len(number)):\r\n of_num+=int(number[index])*int(xi_list[index])\r\n\r\n yu_num=of_num%11\r\n return check_number[yu_num]\r\n\r\n # 校验码验证\r\n def check_number(self):\r\n if self.get_check_number()==self.exits:\r\n return '有效'\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5509883761405945, "alphanum_fraction": 0.598682165145874, "avg_line_length": 36.89024353027344, "blob_id": "7557058eda0eeedd709474872e0668520ad600dd", "content_id": "6b156f6f4a7c2908aca2e20dd56126bdcf28760f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3421, "license_type": "no_license", "max_line_length": 118, "num_lines": 82, "path": "/身份证验证系统/IDCheckGUI.py", "repo_name": "S1538745720/hello_python", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nfrom CheckIDC import *\r\n\r\nclass IDCheckGUI:\r\n def __init__(self):\r\n self.frame=Tk()\r\n self.frame.title('身份证信息校验')\r\n self.frame.geometry('700x465+200+200')\r\n self.frame['bg']='lightblue'\r\n # 图片\r\n self.image=PhotoImage(file='6L9PO.png')\r\n self.Label_image=Label(self.frame,image=self.image)\r\n self.Label_image.place(x=10,y=10)\r\n # 校验表单\r\n self.label=Label(self.frame,text='请输入身份证号码:',font=('微软雅黑',14,'bold'),bg='navy',fg='lightblue')\r\n 
        self.label.place(x=320,y=20)\r\n\r\n        self.Entry_is_input=Entry(self.frame,width=21,font=('微软雅黑',14,'bold'))\r\n        self.Entry_is_input.place(x=320,y=60)\r\n\r\n        self.Button_exits=Button(self.frame,command=self.get_info,text='Check',width=8,font=('微软雅黑',11,'bold'),fg='blue')\r\n        self.Button_exits.place(x=600,y=60)\r\n        # result display form\r\n        self.label=Label(self.frame,text='Valid:',font=('微软雅黑',16,'bold'),bg='lightblue',fg='blue')\r\n        self.label.place(x=320,y=140)\r\n\r\n        self.varEntry1=StringVar()\r\n        self.Entry1=Entry(self.frame,width=8,state=DISABLED,font=('微软雅黑',14,'bold'),textvariable=self.varEntry1)\r\n        self.Entry1.place(x=430,y=140)\r\n\r\n        self.label=Label(self.frame,text='Gender:',font=('微软雅黑',16,'bold'),bg='lightblue',fg='blue')\r\n        self.label.place(x=362,y=200)\r\n\r\n        self.varEntry2=StringVar()\r\n        self.Entry2=Entry(self.frame,width=8,state=DISABLED,font=('微软雅黑',14,'bold'),textvariable=self.varEntry2)\r\n        self.Entry2.place(x=430,y=200)\r\n\r\n        self.label=Label(self.frame,text='Birth date:',font=('微软雅黑',16,'bold'),bg='lightblue',fg='blue')\r\n        self.label.place(x=320,y=260)\r\n\r\n        self.varEntry3=StringVar()\r\n        self.Entry3=Entry(self.frame,width=18,state=DISABLED,font=('微软雅黑',14,'bold'),textvariable=self.varEntry3)\r\n        self.Entry3.place(x=430,y=260)\r\n\r\n        self.label=Label(self.frame,text='Region:',font=('微软雅黑',16,'bold'),bg='lightblue',fg='blue')\r\n        self.label.place(x=342,y=320)\r\n\r\n        self.varEntry4=StringVar()\r\n        self.Entry4=Entry(self.frame,width=18,state=DISABLED,font=('微软雅黑',14,'bold'),textvariable=self.varEntry4)\r\n        self.Entry4.place(x=430,y=320)\r\n\r\n        self.Button_close=Button(self.frame,text='Close',width=8,font=('微软雅黑',12,'bold'),fg='blue',command=self.close)\r\n        self.Button_close.place(x=560,y=400)\r\n\r\n        self.show()\r\n    def show(self):\r\n        self.frame.mainloop()\r\n    # close the window\r\n    def close(self):\r\n        self.frame.destroy()\r\n\r\n    # handler for the Check button\r\n    def get_info(self):\r\n        # read the ID number\r\n        self.id_number=self.Entry_is_input.get()\r\n        checkidc=CheckIDC(str(self.id_number))\r\n        result_lists=checkidc.results()\r\n\r\n        # write the results into the display fields\r\n        if result_lists[0]==False or result_lists[1]==False or result_lists[3]==False:\r\n            self.varEntry1.set('Invalid')\r\n            self.varEntry2.set('')\r\n            self.varEntry3.set('')\r\n            self.varEntry4.set('')\r\n        else:\r\n            self.varEntry1.set(result_lists[3])\r\n            self.varEntry2.set(result_lists[1])\r\n            self.varEntry3.set(result_lists[0])\r\n            self.varEntry4.set(result_lists[2])\r\n\r\n\r\nIDCheckGUI()" } ]
2
Bharkavismveccse/project1
https://github.com/Bharkavismveccse/project1
821db5b9d8b90bc17d9a46e75cfcbca7e4f69890
2472665eb760c9be2439224f68f98a3ae53d52e7
b6a40b46b524c8019905b28a528444ef57e5f9df
refs/heads/main
2023-06-09T08:31:30.546677
2021-07-04T05:02:07
2021-07-04T05:02:07
382,766,336
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7300000190734863, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 23, "blob_id": "8eb96250c8a1aa16e78b54f95d549a0d2ed998fa", "content_id": "b7b504edbc8088942559f4fa982691c82b1cbf6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/project.py", "repo_name": "Bharkavismveccse/project1", "src_encoding": "UTF-8", "text": "import hashlib\r\nstr2hash=\"harsh\"\r\nresult=hashlib.md5(str2hash.encode())\r\nprint(result.hexdigest())\r\n" } ]
1
MeeshCompBio/my_env_setup
https://github.com/MeeshCompBio/my_env_setup
5c70e6e5c0f8fb2e0556864b552e59d6df685438
008afbaed6b961c8c5faf2ab3742c618800f7713
dd66ffbeb93196ad577cbe6e4abf1ccc9c0036d1
refs/heads/master
2021-05-18T19:58:14.205828
2021-03-21T19:18:04
2021-03-21T19:18:04
251,391,545
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 14.363636016845703, "blob_id": "d2a59a824df42614d63a09300b1d16be0acc8f21", "content_id": "1b00daf700a0c0b3cc20512867f64d2abe303caf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 168, "license_type": "no_license", "max_line_length": 27, "num_lines": 11, "path": "/Docker/simpleweb/Dockerfile", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "# base image specify\nFROM node:alpine\n\nWORKDIR /usr/app\n\n# Install some dependencits\nCOPY ./package.json ./\nRUN npm install\nCOPY ./ ./\n#set command\nCMD [\"npm\", \"start\"]" }, { "alpha_fraction": 0.7277108430862427, "alphanum_fraction": 0.7734940052032471, "avg_line_length": 51, "blob_id": "7e441450b6bd8ce64d54a5c8a0c243c1c2eb0ce6", "content_id": "513be607081937fb426c1de711dbbae743878280", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 415, "license_type": "no_license", "max_line_length": 165, "num_lines": 8, "path": "/Docker/Jupyter/Dockerfile", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "FROM jupyter/datascience-notebook\nUSER root\n\nRUN pip install jupyter_contrib_nbextensions autopep8 jupyterthemes && \\\n pip install --upgrade jupyterthemes && \\\n jupyter contrib nbextension install && \\\n jupyter notebook --generate-config -y\nRUN echo \"c.NotebookApp.password='argon2:\\$argon2id\\$v=19\\$m=10240,t=10,p=8\\$90cor1WVRvT/JAu3Vw06Pg\\$CQXrIT70DLVwvSVUEoYkqw'\">>/home/jovyan/.jupyter/jupyter_notebook_config.py" }, { "alpha_fraction": 0.7170731425285339, "alphanum_fraction": 0.7303522825241089, "avg_line_length": 33.81132125854492, "blob_id": "3443e574074899ae523af990a96255e6b8115be1", "content_id": "679879448362ef53e9af70535f62c0009a0e0f7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3690, "license_type": "no_license", "max_line_length": 190, "num_lines": 106, "path": "/my_env_setup.sh", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "set -euo pipefail\n\nWORKINGDIR=$PWD\n# install zsh\ncd\necho 'Creating GitHub repo and cloning repos'\nmkdir -p $WORKINGDIR/Software\nmkdir -p $HOME/.local\nINSTALLATION_PATH=\"$HOME/.local\"\n\ncd $WORKINGDIR/Software\nwget https://hisham.hm/htop/releases/2.2.0/htop-2.2.0.tar.gz\nwget https://github.com/tmux/tmux/releases/download/2.4/tmux-2.4.tar.gz\nwget https://github.com/downloads/libevent/libevent/libevent-2.0.19-stable.tar.gz\nwget ftp://ftp.invisible-island.net/ncurses/ncurses.tar.gz\nwget -O zsh.tar.xz https://sourceforge.net/projects/zsh/files/latest/download\n\n# htop\necho 'Installing htop'\ntar xvfvz htop-2.2.0.tar.gz\nrm htop-2.2.0.tar.gz\ncd htop-2.2.0\n./configure --prefix=$PWD && make && make install\ncd ..\n\n############\n# libevent #\n############\ntar xvzf libevent-2.0.19-stable.tar.gz\ncd libevent-2.0.19-stable\n./configure --prefix=$INSTALLATION_PATH --disable-shared\nmake\nmake install\ncd ..\n\n# need ncurses\necho \"Installing ncurses\"\ntar xvzf ncurses.tar.gz\ncd ncurses-6.2\nexport CXXFLAGS=\" -fPIC\"\nexport CFLAGS=\" -fPIC\"\n./configure --prefix=$HOME/.local --enable-shared && make && make install\nINSTALLATION_PATH=\"$HOME/.local\"\nexport PATH=$INSTALLATION_PATH/bin/:$PATH\nexport LD_LIBRARY_PATH=$INSTALLATION_PATH/lib\nexport CFLAGS=-I$INSTALLATION_PATH/include\nexport 
export CPPFLAGS=\"-I$INSTALLATION_PATH/include\" LDFLAGS=\"-L$INSTALLATION_PATH/lib\"\ncd ..\n\n# Installing tmux\ntar xvzf tmux-2.4.tar.gz\ncd tmux-2.4\n./configure CFLAGS=\"-I$INSTALLATION_PATH/include -I$INSTALLATION_PATH/include/ncurses\" LDFLAGS=\"-L$INSTALLATION_PATH/lib -L$INSTALLATION_PATH/include/ncurses -L$INSTALLATION_PATH/include\"\nCPPFLAGS=\"-I$INSTALLATION_PATH/include -I$INSTALLATION_PATH/include/ncurses\" LDFLAGS=\"-static -L$INSTALLATION_PATH/include -L$INSTALLATION_PATH/include/ncurses -L$INSTALLATION_PATH/lib\" make\ncp tmux $INSTALLATION_PATH/bin\ncd ..\n\n#############################\n# ZSH, OH MY ZSH AND PLUGINS\n#############################\necho 'installing zsh and ohmyzsh'\ncd $WORKINGDIR/Software\nmkdir zsh && unxz zsh.tar.xz && tar -xvf zsh.tar -C zsh --strip-components 1\n# rm zsh.tar\ncd zsh\n./configure --prefix=$INSTALLATION_PATH && make && make install\n\n# copy zshrc\ncp StartupFiles/zshrc ~/.zshrc\ncd\n\n# install oh my zsh\nY | sh -c \"$(wget -O- https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)\" || true\n\n# Add plugins here\n# Add powerlevel10k\ngit clone --depth=1 https://github.com/romkatv/powerlevel10k.git ~/.oh-my-zsh/themes/powerlevel10k\nsed -i \"/ZSH_THEME=\\\"r/c\\ZSH_THEME=\\\"powerlevel10k/powerlevel10k\\\"\" ~/.zshrc ~/.zshrc\n# add syntax-highlighting\ngit clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting\n# add auto suggestions\ngit clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions\n\n# modifications to the .zshrc to add plugins and such\nsed -i 's/.*plugins=(g.*/plugins=(git colored-man-pages zsh-syntax-highlighting zsh-autosuggestions)/' ~/.zshrc\n\n# add aliases\ncat $WORKINGDIR/bashrc_aliases.sh >> ~/.zshrc\n\n# vim setup\necho 'Setting up vim'\ncurl 'https://vim-bootstrap.com/generate.vim' --data 'langs=python&editor=vim' > ~/.vimrc\n\n# adding dracula theme\nsed -i \"/required by fugitive/i\\Plug 'dracula/vim', { 'as': 'dracula' }\" ~/.vimrc\n
colorscheme dracula\" ~/.vimrc\n# vim -c PlugInstall command line to install vim plugins, but then again user need to know how to exit it....\n\n# make zsh the default shell\necho \"making zsh the default shell on startup, modifying .bashrc\"\n\ncat $WORKINGDIR/bashrc_mod.sh >> ~/.bashrc\nsouce ~/.bashrc\n\n# adding a tmux conf file\ncat $WORKINGDIR/tmux_config_options.sh > ~/.tmux.conf\n" }, { "alpha_fraction": 0.5385920405387878, "alphanum_fraction": 0.5462256073951721, "avg_line_length": 35.875, "blob_id": "d77437178d253d2908b88fff207c0ad5ff20cfd3", "content_id": "c68baa7098a1fb2eba4bf1ba4396ef875204b6ba", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "permissive", "max_line_length": 110, "num_lines": 32, "path": "/Docker/docker-airflow/dags/example_dags/R_report.py", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.contrib.sensors.file_sensor import FileSensor\nfrom airflow.operators.email_operator import EmailOperator\nfrom datetime import datetime\nimport sys\nsys.path.insert(1,\"/usr/local/airflow/dags/data_pipelines\")\n\n\ndefault_args = {\n \"start_date\" : datetime(2020, 1, 1),\n \"owner\": \"airflow\",\n 'email': ['[email protected]'],\n 'email_on_failure': True,\n }\n\nwith DAG(dag_id=\"R_scripts_dag\", schedule_interval=\"@daily\", catchup=False, default_args=default_args) as dag:\n say_hello = BashOperator(task_id='R_say_hello',\n bash_command=\"Rscript /usr/local/airflow/dags/data_pipelines/Rhello.R\"\n )\n\n email = EmailOperator(\n task_id='send_email',\n to=['[email protected]'],\n subject='early warning report Final',\n html_content=\"\"\" <h3>Email Test</h3> \"\"\",\n files=['some_file.txt'],\n dag=dag,\n catchup=False\n )\n\n say_hello >> email" }, { "alpha_fraction": 0.6563876867294312, "alphanum_fraction": 0.6563876867294312, "avg_line_length": 21.799999237060547, "blob_id": "eee5501204af596daeafe47c3158400144eabba6", "content_id": "439c1ce63c548f1d5a0b5d0162dfead2cd782903", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 227, "license_type": "no_license", "max_line_length": 72, "num_lines": 10, "path": "/bashrc_mod.sh", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "preferred_shell=\nif [ -x $HOME/.local/bin/zsh ]; then\n preferred_shell=$HOME/.local/bin/zsh\nfi\n\nif [ -n \"$preferred_shell\" ]; then\n case $- in\n *i*) SHELL=$preferred_shell; export SHELL; exec \"$preferred_shell\";;\n esac\nfi" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 20.11111068725586, "blob_id": "0546e466583017031588ad4a065e2d239682f347", "content_id": "d87e5bf89807ccd61b536a1d528628c455c449b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 189, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/Docker/Dockerfile", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "# Use an existing docker image as a base\nFROM alpine\n\n# Download dependency and install\nRUN apk add --update redis\nRUN apk add --update gcc\n\n# Tell the image what to do\nCMD [\"redis-server\"]" }, { "alpha_fraction": 0.6696428656578064, "alphanum_fraction": 0.6875, "avg_line_length": 20.75, "blob_id": "df9ae3d3e55aa751021f650cca9dbd219ef7edc5", 
"content_id": "ab80938827b234801b84270a340f43bbcf5d09fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 784, "license_type": "no_license", "max_line_length": 43, "num_lines": 36, "path": "/tmux_config_options.sh", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "# Start window numbering at 1\nset-option -g base-index 1\nset-window-option -g pane-base-index 1\n\nsetw -g mode-keys vi\n#witch panes using Alt-arrow without prefix\nbind -n M-Left select-pane -L\nbind -n M-Right select-pane -R\nbind -n M-Up select-pane -U\nbind -n M-Down select-pane -D\n# Enable mouse mode (tmux 2.1 and above)\nset -g mouse on\n######################\n### DESIGN CHANGES ###\n######################\n\n# panes\nset -g pane-border-fg black\nset -g pane-active-border-fg brightred\n\n## Status bar design\n# status line\nset -g status-utf8 on\nset -g status-justify left\nset -g status-bg default\nset -g status-fg colour12\nset -g status-interval 2\n\n#window mode\nsetw -g mode-bg colour6\nsetw -g mode-fg colour0\n\n# window status\n\n# loud or quiet?\nset -g default-terminal \"screen-256color\"\n\n" }, { "alpha_fraction": 0.5704307556152344, "alphanum_fraction": 0.5832363367080688, "avg_line_length": 27.66666603088379, "blob_id": "59abe8145992d8754136b7827240b03f631b482a", "content_id": "9be28c32c949d2a0f1033452334f599fa4da6ded", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 859, "license_type": "permissive", "max_line_length": 58, "num_lines": 30, "path": "/Docker/docker-airflow/dags/example_dags/email_dag.py", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.email_operator import EmailOperator\n\ndefault_args = {\n 'owner': 'airflow',\n 'start_date': datetime(2018, 1, 30),\n 'email': ['[email protected]'],\n 'email_on_failure': True,\n 'retries': 2\n}\n\nwith DAG('email_failure_dag',\n default_args=default_args,\n schedule_interval='@daily',\n catchup=False) as dag:\n task_that_always_fails = BashOperator(\n task_id='task_that_always_fails',\n bash_command='exit 1',\n dag=dag,\n )\n # email = EmailOperator(\n # task_id='send_email',\n # to='[email protected]',\n # subject='Airflow Alert',\n # html_content=\"\"\" <h3>Email Test</h3> \"\"\",\n # dag=dag,\n # catchup=False\n # )" }, { "alpha_fraction": 0.6353591084480286, "alphanum_fraction": 0.6353591084480286, "avg_line_length": 24.85714340209961, "blob_id": "965a09e7c396dca61524a0d5cf94cf3447f4a114", "content_id": "090dca6ce8959bd8303ba597ca8dbb177158cb06", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "permissive", "max_line_length": 91, "num_lines": 14, "path": "/Docker/docker-airflow/dags/data_pipelines/fetching_tweets.py", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom datetime import datetime as dt\n\nLOCAL_DIR='/tmp/'\n\ndef main():\n tweets = pd.read_csv('/usr/local/airflow/dags/data/data.csv')\n \n tweets = tweets.assign(Time=pd.to_datetime(tweets.Time)).drop('row ID', axis='columns')\n \n tweets.to_csv(LOCAL_DIR + 'data_fetched.csv', index=False)\n \nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.753731369972229, "alphanum_fraction": 0.753731369972229, 
"avg_line_length": 37.35714340209961, "blob_id": "f1859c61e74a2d50809fa7923903ab3a5e85d4d2", "content_id": "ef413ada8cceaa673f894cf318d22ac759bfb99e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 536, "license_type": "no_license", "max_line_length": 115, "num_lines": 14, "path": "/README.md", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "# My environment set up\nA collection of script to make switching servers/going to the cloud a little easier. No root permissions required. \n\nJust run \"my_env_setup.sh\" and it will install the following\n* Tmux (also dependencies needed to install)\n* zsh (will be set to the deafault shell)\n* vim plugins\n* htop\n\n## Notes\n* Installation path is ~/.local DIR\n* All software will be installed in a ~/Software DIR\n* Add any bash aliases you want to the \"bashrc_aliases.sh\" file\n* my vimrc script is not use in the install, it is there for me" }, { "alpha_fraction": 0.745954692363739, "alphanum_fraction": 0.7653721570968628, "avg_line_length": 29.899999618530273, "blob_id": "79ca98ba64f1df1dd042b76f6a7e232682486b24", "content_id": "1df90569a4d020bd9476d661664f99798c53064e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 618, "license_type": "no_license", "max_line_length": 154, "num_lines": 20, "path": "/Docker/README.md", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "# Docker\n\n## Containers that I like\n\n### Airflow\nThis is a great way to spin up an airflow instance for demos. You have to pass in the airflow fernet env var or else you will get errors for a connection.\nKeep in mind this will only really work with the SequentialExecutor.\n\n```bash\ndocker pull puckel/docker-airflow\nYOUR_FERNET_KEY=$(openssl rand -base64 32)\ndocker run -d -e AIRFLOW__CORE__FERNET_KEY=$YOUR_FERNET_KEY -p 8888:8080 puckel/docker-airflow webserver\n```\n\nOnce inside the containers home dir you you can run these commands to get airflow running\n```bash\nmkdir dags\nairflow initdb\nairflow scheduler -D\n```\n" }, { "alpha_fraction": 0.48013246059417725, "alphanum_fraction": 0.5132450461387634, "avg_line_length": 22.230770111083984, "blob_id": "45de92f23e81e9794a75ca1c7342166edc9e13b1", "content_id": "c38cca63ed70cff389c5f4956f08d2dc8a9bf47d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 302, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/Docker/Jupyter/docker-compose.yaml", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "version: '3.7'\nservices:\n datascience-notebook:\n image: meesh-jupyter \n volumes:\n - ./:/home/jovyan\n environment:\n GRANT_SUDO: \"yes\"\n user: root\n ports:\n - 8888:8888\n container_name: jupyter_notebook\n restart: always\n" }, { "alpha_fraction": 0.7065955400466919, "alphanum_fraction": 0.7240543365478516, "avg_line_length": 31.0625, "blob_id": "1ace3fa12c35a574667f2a98f1534e0c4101c528", "content_id": "ae25a4bb30c06443611d1cbda1ec4fff077728ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2062, "license_type": "no_license", "max_line_length": 92, "num_lines": 64, "path": "/vimrc.sh", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "\" Configuration file for vim\nset modelines=0\t\t\" CVE-2007-2438\n:imap kj <Esc>\n\n\"Vundle package information\"\nset 
nocompatible \" be iMproved, required\nfiletype off \" required\n\n\" set the runtime path to include Vundle and initialize\nset rtp+=~/.vim/bundle/Vundle.vim\ncall vundle#begin()\n\" alternatively, pass a path where Vundle should install plugins\n\"call vundle#begin('~/some/path/here')\n\" let Vundle manage Vundle, required\nPlugin 'gmarik/Vundle.vim'\nPlugin 'dracula/vim', { 'name': 'dracula' }\n\"####Put Plugins under here####\"\nlet g:ycm_global_ycm_extra_conf = \"~/.vim/.ycm_extra_conf.py\"\n\n\n\n\" All of your Plugins must be added before the following line\ncall vundle#end() \" required\nfiletype plugin indent on \" required\n\" To ignore plugin indent changes, instead use:\n\"filetype plugin on\n\"\n\" Brief help\n\" :PluginList - lists configured plugins\n\" :PluginInstall - installs plugins; append `!` to update or just :PluginUpdate\n\" :PluginSearch foo - searches for foo; append `!` to refresh local cache\n\" :PluginClean - confirms removal of unused plugins; append `!` to auto-approve removal\n\"\n\" see :h vundle for more details or wiki for FAQ\n\" Put your non-Plugin stuff after this line\n\n\nset tabstop=4 \" The width of a TAB is set to 4\nset shiftwidth=4 \" Indents will have a width of 4\nset expandtab \" Expand TABs to spaces\n\n\n\n\" Don't write backup file if vim is being called by \"crontab -e\"\nau BufWrite /private/tmp/crontab.* set nowritebackup nobackup\n\" Don't write backup file if vim is being called by \"chpass\"\nau BufWrite /private/etc/pw.* set nowritebackup nobackup\n\n\"this is for the theme\"\n syntax enable\n \" set background=dark\n\" let g:solarized_termcolors = 256\n\"colorscheme Tomorrow-Night-Bright\ncolorscheme dracula\n \"autocmd FileType sh colorscheme industry\nautocmd BufEnter *.sh colorscheme falcon\n\nset number\nset showcmd\nset cursorline\n\n\" highlight line 80 and after line 100\nhighlight ColorColumn ctermbg=235 guibg=#2c2d27\nlet &colorcolumn=\"80,\".join(range(120,999),\",\")\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5707316994667053, "alphanum_fraction": 0.583414614200592, "avg_line_length": 38.46154022216797, "blob_id": "44417f5799264f796c3add80f20a5c42a734d3f6", "content_id": "fb64f958c3f0ca8af64c1af7f3bcf98d0c962c65", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1025, "license_type": "permissive", "max_line_length": 78, "num_lines": 26, "path": "/Docker/docker-airflow/dags/data_pipelines/subdag_dag.py", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\nimport sys\nfrom airflow.models import DAG\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nsys.path.insert(1,\"/usr/local/airflow/dags/data_pipelines\")\nfrom subdag_factory import subdag_factory\n\nPARENT_DAG_NAME='sugdag_dag'\nSUBDAG_DAG_NAME='subdag'\n\nwith DAG(\n dag_id=PARENT_DAG_NAME,\n schedule_interval='@daily',\n start_date=datetime(2020, 1, 1, 10, 00, 00),\n catchup=False\n) as dag:\n start_task = DummyOperator(task_id='start')\n subdag_task = SubDagOperator(subdag=subdag_factory(PARENT_DAG_NAME,\n SUBDAG_DAG_NAME,\n dag.start_date,\n dag.schedule_interval),\n task_id=SUBDAG_DAG_NAME\n )\n end_task = DummyOperator(task_id='end')\n start_task >> subdag_task >> end_task" }, { "alpha_fraction": 0.6376811861991882, "alphanum_fraction": 0.642951250076294, "avg_line_length": 33.54545593261719, "blob_id": "1c12824709e454d7baffca4d85ffef0ad18553e7", 
"content_id": "7ee22487c20286ac0a4ab094ef40bf6874af75b0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "permissive", "max_line_length": 124, "num_lines": 22, "path": "/Docker/docker-airflow/dags/data_pipelines/cleaning_tweets.py", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport re\nfrom datetime import datetime as dt\nfrom datetime import date, timedelta\n\nLOCAL_DIR='/tmp/'\n\ndef main():\n tweets= pd.read_csv(LOCAL_DIR + 'data_fetched.csv')\n\n tweets.rename(columns={'Tweet':'tweet', 'Time':'dt', 'Retween from': 'retweet_from', 'User':'tweet_user'}, inplace=True)\n tweets.drop(['tweet_user'], axis=1, inplace=True)\n tweets['before_clean_len'] = [len(t) for t in tweets.tweet]\n tweets['tweet'] = tweets['tweet'].apply(lambda tweet: re.sub(r'@[A-Za-z0-9]+','',tweet))\n yesterday = date.today() - timedelta(days=1)\n dt = yesterday.strftime(\"%Y-%m-%d\")\n tweets['dt'] = dt\n tweets.to_csv(LOCAL_DIR + 'data_cleaned.csv', index=False)\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5561420321464539, "alphanum_fraction": 0.5642994046211243, "avg_line_length": 47.488372802734375, "blob_id": "c2d9ba317a01447b1eb6a554a89740fcd512477f", "content_id": "d79e5dbd7bd950f7c4f0f03ad019d0b74374f071", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2084, "license_type": "permissive", "max_line_length": 167, "num_lines": 43, "path": "/Docker/docker-airflow/dags/example_dags/twitter_dag.py", "repo_name": "MeeshCompBio/my_env_setup", "src_encoding": "UTF-8", "text": "from airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.contrib.sensors.file_sensor import FileSensor\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom datetime import datetime\nimport sys\nsys.path.insert(1,\"/usr/local/airflow/dags/data_pipelines\")\nimport fetching_tweets\nimport cleaning_tweets\n\n\ndefault_args = {\n \"start_date\" : datetime(2020, 1, 1),\n \"owner\": \"airflow\"\n }\n\nwith DAG(dag_id=\"twitter_dag\", schedule_interval=\"@daily\", default_args=default_args, catchup=False) as dag:\n waiting_for_tweets = FileSensor(task_id=\"waiting_for_tweets\",\n fs_conn_id=\"fs_tweet\",\n filepath=\"data.csv\",\n poke_interval=5\n )\n\n fetching_tweets = PythonOperator(task_id=\"fetching_tweets\",\n python_callable=fetching_tweets.main\n )\n cleaning_tweets = PythonOperator(task_id=\"cleaning_tweets\",\n python_callable=cleaning_tweets.main\n )\n \n storing_tweets = PostgresOperator(task_id='storing_tweets',\n postgres_conn_id=\"postgres_default\",\n sql='''CREATE TABLE IF NOT EXISTS tweets(Tweet varchar(250), Date varchar(50), Retweet_from varchar(50), T_User varchar(50));'''\n )\n\n update_tweets = PostgresOperator(task_id='update_tweets',\n postgres_conn_id=\"postgres_default\",\n sql='''COPY tweets(Tweet, Date, Retweet_from, T_User) FROM '/tmp/data_cleaned.csv' DELIMITER ',' CSV HEADER;'''\n )\n\n waiting_for_tweets >> fetching_tweets >> cleaning_tweets >> storing_tweets >> update_tweets" } ]
16
itb-ie/pandas-apple-stock
https://github.com/itb-ie/pandas-apple-stock
f6b9c077f2703157dd3a1cf746d1b5e2a5b1f502
b0d8fac8c35d6ea894bb6dbfe0b1e6ff1eedeae8
445953140c0ccb90c9dc09699f79d55b22f5fe34
refs/heads/master
2023-03-15T06:09:51.055810
2023-03-09T00:16:40
2023-03-09T00:16:40
222,690,966
0
9
null
null
null
null
null
[ { "alpha_fraction": 0.558282196521759, "alphanum_fraction": 0.6226993799209595, "avg_line_length": 20.733333587646484, "blob_id": "4e63553f6a5730fd08a06be292103a0b97c5505f", "content_id": "b602ee48643abc539a480e6755dfaeea6658a9fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 75, "num_lines": 15, "path": "/pandas-tutorial.py", "repo_name": "itb-ie/pandas-apple-stock", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\ns = pd.Series([1, 3, 5, np.nan, 6, 8])\n# print(s)\n\ndates = pd.date_range('20191101', periods=7)\n# print(dates)\n\ndf = pd.DataFrame(np.random.randn(7, 4), index=dates, columns=list('ABCD'))\nprint(df)\n\nprint(df.loc[[dates[2], dates[3]], [\"B\", \"C\"]])\ndf.loc[dates[3], [\"B\"]] = 77\nprint(df)\n" }, { "alpha_fraction": 0.6145339608192444, "alphanum_fraction": 0.660347580909729, "avg_line_length": 30.649999618530273, "blob_id": "4e6edf995664cb730a31023f655c1b8d27fe211a", "content_id": "01ef1e90eda22edd43f35a16bd188b1d57797a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 101, "num_lines": 40, "path": "/graph.py", "repo_name": "itb-ie/pandas-apple-stock", "src_encoding": "UTF-8", "text": "from matplotlib import pylab as plt\nimport pandas as pd\n\n# pd.plotting.register_matplotlib_converters()\n\ndf1 = pd.read_csv(\"AAPL.csv\")\nprint(df1.head())\ndf1['Date'] = pd.to_datetime(df1.Date)\n\ndf2 = pd.read_excel(\"iphone-dates-2019.xlsx\")\nprint(df2)\ndf2['Date'] = pd.to_datetime(df2.date)\n\nindex2 = []\nfor date2 in df2.Date:\n if df1.index[df1.Date == date2].values.size:\n index2.append(int(df1.index[df1.Date == date2].values[0]))\n elif df1.index[df1.Date == date2 + pd.DateOffset(1)].values.size:\n index2.append(int(df1.index[df1.Date == date2 + pd.DateOffset(1)].values[0]))\n elif df1.index[df1.Date == date2 + pd.DateOffset(2)].values.size:\n index2.append(int(df1.index[df1.Date == date2 + pd.DateOffset(2)].values[0]))\n\n else:\n print(f\"Did not find {date2}\")\n\nprint(index2, len(index2))\n\n\nmean = df1[\"Close\"].mean()\n\n\nplt.figure(\"Apple Stock\")\nplt.plot(df1[\"Date\"], df1[\"Close\"], 'r-', linewidth=0.6, label=\"APPL Stock price, mean=\"+str(mean))\n# or the same can be:\n# plt.plot(\"Date\", \"Close\", 'r-', linewidth=0.6, label=\"APPL Stock price, mean=\"+str(mean), data=df1)\nplt.plot(df1[\"Date\"], df1[\"Close\"], 'o', ms=7, markevery=index2, label=\"Iphone launch date\")\nplt.xlabel(\"Dates\")\nplt.legend(loc=\"upper left\")\n\nplt.show()\n" } ]
2
njuptml/MTR-GL
https://github.com/njuptml/MTR-GL
c01c1e783855c224dd3aa6c47240a548413fcc11
74d00dadbf84c0c843b58a64da620995f5f4e5e1
f9f190c7fc421a61564720fd1abcdf0e074c17dc
refs/heads/master
2020-06-20T01:45:08.890582
2019-07-15T07:36:15
2019-07-15T07:36:15
196,936,469
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7686178088188171, "alphanum_fraction": 0.7722954154014587, "avg_line_length": 97.8787841796875, "blob_id": "ca95b4659431077b5dab99084cf507af5cf8b30a", "content_id": "99c13d21ac7c96967be4d93678173bdb0a20b46a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3273, "license_type": "no_license", "max_line_length": 670, "num_lines": 33, "path": "/README.md", "repo_name": "njuptml/MTR-GL", "src_encoding": "UTF-8", "text": "# __MTR-GL__ \n \nMTR-GL (multi-task regression learning with group lasso) is a novel method to precisely model and interpret bioactivities of ligand molecules by combining homologous GPCRs. MTR-GL was examined on a set of thirty-five representative GPCRs datasets that cover nine subfamilies of human GPCRs. The results demonstrate that, the combination of homologous GPCRs in learning bioactivates of ligands can effectively improve the performance and interpretation of models by utilizing similar information of interaction and ligand samples cross these GPCR proteins. This is of great significance for understanding GPCR-ligand interaction and developing new drugs targeting GPCRs. \n \n## ___Reference:___ \nJiansheng Wu, et.al. Homologous G Protein-coupled Receptors Boost the Modelling and Interpretation of Bioactivities of Ligand Molecules. (In review) \n \n### ___ATTN1:___ \n* This package is free for academic usage. You can run it at your own risk. \n* For other purposes, please contact PH.D Jiansheng Wu ([email protected]). \n* For any problem concerning the code, please feel free to contact PH.D Jiansheng Wu. \n\n## ___Requirement:___ \nTo use this package, the Matlab, JAVA JDK and Python (Ver 2.7) environment must be available. \n\n### ___ATTN2:___ \nThis package was developed by Miss Yi Sun and Mr. Ben Liu. \n\n### ___ATTN3: Code usage of MTR-GL___ \nWe have developed two demo programs for different applications in ligand-based virtual screening, with the source codes and datasets released through http://cbi.njupt.edu.cn/MTR-GL/. \nThe code for MTR-GL was written in Matlab2014 and Python 2.7. This provides a general framework for ligand-based virtual screening through multi-task learning with joint feature learning, which allows users to develop their own key substructure recognition and virtual screening tools for drug targets of their interest on the basis of our code. The pipelines have two major functions. \n* __demo_new:__ This provides a general framework on ligand-based virtual screening, and it is easy for users to develop their own virtual screening tools for drug targets of their choice on the basis of our code. \n___Input:___ \nCompounds in the format of canonical SMILES and their bioactivity values. \n___Output:___ \nModel performance (r^2, RMSE). \n___The procedure is as follows:___ \nTo input compounds in the format of canonical SMILES and their bioactivity values →To generate Pubchem fingerprints → To train the multi-task regression learning with group lasso (MTR-GL) model → To obtain the model performance.\n* __demo_activity:__ This offers the ligand-based virtual screening models for thirty-five important human GPCR drug targets, and users can predict the bioactivities of new compounds acting with these targets, which is important in the implementation of drug design against these drug targets, the prediction of side effects of multi-target drugs, and the risk assessment of drug development. \n#### ___Input:___ \nCompounds in the format of canonical SMILES. 
\n#### ___Output:___ \nBioactivity values for interaction with the GPCR drug target of interest. The steps are as follows: Input compounds in the format of canonical SMILES → generate Pubchem fingerprints → obtain the bioactivity values based on our trained MTR-GL models.\n" }, { "alpha_fraction": 0.5675276517868042, "alphanum_fraction": 0.5867158770561218, "avg_line_length": 25.693878173828125, "blob_id": "0bba0a039f2ff0a5feadc902a2ea0ecdeb6c7285", "content_id": "d7dfbab9274ff3150bd3bd094cc2cc4ee001a2f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 93, "num_lines": 49, "path": "/demo_predict.py", "repo_name": "njuptml/MTR-GL", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 21 17:51:44 2019\r\n\r\n@author: Benli\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport os\r\n\r\ndef trans(res):\r\n    tmp = -res\r\n    return 10**tmp\r\nlocal_path = os.getcwd()\r\ncmd1 = '''javac -classpath \".;'''+ local_path +'''\\\\*\" Pubchem_fp.java'''\r\nos.popen(cmd1)\r\ncmd2 = '''java -classpath \".;'''+ local_path +'''\\\\*\" Pubchem_fp'''\r\nos.popen(cmd2)\r\nUserInput = pd.read_csv('UserInputData.txt',sep='\\t')\r\nweight = pd.read_csv('Weight_all.csv',sep=',')\r\nwith open('output_fp.txt',\"r\") as f:\r\n    header = f.readline()\r\n    data = f.readlines()\r\ntarget = []\r\nsmiles = []\r\nfp = []\r\nfor line in data:\r\n    tmp_target_name = line.strip().split('\\t')[0]\r\n    tmp_smiles = line.strip().split('\\t')[1]\r\n    tmp_fp = line.strip().split('\\t')[2:]\r\n    target.append(tmp_target_name)\r\n    smiles.append(tmp_smiles)\r\n    fp.append(tmp_fp)\r\ntarget_name = target[0]\r\ndf_fp = pd.DataFrame(fp,dtype=int)\r\nres = []\r\nfor i in range(len(smiles)):\r\n    W = weight[target_name]\r\n    fp = df_fp.iloc[i]\r\n    tmp_res = sum(W*fp)\r\n    res.append(tmp_res) \r\npredict=pd.DataFrame(res).apply(trans)\r\noutput = UserInput\r\noutput['predict'] = predict\r\n\r\nwith open('predict_result.txt', 'w') as ff:\r\n    for j in range(len(output)):\r\n        res_line = '{}\\t{}\\t{}\\n'.format(output.iloc[j,0],output.iloc[j,1],output.iloc[j,2] )\r\n        ff.write(res_line)" } ]
2
kolynos/price_prediction
https://github.com/kolynos/price_prediction
e24f946cd9d02a42973ef66a7be489f365d56402
77b244837357436efc7e918965228cc0763406bc
6a0395d6bf55efb327abf07d4c98c03e8cff1353
refs/heads/master
2016-09-06T05:21:17.286202
2014-10-10T19:53:07
2014-10-10T19:53:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.6086956262588501, "avg_line_length": 10.5625, "blob_id": "a51d1cfe8272565275680e86cde0787b59ed9fb0", "content_id": "f7529b6290365c1e850e315b20bcbc8d49c068de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 57, "num_lines": 16, "path": "/cronTest.py", "repo_name": "kolynos/price_prediction", "src_encoding": "UTF-8", "text": "'''\nCreated on 30.09.2014\n\n@author: colinos\n'''\nimport time\n\nlog_file='log_'+time.strftime(\"%d_%m_%Y_%H_%M_%S\")+'.csv'\n\nprint log_file\n\nf=open(log_file,'w')\n\nf.write('test')\n\nf.close()" }, { "alpha_fraction": 0.5824443101882935, "alphanum_fraction": 0.5970933437347412, "avg_line_length": 38.421531677246094, "blob_id": "baa2f966499506c598be88f13bc74d4692bfa283", "content_id": "25182d96a9a36bbb9fbc675e896fcd93ded398a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43211, "license_type": "no_license", "max_line_length": 320, "num_lines": 1096, "path": "/ebayDownloader.py", "repo_name": "kolynos/price_prediction", "src_encoding": "UTF-8", "text": "'''\nCreated on 09.09.2014\n\n@author: colinos\n'''\nimport datetime\nimport time\nfrom ebaysdk.finding import Connection as finding\nfrom ebaysdk.trading import Connection as trading\nfrom ebaysdk.shopping import Connection as shopping\nfrom ebaysdk.exception import ConnectionError as EbayConnectionError\nfrom requests.exceptions import ConnectionError,Timeout\nfrom copy import copy,deepcopy\nfrom itertools import islice\n#import psycopg2 as pg\nimport pandas.io.sql as psql\nimport numpy as np\nimport pandas as pd\n\n#import os\nimport os.path\nimport pickle\nimport psycopg2\nimport sys\n#from ebaysdk.soa.finditem import Connection as FindItem\n\n\ndef connect_db():\n global db_connection,db_cursor\n db_connection = psycopg2.connect(\"dbname=ebaypredictor user=postgres password=quejodes\")\n db_cursor = db_connection.cursor()\n\ndef disconnect_db():\n global db_connection,db_cursor\n db_connection.close()\n db_connection=None\n \n\n\ndef connect_api():\n global trading_api,finding_api,shopping_api\n trading_api=trading(config_file='ebay.yaml',siteid=77,token='AgAAAA**AQAAAA**aAAAAA**gNoiVA**nY+sHZ2PrBmdj6wVnY+sEZ2PrA2dj6wJkYWoDpmGqA2dj6x9nY+seQ**PXkCAA**AAMAAA**hzDKea9fTGWVfQz6a+ULxm8p20w2dRegRC1r3fy5UQjAbb/gQc7m76reGs4JHLA8/Dq6hdxVx6kNmP/78rG9ii4NXu3UIjyFPDA2G4Yg+BMZ7BfDdCVgBPtigPhrrf6GJxTQF/yP0nWbFW0hDN6ZwFURMEcFXazayEUbqNDD9V0zj35Z1X7WE3xV8tnph37SDZDhplGC1ha3MjcDRZqZURhz74TxmH1nugJ9pSsjY7GsDvmVgFk0BP/AFgA8jOXiA6avHSAzWstY9AK/Pxmcj5fO3Df+w5WYrjnNMPZcwPMyFeb+oAxABG8KkYmKbWmh/D57JVHn0G6J2tEysCQMUXUZa8VxbP9noKjmIADhmYh6pjx7PzAtjB0feAdFOb9BGmVeczGgYJrMaBhujrwPB9beaC6d1EuUnw4dON/RJqHWRq9ecQkAMsVB5+eki+/HS0LA4ZVA01FaMW6QlgNRefwj5fAJbv20mggfYx5dA+e8STAmk05ThzUktiaRzHxZEtIiVsf3xqZkSFfCZVpFskdWsBb+ifLYpjEAf+KG+56zijYC50HOP+oTcVqfUcQ9EaBZyiS6+VrYR2KVkGiVGNWyOdY56Y7Q7yJfhMxMi2YAxEFyhPZt6qVg02Ka6ubV0Mcm6Xn1jq02XUzkq3+Q0QMYV2D37MGYxaq0qVCfvp3l6GuY+hZ1S7z9BPu2uXDC8h0t+JurlSveX+/2cj+2loCX+nneCgikupnwrmZCFbqV4jmX36qrIDbE7KU2d7TE')\n finding_api=finding(config_file='ebay.yaml',siteid='EBAY-DE')\n shopping_api=shopping(config_file='ebay.yaml',siteid=77)\n \n\n\n\n\ndef get_categories():\n global trading_api\n try:\n response = trading_api.execute('GetCategories',{'CategorySiteID':77,'DetailLevel':'ReturnAll','CategoryParent':58058})\n print(response.dict())\n except ConnectionError as e:\n 
print(e)\n print(e.response.dict())\n\ndef get_item_complete_description(item):\n global trading_api\n \n query={}\n query['DetailLevel']='ItemReturnDescription'\n query['ItemID']=item\n trading_api.execute('GetItem', query)\n print trading_api.response_dict()\n \n\ndef get_attribute_type_id_from_db(category,attribute_names):\n global db_connection,db_cursor\n \n attribute_id=None\n try:\n #attribute_ids=['categoryID']+['attributeName_'+str(x) for x in range(1,9)]\n query=' AND '.join(['attributeName_'+str(i)+' LIKE %s' for i in range(1,len(attribute_names)+1)])\n query=db_cursor.mogrify('Select attributeTypeID from itemAttributeIds where '+query,tuple(attribute_names))\n db_cursor.execute(query)\n attribute_id=db_cursor.fetchone()[0]\n \n except psycopg2.Error as e:\n print e\n return attribute_id\n \ndef add_attribute_id_to_db(category,attribute_names):\n global db_connection,db_cursor\n try:\n attribute_ids=['categoryID']+['attributeName_'+str(x) for x in range(1,17)]\n attribute_string='('+','.join(attribute_ids)+')'\n attribute_indices='('+','.join(['%s' for x in range(1,18)])+')'\n if(len(attribute_names)<16):\n attribute_names+=['' for i in range(len(attribute_names),16)]\n attribute_names=[category]+attribute_names\n query=db_cursor.mogrify('Insert into itemAttributeIds'+attribute_string+' VALUES '+attribute_indices,tuple(attribute_names))\n db_cursor.execute(query)\n db_connection.commit()\n except psycopg2.Error as e:\n print e\n \ndef get_category_aspects(category):\n global finding_api,reverse_order\n \n category_aspects=None\n filename='category_aspects_'+str(category)+'.pkl'\n if(os.path.isfile(filename)):\n pkl_file=open(filename, 'r')\n category_aspects=pickle.load(pkl_file)\n pkl_file.close()\n else:\n try:\n category_aspects=dict()\n response=finding_api.execute('findItemsByCategory',{'categoryId':category,'outputSelector':'AspectHistogram'})\n aspects=response.dict()['aspectHistogramContainer']['aspect']\n print(aspects)\n for aspect in aspects:\n print('---')\n print aspect['_name']\n print('---')\n \n category_aspects[aspect['_name']]=list()\n values=aspect['valueHistogram']\n #values.reverse()\n #print(aspect['valueHistogram'])\n for value in values:\n category_aspects[aspect['_name']].append(value['_valueName'])\n print(value['_valueName']+': '+value['count'])\n #category_aspects[aspect['_name']].reverse()\n #print(category_aspects)\n #output = open('category_aspects_'+str(category)+'.pkl', 'w')\n #pickle.dump(category_aspects,output,-1)\n #output.close()\n except ConnectionError as e:\n print(e)\n print(e.response.dict())\n keys=category_aspects.keys()\n for i in range(len(keys)):\n #print str(i)+' '+keys[i]\n values=category_aspects[keys[i]]\n #for j in range(len(values)):\n # print(values[j])\n \n #print(category_aspects)\n return category_aspects\n\n\ndef get_aspect_filter_old(aspect_list):\n aspect_list=[{'aspectName':aspect.keys()[0],'aspectValueName':aspect[aspect.keys()[0]]} for aspect in aspect_list]\n #aspect_dict['aspectFilter']=aspect_list\n return aspect_list\n #for aspect in keys:\n\ndef get_aspect_filter(aspect_list):\n aspect_list=[{'aspectName':aspect,'aspectValueName':aspect_list[aspect]} for aspect in aspect_list]\n #print aspect_list\n return aspect_list\n \n\ndef recursive_find(category,aspect_history,category_counter,query_type='findItemsByCategory'):\n global category_aspect_dict, category_order,ebay_calls\n \n #print aspect_history\n if(category_counter==len(category_aspect_dict.keys())):\n aspect_filter=get_aspect_filter(aspect_history)\n 
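# base case: a value has been chosen for every aspect; fetch the matching items\n        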
valid=get_valid_aspects(category,aspect_filter)\n ebay_calls+=2\n \n print([x['aspectValueName'] for x in aspect_filter])\n print(ebay_calls)\n get_all_items(category,aspect_filter,query_type)\n \n #get_valid_aspects(category,aspect_filter)\n return\n #current_category=category_order[category_counter]\n current_name=category_aspect_dict.keys()[category_counter]\n aspect_filter=get_aspect_filter(aspect_history)\n ebay_calls+=1\n valid_aspects=get_valid_aspects(category,aspect_filter)\n \n if(current_name in valid_aspects.keys()):\n for aspect in category_aspect_dict[current_name]:\n new_history=copy(aspect_history) \n if(aspect in valid_aspects[current_name]['names']):\n aspect_dict={current_name:aspect}\n new_history.append(aspect_dict)\n recursive_find(category,new_history,category_counter+1)\n else:\n new_history=copy(aspect_history)\n recursive_find(category,new_history,category_counter+1)\n \ndef get_valid_aspects(category,aspect_filter,query_type='findItemsByCategory'):\n global valid_aspect_categories\n \n query=dict()\n query['outputSelector']='AspectHistogram'\n query['categoryId']=category\n query['aspectFilter']=aspect_filter\n response=finding_api.execute(query_type,query)\n result=response.dict()\n valid_aspects=dict()\n if(result.has_key('aspectHistogramContainer')):\n aspects=result['aspectHistogramContainer']['aspect']\n for aspect in aspects:\n valid_aspects[aspect['_name']]={'names':list(),'counts':list()}\n values=aspect['valueHistogram']\n # if only one element is in the list, only a dict is returned. It needs to be converted into list\n if(type(values) is dict):\n values=[values]\n value_count=-1\n for value in values:\n if(value['_valueName'].find('oder mehr')==-1 or \n (value['_valueName'].find('oder mehr')>0 and value['count']>value_count)):\n valid_aspects[aspect['_name']]['names'].append(value['_valueName'])\n valid_aspects[aspect['_name']]['counts'].append(value['count'])\n if(value['_valueName'].find('oder mehr')>0):\n value_count=value['count']\n #print(values[j]['_valueName'])\n \n #if(valid_aspect_categories.containaspect_category['_name'])\n return valid_aspects\n \n #result=response.dict()\n\ndef get_all_items_old(category,aspect_filter,query_type):\n global finding_api,ebay_calls,pickle_files\n \n try:\n query=dict()\n query['sortOrder']='StartTimeNewest'\n query['categoryId']=category\n query['aspectFilter']=aspect_filter\n response=finding_api.execute(query_type,query)\n ebay_calls+=1\n result=response.dict()\n print result['paginationOutput']\n current_page=0\n num_pages=int(result['paginationOutput']['totalPages'])\n while(current_page<num_pages):\n result=response.dict()\n result['category']=category\n result['aspectFilter']=aspect_filter\n result['page']=current_page\n output = open('items_'+str(category)+'_'+str(pickle_files)+'_'+str(current_page)+'.pkl', 'w')\n pickle.dump(result,output,-1)\n output.close()\n current_page+=1\n pickle_files+=1\n if(current_page<num_pages):\n response=finding_api.next_page()\n ebay_calls+=1\n #\n \n except ConnectionError as e:\n print(e)\n print(e.response.dict())\n \n\ndef get_all_items(category,aspect_filter,query_type='findItemsByCategory',stop_if_known=False):\n global finding_api,pickle_files,db_cursor,api_error,log_file\n try:\n api_error=False\n result_data=[]\n query=dict()\n query['sortOrder']='StartTimeNewest' \n query['categoryId']=category\n if(len(aspect_filter)>0):\n query['aspectFilter']=aspect_filter\n response=finding_api.execute(query_type,query)\n result=response.dict()\n 
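# walk the paginated result set, optionally stopping at the first already-known item\n        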
current_page=int(result['paginationOutput']['pageNumber'])\n num_pages=int(result['paginationOutput']['totalPages'])\n f=open(log_file,'a')\n f.write('get_all_items: pages='+str(result['paginationOutput']['totalPages']))\n f.write('\\n')\n f.close()\n #while(current_page<=1):\n object_known=False\n while(current_page<=num_pages and not object_known):\n f=open(log_file,'a')\n f.write(str(result['paginationOutput']))\n f.write('\\n')\n f.close()\n for item in result['searchResult']['item']:\n if(stop_if_known):\n query=db_cursor.mogrify('SELECT * FROM itemData where itemId=%s;',(int(item['itemId']),))\n db_cursor.execute(query)\n if(db_cursor.rowcount>0):\n f=open(log_file,'a')\n f.write('found known object\\n')\n f.close()\n object_known=True\n break\n result_data.append(item['itemId'])\n if(current_page<num_pages):\n finding_api.next_page()\n response=finding_api.response\n result=response.dict()\n num_pages=min(num_pages,int(result['paginationOutput']['totalPages']))\n current_page=int(result['paginationOutput']['pageNumber'])\n else:\n current_page+=1 \n except (ConnectionError,Timeout) as e:\n print(e)\n api_error=True\n result_data=None\n return result_data\n \n\ndef get_all_items_from_db(category):\n global db_cursor\n \n query=db_cursor.mogrify('SELECT itemId FROM itemids where categoryId=%s;',(category,))\n #items=\n db_cursor.execute(query)\n items=db_cursor.fetchall()\n items=[i[0] for i in items]\n return items\n \n\ndef get_active_items_from_db(category):\n db_cursor\n query=db_cursor.mogrify('SELECT itemId FROM itemids where categoryId=%s;',(category,))\n db_cursor.execute(query)\n items=db_cursor.fetchall()\n active_items=[]\n missing_states=0\n #items=[i[0] for i in items]\n for item in items:\n query=db_cursor.mogrify('SELECT item_status,time_timestamp FROM itemstate where itemId=%s order by time_timestamp desc limit 1;',(item[0],))\n db_cursor.execute(query)\n itemstate=db_cursor.fetchone()\n if(db_cursor.rowcount==0):\n missing_states+=1\n #print 'No state data for item '+str(item[0])\n else:\n if(itemstate[0] == 'Active'):\n active_items.append((item[0],itemstate[1]))\n f=open(log_file,'a')\n f.write('nr of missing states: '+str(missing_states))\n f.write('\\n')\n f.close()\n return active_items\n\ndef add_items(items,category):\n global db_connection,db_cursor,log_file\n new_data=[]\n try:\n for item in items:\n query=db_cursor.mogrify('SELECT * FROM itemids where itemId=%s;',(item,))\n db_cursor.execute(query)\n if(db_cursor.rowcount==0):\n query=db_cursor.mogrify('Insert into itemids(itemId,categoryID) values(%s,%s);',(item,category))\n db_cursor.execute(query)\n #db_connection.commit()\n new_data.append(item)\n except psycopg2.Error as e:\n f=open(log_file,'a')\n f.write(str(e))\n f.write('\\n')\n f.close()\n db_connection.commit()\n return new_data\n #catch \n #str=\"SELECT * FROM itemids WHERE itemId = %s\", (0,)\n #print str\n \n #cur.execute(\"SELECT * FROM itemids WHERE itemId = %s\", (0,))\n #all=cur.fetchall()\n #print(all)\n\n\n#Item.ItemSpecifics!!!\n\ndef add_state(items,timestamp):\n global db_cursor,db_connection\n attribute_ids=['itemID','time_timestamp','time_end',\n 'price_current','hit_count', 'bid_count','item_status','seller_rating','seller_score','seller_feedback','seller_top'\n ]\n item_data_names=['ItemID',{'name' :'dummy','NA':timestamp},'EndTime',['CurrentPrice',['value',-1.0]],\n #['sellingStatus',['bidCount',0]],['sellingStatus',['sellingState','None']]]\n 'HitCount','BidCount','ListingStatus',\n 
['Seller','FeedbackRatingStar'],['Seller',['FeedbackScore',-1]],['Seller','PositiveFeedbackPercent'],['Seller',['TopRatedSeller','false']]]\n    \n    \n    for item in items:\n        attribute_string,attribute_indices,attribute_values=generate_query_strings(attribute_ids,item_data_names,item)\n        query=db_cursor.mogrify('Insert into itemState'+attribute_string+' VALUES '+attribute_indices,tuple(attribute_values))\n        db_cursor.execute(query)\n    db_connection.commit()\n    \n\ndef generate_query_strings(query_ids,item_names,item):\n    attribute_values=[]\n    #print item\n    #print item_names\n    for item_data in item_names:\n        if(type(item_data) is str): # value is dict entry on top level of item dict\n            attribute_values.append(item[item_data]) \n        elif(type(item_data) is int): # value is not in item dict, just insert value in db\n            attribute_values.append(item_data)\n        elif(type(item_data) is dict):\n            if(item_data['name'] in item):\n                attribute_values.append(item[item_data['name']])\n            else:\n                attribute_values.append(item_data['NA'])\n        elif(type(item_data) is list): # value is a nested entry of item dict\n            item_dict=item\n            for element in range(len(item_data)-1):\n                if(item_data[element] in item_dict):\n                    item_dict=item_dict[item_data[element]]\n                #else:\n                #    print item_data\n                #    print item\n                #    print 'here '+item['itemId']\n            last_element=item_data[-1]\n            value=None\n            if(type(last_element) is str): # last value of nested entry is a string\n                value=item_dict[last_element]\n            else:\n                if(last_element[0] in item_dict): # does last dict in nested entry contain the index (or is the name in a list)?\n                    if(type(item_dict) is dict): # is last entry a dict?\n                        value=item_dict[last_element[0]]\n                    else: # otherwise the list contains the value?\n                        value=1\n                else:\n                    if(type(item_dict) is dict): # dict does not contain the key, use predefined value instead\n                        value=last_element[1]\n                    else: # list does not contain value\n                        value=0\n            attribute_values.append(value)\n    \n    attribute_string='('+','.join(query_ids)+')'\n    attribute_indices='('+','.join(['%s']*len(query_ids))+')'\n    return attribute_string,attribute_indices,attribute_values\n\ndef add_data_details(item,timestamp,update=False):\n    global db_cursor,db_connection\n    #attribute_ids\n    attribute_ids=['itemID','return_policy','seller_storeowner','seller_business','pictures','start_price','has_reserve','reserve_met','buyitnow_price',\n                   'buyer_feedback','buyer_score','shipping_service','insurance','item_description','time_timestamp']\n\n    item_data_names=['ItemID',['ReturnPolicy',['ReturnsAcceptedOption','NA']],['Seller','SellerInfo',['StoreOwner','NA']],['Seller','SellerInfo',['SellerBusinessType','NA']],\n                     ['PictureDetails',['PictureURL','NA']],['StartPrice',['value',-1]],['ListingDetails',['HasReservePrice','NA']],['SellingStatus',['ReserveMet','NA']],\n                     ['ListingDetails','ConvertedBuyItNowPrice',['value',-1]],['SellingStatus','HighBidder',['PositiveFeedbackPercent',-1]],['SellingStatus','HighBidder',['FeedbackScore',-1]],\n                     ['ShippingDetails','ShippingServiceOptions',['ShippingService','NA']],['ShippingDetails','InsuranceDetails',['InsuranceOption','NA']],\n                     'Description',['InsuranceDetails',['timestamp',timestamp]]]\n    \n    query=db_cursor.mogrify('SELECT * FROM itemDataDetails where itemId=%s;',(item['ItemID'],))\n    db_cursor.execute(query)\n    if(db_cursor.rowcount==0 or update):\n        attribute_string,attribute_indices,attribute_values=generate_query_strings(attribute_ids,item_data_names,item)\n        if(attribute_values[13] is not None):\n            attribute_values[13]=attribute_values[13].replace('\\n','')\n            
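# truncate very long descriptions so they fit the database column\n            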
if(len(attribute_values[13])>25000):\n attribute_values[13]=attribute_values[13][0:25000]\n if(update):\n query=db_cursor.mogrify('update itemDataDetails set item_description=%s where itemid=%s',tuple([attribute_values[i] for i in[13,0]]))\n else:\n query=db_cursor.mogrify('Insert into itemDataDetails'+attribute_string+' VALUES '+attribute_indices,tuple(attribute_values))\n #print query\n #print len(attribute_values[13][0:20000])\n db_cursor.execute(query)\n db_connection.commit()\n\ndef add_data(items,timestamp):\n global db_cursor,db_connection\n attribute_ids=['itemID','title','condition_id','condition_name',\n 'listing_type','listing_bestoffer','listing_buyitnow','listing_buyitnow_price','listing_current_price',\n 'shipping_cost','shipping_type',\n 'payment_transfer','payment_pickup','payment_delivery','payment_paypal','payment_insured',\n 'url_item','url_gallery',\n 'time_timestamp','time_start','time_end','seller_id']\n #===========================================================================\n # item_data_names=['ItemID','Title',\n # ['condition','conditionId'],['condition','conditionDisplayName'],\n # ['listingInfo','listingType'],['listingInfo','bestOfferEnabled'],['listingInfo','buyItNowAvailable'],['listingInfo','buyItNowPrice',['value',-1.0]],['sellingStatus','currentPrice',['value',-1.0]],\n # ['shippingInfo','shippingServiceCost',['value',-1.0]],['shippingInfo','shippingType'],\n # ['paymentMethod',['MoneyXferAccepted',0]],['paymentMethod',['CashOnPickup',0]],['paymentMethod',['COD',0]],['paymentMethod',['PayPal',0]],0,\n # 'viewItemURL','galleryURL',\n # ['listingInfo',['timestamp',timestamp]],['listingInfo','startTime'],['listingInfo','endTime']]\n #===========================================================================\n item_data_names=['ItemID','Title',\n 'ConditionID','ConditionDisplayName',\n 'ListingType','BestOfferEnabled',{'name' :'BuyItNowAvailable','NA':'NA'},['buyItNowPrice',['value',-1.0]],['CurrentPrice',['value',-1.0]],\n ['ShippingCostSummary','ShippingServiceCost',['value',-1.0]],['ShippingCostSummary','ShippingType'],\n ['PaymentMethods',['MoneyXferAccepted',0]],['PaymentMethods',['CashOnPickup',0]],['PaymentMethods',['COD',0]],['PaymentMethods',['PayPal',0]],0,\n 'ViewItemURLForNaturalSearch',{'name' :'GalleryURL','NA':'NA'},\n ['listingInfo',['timestamp',timestamp]],'StartTime','EndTime',['Seller','UserID']]\n \n #print item\n for item in items:\n query=db_cursor.mogrify('SELECT * FROM itemData where itemId=%s;',(item['ItemID'],))\n db_cursor.execute(query)\n if(db_cursor.rowcount==0):\n attribute_string,attribute_indices,attribute_values=generate_query_strings(attribute_ids,item_data_names,item)\n query=db_cursor.mogrify('Insert into itemData'+attribute_string+' VALUES '+attribute_indices,tuple(attribute_values))\n db_cursor.execute(query)\n db_connection.commit()\n \n\ndef add_item_attributes(item,category,attributes):\n global category_attributes,aspect_filter,db_connection,db_cursor,log_file\n item_id=int(item['ItemID'])\n attribute_names=[attribute['aspectName'] for attribute in attributes]\n attribute_values=[]\n for aspect in category_attributes:\n if(aspect in attribute_names):\n #if(len(attributes[attribute_names.index(aspect)]['aspectValueName'])>64):\n # print attribute_names.index(aspect)\n # print attributes[attribute_names.index(aspect)]['aspectValueName']\n #print attributes[attribute_names.index(aspect)]['aspectValueName']\n value=attributes[attribute_names.index(aspect)]['aspectValueName']\n if(type(value) is list):\n 
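# multi-valued aspects arrive as lists; store them as a comma-separated string\n                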
value=','.join(value)\n if(len(value)>256):\n value=value[:255]\n attribute_values.append(value)\n else:\n attribute_values.append('')\n try:\n query=db_cursor.mogrify('SELECT * FROM itemAttributes where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n if(db_cursor.rowcount==0):\n if(len(aspect_filter)>0):\n aspect_names=[aspect['aspectName']+':|'+aspect['aspectValueName']+'|' for aspect in aspect_filter]\n aspect_names=','.join(aspect_names)\n else:\n aspect_names=''\n attribute_type_id=get_attribute_type_id_from_db(category,category_attributes)\n attribute_ids=['itemID','attributeTypeID']+['attributeValue_'+str(x) for x in range(1,17)]+['query']\n attribute_string='('+','.join(attribute_ids)+')'\n attribute_indices='('+','.join(['%s' for x in range(19)])+')'\n if(len(attribute_values)<16):\n attribute_values+=['']* (16-len(attribute_values))\n attribute_values=[item_id,attribute_type_id]+attribute_values+[aspect_names]\n \n query=db_cursor.mogrify('Insert into itemAttributes'+attribute_string+' VALUES '+attribute_indices,tuple(attribute_values))\n #print query\n db_cursor.execute(query)\n except psycopg2.Error as e:\n f=open(log_file,'a')\n f.write(str(e))\n f.write('\\n')\n f.close() \n\ndef get_item_states(category):\n a=2\n\ndef load_pickle_items(category):\n global db_connection\n counter=0\n filename='items_'+str(category)+'_'+str(counter)+'_0.pkl'\n while(os.path.isfile(filename)):\n #while(counter<1):\n print counter\n pkl_file=open(filename, 'r')\n result=pickle.load(pkl_file)\n pkl_file.close()\n #print result\n for item in result['searchResult']['item']:\n item_exists=add_items(int(item['itemId']),category)\n if(not item_exists):\n add_item_attributes(item,category,result['aspectFilter'])\n #add_item_data(item,result['timestamp'])\n #add_item_state(item,result['timestamp'])\n #if(item_exists):\n # print 'item already there'\n #print item['itemId']\n \n #print result\n counter+=1\n filename='items_'+str(category)+'_'+str(counter)+'_0.pkl'\n db_connection.commit()\n\n\n\ndef get_item_details(filename,update=False):\n global trading_api,api_error,db_cursor,log_file\n \n item_ids=pd.read_csv(filename)\n \n #item_ids=item_ids[0:10]\n #[291241712806,301305736929]\n \n max_item_size=2000\n current_item=0\n api_error=False\n for (index,item) in item_ids.iterrows():\n api_error=False\n itemID=item['x']\n query=db_cursor.mogrify('SELECT * FROM itemDataDetails where itemId=%s;',(itemID,))\n if(current_item>max_item_size):\n return\n #print query\n db_cursor.execute(query)\n if(db_cursor.rowcount==0 or update):\n f=open(log_file,'a')\n f.write(str(current_item)+' '+str(itemID)+' '+time.strftime(\"%H_%M_%S\"))\n #f.write('\\n')\n f.close()\n query=dict()\n query['DetailLevel']='ItemReturnDescription'\n query['itemID']=itemID\n try:\n response=trading_api.execute('GetItem',query)\n except (ConnectionError,Timeout) as e:\n f=open(log_file,'a')\n f.write(e['response'])\n f.write('\\n')\n f.close()\n api_error=True\n except(EbayConnectionError) as e:\n response=e\n #get_invalid_items(item_slice)\n #print response\n #print response[0]\n #print type(response[0])\n f=open(log_file,'a')\n f.write(' '+str(e[0].encode('utf8')))\n f.close()\n api_error=True\n except(EbayConnectionError) as e:\n print 'here2'\n #get_invalid_items(item_slice)\n f=open(log_file,'a')\n f.write(' '+str(e[0].encode('utf8')))\n f.write('\\n')\n f.close()\n api_error=True\n if(api_error):\n f=open(log_file,'a')\n f.write('\\n')\n continue\n result=response.dict()\n f=open(log_file,'a')\n f.write(' ok')\n f.write('\\n')\n 
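# persist the fetched item description via add_data_details\n            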
#print result\n add_data_details(result['Item'],result['Timestamp'],update)\n current_item=current_item+1\n \n\ndef get_multiple_items(item_ids,category,filename=None):\n global shopping_api,api_error,log_file\n \n slice_begin=0\n slice_end=20\n N=len(item_ids)\n api_error=False\n while(slice_begin<N):\n f=open(log_file,'a')\n f.write(str(slice_begin))\n f.write('\\n')\n f.close()\n item_slice=item_ids[slice_begin:slice_end]\n query=dict()\n query['sortOrder']='StartTimeNewest'\n query['itemID']=item_slice\n query['IncludeSelector']='Details,ItemSpecifics,ShippingCosts'\n slice_begin=slice_end\n slice_end+=20\n try:\n response=shopping_api.execute('GetMultipleItems',query)\n except (ConnectionError,Timeout) as e:\n f=open(log_file,'a')\n f.write(str(e))\n f.write('\\n')\n f.close()\n api_error=True\n except(EbayConnectionError) as e:\n get_invalid_items(item_slice)\n continue\n result=response.dict()\n #print result\n \n data=result['Item']\n if(type(data) is dict):\n data=[data]\n add_attributes(data,category)\n #print 'adding data'\n add_data(data,result['Timestamp'])\n #print 'adding state'\n add_state(data,result['Timestamp'])\n \n if(filename is not None):\n output = open(filename+'_'+time.strftime(\"%d_%m_%Y_%H_%M_%S\")+str(slice_begin)+'.pkl', 'w')\n pickle.dump({'timestamp':result['Timestamp'],'items':result['Item']},output,-1)\n output.close()\n \n \n #print(e.response.dict())\n\ndef remove_invalid_item(item_id):\n global db_connection,db_cursor\n \n query=db_cursor.mogrify('delete FROM itemAttributes where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n query=db_cursor.mogrify('delete FROM itemData where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n query=db_cursor.mogrify('delete FROM itemIds where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n query=db_cursor.mogrify('delete FROM itemState where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n db_connection.commit()\n\ndef get_item_history(item):\n global trading_api\n item=321516040735\n query=dict()\n #query['itemID']=item\n response=None\n try:\n response=trading_api.execute('GetAllBidders',query)\n except (ConnectionError,Timeout) as e:\n print(e)\n except(EbayConnectionError) as e:\n a=2\n print(response)\n \n \n \n\ndef get_invalid_items(items):\n global finding_api\n \n query=dict()\n for item in items:\n query['itemID']=item\n try:\n shopping_api.execute('GetSingleItem',query)\n except (ConnectionError,Timeout) as e:\n f=open(log_file,'a')\n f.write(str(e))\n f.write('\\n')\n f.close()\n except(EbayConnectionError) as e:\n #print e.message\n if 'nicht vorhandene Artikelnummer' in e.message:\n f=open(log_file,'a')\n f.write('removing invalid item '+str(item))\n f.write('\\n')\n f.close()\n remove_invalid_item(item)\n\ndef add_attributes(items,category):\n global db_connection\n for item in items:\n attributeList=[]\n if('ItemSpecifics' in item):\n attributeList=item['ItemSpecifics']['NameValueList']\n \n if(type(attributeList) is dict):\n attributeList=[attributeList]\n attributes=[{'aspectName':attribute['Name'],'aspectValueName':attribute['Value']} for attribute in attributeList]\n add_item_attributes(item,category,attributes)\n db_connection.commit()\n\ndef get_items_by_category(category,category_aspects,category_order,query_type='findItemsByCategory'):\n global finding_api\n try:\n \n keys=category_aspects.keys()\n for c in category_order:\n aspects=category_aspects[keys[c]]\n print keys[c]\n for a in range(len(aspects)):\n print aspects[a]\n 
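# query one aspect value at a time and inspect the returned aspect histogram\n                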
response=finding_api.execute(query_type,{'outputSelector':'AspectHistogram','sortOrder':'StartTimeNewest','categoryId':category,'aspectFilter':{'aspectName':keys[c],'aspectValueName':aspects[a]}})\n aspects=response.dict()['aspectHistogramContainer']['aspect']\n print(aspects)\n result=response.dict()\n print(result['paginationOutput'])\n print(result['itemSearchURL']) \n \n \n #=======================================================================\n # print(category_aspects.keys()[1])\n # print(category_aspects[category_aspects.keys()[1]][2])\n # \n # response=finding_api.execute(query_type,{'sortOrder':'StartTimeNewest','categoryId':category,'aspectFilter':{'aspectName':category_aspects.keys()[1],'aspectValueName':category_aspects[category_aspects.keys()[1]][2]}})\n # \n # result=response.dict()\n # print(result['paginationOutput'])\n # print(result['itemSearchURL'])\n # #sprint(result['searchResult'])\n # for i in range(len(result['searchResult']['item'])):\n # print(result['searchResult']['item'][i]['title'])\n # print(result['searchResult']['item'][i]['viewItemURL'])\n # #print(result['searchResult']['item'][i]['productId'])\n # \n #=======================================================================\n #finding_api.next_page()\n #response=finding_api.response\n #print(response.dict()['paginationOutput'])\n #response = finding_api.response_dict()\n #print 'here2'\n #print(finding_api.response_dict())\n #r=response.dict()\n \n #output = open('items_by_category.pkl', 'w')\n #pickle.dump(r,output,-1)\n #output.close()\n #r2=r['searchResult']\n #print(response)\n except ConnectionError as e:\n print(e)\n print(e.response.dict())\n \n\n\n\n #api = FindItem(config_file='ebay.yaml')\n #records = api.find_items_by_ids([121429757727])\n \n #print(records)\n \n #api = Connection(config_file='ebay.yaml', debug=False)\n #api = Connection(appid='ColinBau-c01c-4e3d-85cf-380674b047ac')\n #response = api.execute('findItemsAdvanced', {'keywords': 'legos'})\n \n #api = shopping(config_file='ebay.yaml',siteid=77)\n #response = api.execute('GetSingleItem', {'ItemID': '121429757727'})\n api = trading(config_file='ebay.yaml',siteid=77)\n response = api.execute('GetItem', {'ItemID': '111453544170'})\n print(response.dict())\n\n#===============================================================================\n# assert(response.reply.ack == 'Success')\n# assert(type(response.reply.timestamp) == datetime.datetime)\n# assert(type(response.reply.searchResult.item) == list)\n# \n# item = response.reply.searchResult.item[0]\n# assert(type(item.listingInfo.endTime) == datetime.datetime)\n# assert(type(response.dict()) == dict)\n# assert(type(response.dom() == _Element))\n#===============================================================================\n\ncomputer=58058\nnotebooks=175672\napple_notebooks=111422\npc_notebooks=175672\ntablets=171485\nhandys=15032\nhandys_ohne_vertrag=9355\nebay_calls=0\n\nerror=\"u'GetMultipleItems: Class: RequestError, Severity: Error, Code: 10.12, Ung\\xfcltige Artikelnummer.Ung\\xfcltige bzw. 
nicht vorhandene Artikelnummer.'\"\n\ninvalid_smartphones=[321527057063L, 261593630199L, 111460989198L, 331323569421L, 251649899348L, 251649897329L, 161426101663L, 261597891063L, 281443288620L, 361054528129L, 151414690690L, 161424692577L, 291241506297L, 171464449456L, 191336639227L, 231337364295L, 321527318914L, 111466612226L, 171465894456L, 331323670525L]\n\n\nvalid_attributes={\n tablets:['Marke','Produktlinie',u'Speicherkapazit\\xe4t','Betriebssystem','Farbe'],\n apple_notebooks:['Herstellergarantie', 'Betriebssystem','Marke',u'Bildschirmgr\\xf6\\xdfe', 'Prozessortyp', 'Arbeitsspeicher', u'Festplattenkapazit\\xe4t', 'Prozessorgeschwindigkeit', 'Produktfamilie', 'Erscheinungsjahr','Herstellernummer'],\n #handys_ohne_vertrag:['Marke','Modell',u'Speicherkapazit\\xe4t','Farbe','Verbindung','Produktpakete','Vertragslaufzeit']\n handys_ohne_vertrag:['Brand','Model','Memory','Color','Connection','Produktpakete','Vertragslaufzeit']\n}\n\n\ndef get_items_by_seller(category,seller):\n global finding_api\n \n query=dict()\n item_filter=[{'name':'SoldItemsOnly','value':'true'}]\n query['ItemFilter']=item_filter\n #query\n query['categoryId']=category\n #query['categoryId']=category\n \n response=finding_api.execute('findCompletedItems',query)\n print response.dict()\n\n# run once in a while...\ndef get_missing_data(category):\n global api_error,log_file\n \n all_items=get_all_items_from_db(category)\n unknown_items=get_incomplete_data(all_items)\n f=open(log_file,'a')\n f.write('there are '+str(len(unknown_items))+' missing values')\n f.write('\\n')\n f.close()\n api_error=True\n while(api_error):\n get_multiple_items(unknown_items,category)\n \n\ndef get_incomplete_data(item_ids):\n incomplete_items=[]\n for item_id in item_ids:\n incomplete=False\n query=db_cursor.mogrify('SELECT * FROM itemAttributes where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n if(db_cursor.rowcount==0):\n incomplete=True\n query=db_cursor.mogrify('SELECT * FROM itemData where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n if(db_cursor.rowcount==0):\n incomplete=True\n query=db_cursor.mogrify('SELECT * FROM itemIds where itemId=%s;',(item_id,))\n db_cursor.execute(query)\n if(db_cursor.rowcount==0):\n incomplete=True\n if(incomplete):\n incomplete_items.append(item_id)\n return incomplete_items\n\n\ndef get_past_items(category,aspect_filter):\n \n item_ids=get_all_items(category,aspect_filter,query_type='findCompletedItems')\n output = open('itemids_completed_'+str(category)+'_'+time.strftime(\"%d_%m_%Y\")+'.pkl', 'w')\n pickle.dump(item_ids,output,-1)\n output.close()\n #output = open('itemids_completed_9355_18_09_2014.pkl', 'r')\n #item_ids=pickle.load(output)\n print 'past items: nr items '+str(len(item_ids))\n \n unknown_items=get_incomplete_data(item_ids)\n \n print 'past items: nr new items '+str(len(unknown_items))\n add_items(unknown_items,category)\n \n #new_items=item_ids\n \n get_multiple_items(unknown_items,category)\n\ndef remove_items(items):\n global db_cursor\n try:\n for item in items:\n query=db_cursor.mogrify('DELETE FROM itemids where itemId=%s;',(item,))\n db_cursor.execute(query)\n query=db_cursor.mogrify('DELETE FROM itemattributes where itemId=%s;',(item,))\n db_cursor.execute(query)\n query=db_cursor.mogrify('DELETE FROM itemdata where itemId=%s;',(item,))\n db_cursor.execute(query)\n query=db_cursor.mogrify('DELETE FROM itemstate where itemId=%s;',(item,))\n db_cursor.execute(query)\n except psycopg2.Error as e:\n print e\n db_connection.commit()\n \ndef 
remove_items_pickle(filename):\n pickle_file = open(filename, 'r')\n items=pickle.load(pickle_file)\n print 'Removing '+str(len(items))+' items'\n pickle_file.close()\n remove_items(items)\n\ndef get_items_pickle(filename):\n pickle_file = open(filename, 'r')\n item_ids=pickle.load(pickle_file)\n pickle_file.close()\n return item_ids\n\ndef get_data_pickle(filename):\n \n items={'timestamp':None,'items':list()}\n all_item_ids=[]\n counter=0\n \n root, dirs, files=os.walk('.')\n files = [ fi for fi in files if fi.startswith(\"filename\") ]\n print files\n for f in files:\n if(not os.path.isfile(filename)):\n break\n pkl_file=open(f, 'r')\n item_data=pickle.load(pkl_file)\n pkl_file.close()\n items['timestamp']=item_data['timestamp']\n item_ids=[item['ItemID'] for item in item_data['items']]\n all_item_ids+=item_ids\n items['items'].append(item_data['items'])\n return all_item_ids,items\n\n\ndef load_items():\n a=2\n\ndef get_new_items(category,aspect_filter,stop_if_known=True,id_file=None,data_file=None):\n global api_error,log_file\n if(id_file is not None):\n item_ids=get_items_pickle(id_file)\n else:\n api_error=True\n while(api_error):\n item_ids=get_all_items(category,aspect_filter,query_type='findItemsByCategory',stop_if_known=stop_if_known)\n \n f=open(log_file,'a')\n f.write('available items: '+str(len(item_ids)))\n f.write('\\n')\n f.close()\n unknown_items=get_incomplete_data(item_ids)\n add_items(unknown_items,category)\n f=open(log_file,'a')\n f.write('new items: '+str(len(unknown_items)))\n f.write('\\n')\n f.close()\n api_error=True\n while(api_error):\n get_multiple_items(unknown_items,category)\n\ndef update_states(category,time_offset=2):\n global log_file\n \n f=open(log_file,'a')\n f.write('update states')\n f.write('\\n')\n f.close()\n active_items=get_active_items_from_db(category)\n today=datetime.datetime.now()\n outdated_items=[]\n for item in active_items:\n time_delta=today-item[1]\n if(time_delta.days>0 or (time_delta.seconds/3600.)>time_offset):\n outdated_items.append(item[0])\n f=open(log_file,'a')\n f.write('active items: '+str(len(active_items)))\n f.write('\\n')\n f.write('outdated items: '+str(len(outdated_items)))\n f.write('\\n')\n f.close()\n get_multiple_items(outdated_items,category)\n\ndef get_product_details(category_id):\n global shopping_api\n \n response = shopping_api.execute('FindProducts', {'CategoryID': category_id})\n print(response.dict()) \n\nif __name__ == '__main__':\n \n cat=int(sys.argv[1])\n #cat=5\n trading_api=None\n finding_api=None\n shopping_api=None\n db_connection=None\n db_cursor=None\n log_file='logs/log_'+str(cat)+'_'+time.strftime(\"%d_%m_%Y_%H_%M_%S\")+'.csv'\n connect_db()\n \n connect_api()\n \n if(cat==4):\n get_item_details('items.csv')\n disconnect_db()\n if(api_error):\n sys.exit(1)\n sys.exit()\n if(cat==5):\n get_item_details('item_details.csv',update=True)\n disconnect_db()\n if(api_error):\n sys.exit(1)\n sys.exit()\n category_aspect_dict={}\n #for category in [apple_notebooks]:\n categories= [apple_notebooks,tablets,handys_ohne_vertrag]\n category=categories[cat]\n category_attributes=valid_attributes[category]\n api_error=True\n while(api_error):\n update_states(category)\n \n if(category==apple_notebooks):\n aspect_filter={}\n get_missing_data(apple_notebooks)\n get_new_items(category,aspect_filter)\n \n elif(category==handys_ohne_vertrag):\n category_aspects={'Modell':['iPhone 6 Plus','iPhone 6','iPhone 5c','iPhone 5s','iPhone 5','Samsung Galaxy S 4','Samsung Galaxy S 5']}\n for m in 
category_aspects['Modell']:\n aspects={'Modell':m}\n aspect_filter=get_aspect_filter(aspects)\n f=open(log_file,'a')\n f.write(str(aspect_filter))\n f.write('\\n')\n f.close()\n get_missing_data(apple_notebooks)\n get_new_items(category,aspect_filter)\n \n elif(category==tablets):\n category_aspects={'Produktlinie':['Galaxy Tab','iPad 1. Generation','iPad 2','iPad 3. Generation','iPad 4. Generation','iPad Air','iPad mini','iPad mini mit Retina Display']}\n #category_aspects={'Produktlinie':['iPad 2','iPad 3. Generation','iPad 4. Generation','iPad Air','iPad mini']}\n for m in category_aspects['Produktlinie']:\n aspects={'Produktlinie':m}\n aspect_filter=get_aspect_filter(aspects)\n f=open(log_file,'a')\n f.write(str(aspect_filter))\n f.write('\\n')\n f.close()\n get_missing_data(apple_notebooks)\n get_new_items(category,aspect_filter) \n \n disconnect_db()\n \n" }, { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.44117647409439087, "avg_line_length": 16, "blob_id": "d0620b880033916da62e08f15d6ad4fb936d650f", "content_id": "5fde86e31bbfb1ac95edc738b567751fe8c73497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/README.md", "repo_name": "kolynos/price_prediction", "src_encoding": "UTF-8", "text": "price_prediction\n================\n" }, { "alpha_fraction": 0.6344936490058899, "alphanum_fraction": 0.6413502097129822, "avg_line_length": 27.096296310424805, "blob_id": "3224c6b8b81deacf7ab7eddc2d9d24822cafb007", "content_id": "1a99d2c7974ef422fca91829a923d8d05e88a6e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3792, "license_type": "no_license", "max_line_length": 96, "num_lines": 135, "path": "/ebayPredictor.py", "repo_name": "kolynos/price_prediction", "src_encoding": "UTF-8", "text": "'''\nCreated on 18.09.2014\n\n@author: colinos\n'''\nimport psycopg2 as pg\nimport pandas.io.sql as psql\nimport numpy as np\nimport pandas as pd\nimport re\nimport sys\nfrom time import sleep\n\ndef connect_db():\n global db_connection\n db_connection = pg.connect(\"dbname=ebayPredictor user=postgres password=quejodes\")\n\ndef disconnect_db():\n global db_connection,db_cursor\n db_connection.close()\n db_connection=None \n\n\ndef get_all_items(category):\n global db_connection\n query=\"SELECT itemId FROM itemids where categoryid={}\".format(category)\n print query\n return psql.read_sql(query, db_connection)\n \ndef get_all_attributes():\n global db_connection\n query=\"SELECT * FROM itemAttributes\"\n return psql.read_sql(query, db_connection)\n\ndef get_all_states():\n global db_connection\n query=\"SELECT * FROM itemState\"\n return psql.read_sql(query, db_connection)\n\ndef get_item_attributes(category):\n global db_connection\n \n items=get_all_items(category)\n attributes=get_all_attributes()\n \n attributes=pd.merge(left=items,right=attributes,left_on='itemid',right_on='itemid')\n \n #print items.head()\n attributes.to_csv('attributes.csv',encoding='utf-8')\n\ndef dump_tables():\n global db_connection\n query=\"SELECT * FROM itemIds\"\n item_ids=psql.read_sql(query, db_connection)\n item_ids.to_csv('item_ids.csv',encoding='utf-8',index=False,sep='|')\n query=\"SELECT * FROM itemAttributeIds\"\n item_attribute_ids=psql.read_sql(query, db_connection)\n item_attribute_ids.to_csv('item_attribute_ids.csv',encoding='utf-8',index=False,sep='|')\n query=\"SELECT * FROM itemState\"\n 
item_state=psql.read_sql(query, db_connection)\n item_state.to_csv('item_state.csv',encoding='utf-8',index=False,sep='|')\n query=\"SELECT * FROM itemAttributes\"\n item_attributes=psql.read_sql(query, db_connection)\n item_attributes.to_csv('item_attributes.csv',encoding='utf-8',index=False,sep='|')\n query=\"SELECT * FROM itemData\"\n item_data=psql.read_sql(query, db_connection)\n item_data.to_csv('item_data.csv',encoding='utf-8',index=False,sep='|')\n query=\"SELECT * FROM itemDataDetails\"\n item_data=psql.read_sql(query, db_connection)\n item_data.to_csv('item_data_details.csv',encoding='utf-8',index=False,sep='|')\n \ndef remove_backslashes():\n query=\"SELECT * FROM itemData\"\n item_data=psql.read_sql(query, db_connection)\n db_cursor = db_connection.cursor()\n pattern=re.compile('[\\W_]+', re.UNICODE)\n \n for (index,row) in item_data.iterrows():\n #print row['title']\n if('\\\\' in row['title']):\n title=row['title']\n new_title=title.replace('\\\\','')\n print title\n #for word in row['title']:\n # \n # if(re.match(pattern, word)):\n # print word\n \n query='UPDATE itemData set title=\\'{}\\' where itemid={}'.format(new_title,row['itemid'])\n db_cursor.execute(query)\n \n db_connection.commit()\n #for item in item_data\n\ndef get_item_states(category):\n global db_connection\n \n items=get_all_items(category)\n states=get_all_states()\n \n states=pd.merge(left=items,right=states,left_on='itemid',right_on='itemid')\n \n #print items.head()\n states.to_csv('states.csv',encoding='utf-8')\n\n\ndef write_test():\n for i in range(10):\n print i\n sleep(1)\n \n\ndef exception_test():\n global error\n error=False\n a=[1,2,3]\n try:\n b=a[5]\n except IndexError as e:\n print e\n error=True\n return error\nif __name__ == '__main__':\n error=None\n db_connection=None\n connect_db()\n func = globals().get(sys.argv[1])\n #if func is None:\n # _usage_and_exit()\n\n func(*sys.argv[2:])\n \n disconnect_db()\n \n pass" } ]
4
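The `get_multiple_items` routine embedded in the record above walks `item_ids` in fixed slices of 20 (the `GetMultipleItems` limit) and, on a `ConnectionError`/`Timeout`, sets a global `api_error` flag so its callers restart the whole list with `while(api_error): get_multiple_items(...)`. Below is a minimal, self-contained sketch of the same slice-based fetching that retries only the failed slice; `fetch_batch` is a hypothetical stand-in for the eBay API call and is not part of the original code:

```python
import time

BATCH_SIZE = 20  # GetMultipleItems accepts at most 20 item IDs per request


def fetch_in_batches(item_ids, fetch_batch, max_retries=3, backoff=5):
    """Yield one parsed result per slice, retrying only the failed slice.

    `fetch_batch` is a hypothetical callable standing in for the
    shopping_api.execute('GetMultipleItems', ...) call in the record above.
    """
    for start in range(0, len(item_ids), BATCH_SIZE):
        batch = item_ids[start:start + BATCH_SIZE]
        for attempt in range(1, max_retries + 1):
            try:
                yield fetch_batch(batch)
                break  # slice succeeded, move on to the next one
            except (ConnectionError, TimeoutError) as exc:
                print(f"batch at offset {start} failed (attempt {attempt}): {exc}")
                time.sleep(backoff * attempt)  # linear back-off before retrying
        else:
            print(f"giving up on batch starting at offset {start}")
```

Retrying per slice avoids re-requesting batches that already succeeded, which the global-flag loop in the original does every time an error occurs.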
m-amoit/ds
https://github.com/m-amoit/ds
bc0ba3a921fd4e31bd5eb63d326e01cc59c463eb
d8eef09f0fea1dab7c35e979be063427e7f1aefc
95d0cff60899d4b36aad0070c93905b252d5251d
refs/heads/master
2021-01-01T05:09:26.056492
2016-05-11T10:04:53
2016-05-11T10:04:53
58534745
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6212121248245239, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 11.730769157409668, "blob_id": "ca68b2f28b96fa64661d981e149593b02c286e19", "content_id": "7505fbb4b8e21e239dcb8fa25761a3d6e2f9f587", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 40, "num_lines": 26, "path": "/practice_tuples.py", "repo_name": "m-amoit/ds", "src_encoding": "UTF-8", "text": "fhand = open('romeo.txt')\ncounts = dict()\nfor line in fhand:\n\twords = line.split()\n\tfor word in words:\n\t\tcounts[word] = counts.get(word, 0) + 1\n\t\nprint counts\n\n# Sort dictionary by value\n\nlst = list()\nfor k, v in counts.items():\n\tlst.append( (v, k))\nprint ''\nlst.sort(reverse=True)\n\nfor k, v in lst[:10]:\n\tprint v, k\n\n\n\n\n\n\ninput()" }, { "alpha_fraction": 0.6743515729904175, "alphanum_fraction": 0.6743515729904175, "avg_line_length": 30.454545974731445, "blob_id": "9afb62b031dcc52be599a201f1e029047600b929", "content_id": "c6997d31f7f239ad05bd3d1bb601bb5a367b6d9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 694, "license_type": "no_license", "max_line_length": 71, "num_lines": 22, "path": "/8.4_list_methods.py", "repo_name": "m-amoit/ds", "src_encoding": "UTF-8", "text": "'''\nOpen the file romeo.txt and read it line by line. For each line, split \nthe line into a list of words using the split() method. The program \nshould build a list of words. For each word on each line check to see \nif the word is already in the list and if not append it to the list. \nWhen the program completes, sort and print the resulting words in \nalphabetical order. You can download the sample data at \n\n\thttp://www.pythonlearn.com/code/romeo.txt\n'''\n\nfname = raw_input(\"Enter file name: \")\nfh = open(fname)\nlst = list()\nfor line in fh:\n line = line.split()\n for word in line:\n if word not in lst:\n lst.append(word)\n lst.sort()\n \nprint lst\n\n\n" }, { "alpha_fraction": 0.4430379867553711, "alphanum_fraction": 0.5063291192054749, "avg_line_length": 15, "blob_id": "80f9974a3ffbc1b996af91ccdfe86a3159325fe2", "content_id": "6b52eae7c33f9f5e3c968ac69dc3f176a8008938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/new_shortened.py", "repo_name": "m-amoit/ds", "src_encoding": "UTF-8", "text": "c={'a':10, 'b':1, 'c':22}\nprint sorted([(v,k) for k, v in c.items()])\n\n\ninput()" }, { "alpha_fraction": 0.6923800706863403, "alphanum_fraction": 0.6980244517326355, "avg_line_length": 31.90625, "blob_id": "160bcc4a1532a204334d69f534c27f802a141629", "content_id": "04876310f21459944de3bdf7c9e2f82872a9b096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 61, "num_lines": 32, "path": "/9.4_dict_highest_freq.py", "repo_name": "m-amoit/ds", "src_encoding": "UTF-8", "text": "'''\n9.4 Write a program to read through the mbox-short.txt and \nfigure out who has the sent the greatest number of mail \nmessages. The program looks for 'From ' lines and takes the \nsecond word of those lines as the person who sent the mail. \nThe program creates a Python dictionary that maps the \nsender's mail address to a count of the number of times they \nappear in the file. 
After the dictionary is produced, the \nprogram reads through the dictionary using a maximum loop \nto find the most prolific committer.\n'''\n\nname = raw_input(\"Enter file: \")\nif len(name) < 1 : name = \"mbox-short.txt\"\nhandle = open(name)\nsenders=[]\ncounts = dict()\nfor line in handle:\n line = line.rstrip()\n if line.startswith('From '):\n words = line.split()\n senders.append(words[1])\nfor sender in senders:\n counts[sender] = counts.get(sender, 0) + 1\nbigcount = None\nbigsender = None\nfor sender, count in counts.items():\n if bigcount is None or count>bigcount:\n bigsender = sender\n bigcount = count\n \nprint bigsender, bigcount\n \n\n" }, { "alpha_fraction": 0.6938271522521973, "alphanum_fraction": 0.7061728239059448, "avg_line_length": 31.360000610351562, "blob_id": "3becab075cd44ddebbdecbf07cc91288623b0a89", "content_id": "ec662a85e488050e03b92b908c1f366ca9589796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/7.2_extract_count_and_average.py", "repo_name": "m-amoit/ds", "src_encoding": "UTF-8", "text": "'''\nWrite a program that prompts for a file name, then opens that file \nand reads through the file, looking for lines of the form:\n\tX-DSPAM-Confidence: 0.8475\nCount these lines and extract the floating point values from each of \nthe lines and compute the average of those values and produce an output \nas shown below. Do not use the sum() function or a variable named sum in \nyour solution.\n\n Use the file name mbox-short.txt as the file name\n'''\n\nfname = raw_input(\"Enter file name: \")\nfh = open(fname)\ncount = 0\ntotal = 0\nfor line in fh:\n if line.startswith(\"X-DSPAM-Confidence:\"):\n count +=1 # Count number of line\n num = line[20: ] #Extract the floating point\n total += float(num) #caculate total\n avg = total / count #calculate average\nprint count\nprint total\nprint avg\n\n" } ]
5
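The practice scripts in this record build word counts by hand and rank them by flipping each `(key, value)` pair into `(value, key)` and sorting the tuples. A minimal Python 3 sketch of the same frequency ranking using `collections.Counter`, which folds the counting and the sort-by-count step together; it assumes the same `romeo.txt` input file the originals read:

```python
from collections import Counter

# Count words line by line — exactly what the manual
# counts[word] = counts.get(word, 0) + 1 loop computes.
with open('romeo.txt') as fhand:
    counts = Counter(word for line in fhand for word in line.split())

# Ten most frequent words, mirroring lst.sort(reverse=True) + lst[:10].
for word, count in counts.most_common(10):
    print(count, word)
```

One difference worth noting: `most_common` breaks count ties by insertion order, while sorting `(value, key)` tuples in reverse breaks ties on the word itself.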
Cepesp-Fgv/tse-dados
https://github.com/Cepesp-Fgv/tse-dados
1bd463e22352af074a6d5c22aea0101f8678ae2f
74f91e871cf697d8c8a3aa84cd426bda08251558
f5cfb81654f63dd8cd0428ebfaae22f5c310896c
refs/heads/master
2023-01-08T07:37:40.704491
2020-08-27T22:49:58
2020-08-27T22:49:58
89556429
33
9
null
2017-04-27T04:41:54
2022-10-16T01:43:19
2023-01-04T06:20:38
Python
[ { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5898617506027222, "avg_line_length": 23.22222137451172, "blob_id": "5b74b0448fb86856861417ec3f9a1be0478f262f", "content_id": "5d676bdb8a870608344dc0aa426b3afd14eeccde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/etl/fixes/BemCandidatoSiglaUEFix.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "class BemCandidatoSiglaUEFix:\n\n def check(self, item):\n return item['database'] == 'bem_candidato' and item[\"year\"] < 2014\n\n def apply(self, df):\n df['SIGLA_UE'] = df['SIGLA_UF']\n\n return df" }, { "alpha_fraction": 0.6623634696006775, "alphanum_fraction": 0.6623634696006775, "avg_line_length": 21.377777099609375, "blob_id": "07056dc96b591a642a0bfba8b370d7fe176d7943", "content_id": "90960533d2348b5f5f572dbad5e2305f5339860a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1007, "license_type": "no_license", "max_line_length": 112, "num_lines": 45, "path": "/web/cepesp/database.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from peewee import MySQLDatabase, Model, TextField, CharField, DateTimeField, AutoField\n\nfrom web.cepesp.config import DB_DATABASE, DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT\n\ndatabase_client = MySQLDatabase(DB_DATABASE, user=DB_USERNAME, password=DB_PASSWORD, host=DB_HOST, port=DB_PORT)\n\n\nclass BaseModel(Model):\n class Meta:\n database = database_client\n\n\nclass CacheEntry(BaseModel):\n class Meta:\n table_name = 'cache_entries'\n\n id = AutoField()\n sql = TextField()\n name = CharField()\n env = CharField()\n last_status = CharField()\n athena_id = CharField()\n created_at = DateTimeField()\n\n\ndef migrate():\n open_connection()\n database_client.create_tables([CacheEntry])\n close_connection()\n\n\ndef open_connection():\n try:\n database_client.connect(reuse_if_open=True)\n except Exception as e:\n print(e)\n pass\n\n\ndef close_connection():\n try:\n database_client.close()\n except Exception as e:\n print(e)\n pass\n" }, { "alpha_fraction": 0.7104557752609253, "alphanum_fraction": 0.7211796045303345, "avg_line_length": 22.3125, "blob_id": "4176681f7e001e97b73c4c7bdba38b544167ffd7", "content_id": "3a9c9a4fc448d03287496baf71fe1fe30a0479ce", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 759, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/.github/ISSUE_TEMPLATE/bug_report.md", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "---\nname: Reportar bug\nabout: Crie um bug report\ntitle: 'Descrição clara e concisa do problema'\nlabels: 'bug'\nassignees: ''\n\n---\n\n**Passos para reproduzir**\nPassos para reproduzir o problema:\n1. Navegue para '...'\n2. Clique em '....'\n3. Dê scroll até '....'\n4. Digite 'abc' em '...'\n5. Veja o erro\n\n**Comportamento esperado**\nUma descrição clara e concisa do que você esperava que acontecesse.\n\n**Comportamento observado**\nUma descrição clara e concisa do que realmente aconteceu.\n\n**Screenshots**\nSe aplicável, adicione screenshots para melhor explicar o problema.\n\n**Informações do dispositivo:**\n - Dispositivo: [e.g. iPhone6]\n - Versão do SO: [e.g. 
iOS8.1]\n\n**Contexto adicional**\nAdicione mais qualquer outro contexto sobre o problema aqui.\n" }, { "alpha_fraction": 0.539973795413971, "alphanum_fraction": 0.5406290888786316, "avg_line_length": 28.921567916870117, "blob_id": "469721a753f6b6751e32fe2bfee0ef8430a81616", "content_id": "67f7fc0d1e8341df9b1055bda40034d1fe8686ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1526, "license_type": "no_license", "max_line_length": 85, "num_lines": 51, "path": "/web/cepesp/athena/builders/party_affiliations.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.athena.builders.base import AthenaBuilder\nfrom web.cepesp.columns.filiados import PartyAffiliationsColumnsSelector\n\n\nclass PartyAffiliationsQueryBuilder(AthenaBuilder):\n\n def __init__(self, **options):\n super().__init__(**options)\n self.selector = PartyAffiliationsColumnsSelector()\n\n def build(self):\n columns_renamed = \", \".join([f\"{c} AS {c}\" for c in self.selected_columns()])\n\n return f'''\n SELECT {columns_renamed}\n FROM filiados\n WHERE {self._build_filter_party()} AND {self._build_filter_uf()}\n {self._build_filters('AND')}\n {self._build_order_by()}\n '''\n\n def _build_filter_uf(self):\n uf = self.opt('uf_filter')\n if uf:\n return f\"p_uf = '{uf}'\"\n else:\n return \"\"\n\n def _build_filter_party(self):\n party = self.opt('party')\n if party:\n party = party.lower().replace(' ', '_')\n return f\"p_partido = '{party}'\"\n else:\n return \"\"\n\n # region def _build_filters(self, start): [...]\n def _build_filters(self, start):\n where = self._build_base_filters()\n\n if self.opt('mun_filter'):\n where.append(f\"COD_MUN_TSE = '{self.options['mun_filter']}'\")\n\n if self.opt('turno'):\n where.append(f\"NUM_TURNO = '{self.options['turno']}'\")\n\n if len(where) > 0:\n return f\"{start} \" + \"\\n AND \".join(where)\n else:\n return \"\"\n # endregion\n" }, { "alpha_fraction": 0.5569019913673401, "alphanum_fraction": 0.559536337852478, "avg_line_length": 30.11475372314453, "blob_id": "532de32998e320b8cb33ffb7d549d09de5177bab", "content_id": "ab373ef7635120f8be0c5db99e9171731c43a294", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1898, "license_type": "no_license", "max_line_length": 102, "num_lines": 61, "path": "/etl/process/FixProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\nfrom _csv import QUOTE_ALL\n\nimport inflection as inflection\nimport pandas as pd\n\n\nclass FixProcess:\n\n def __init__(self, fixes, output):\n self.fixes = fixes\n self.output = output\n self.items = []\n\n def fix_name(self, i):\n return inflection.underscore(self.fixes[i].__class__.__name__)\n\n def get_item_file(self, item, i):\n if 1 <= i < len(self.fixes):\n path = os.path.join(self.output, self.fix_name(i), item['name'])\n if not os.path.exists(path):\n return self.get_item_file(item, i - 1)\n else:\n return path\n else:\n return item['path']\n\n def get_item_df(self, item, i):\n path = self.get_item_file(item, i)\n return pd.read_csv(path, sep=';', dtype=str)\n\n def save_fix_df(self, item, df, i):\n path = os.path.join(self.output, self.fix_name(i), item['name'])\n directory = os.path.dirname(path)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n df.to_csv(path, compression='gzip', sep=';', encoding='utf-8', index=False, quoting=QUOTE_ALL)\n\n def apply_fix(self, item, i):\n if not self.fix_done(item, i) and 
self.fixes[i].check(item):\n print(\"STEP %s on %s\" % (self.fix_name(i), item['name']))\n\n df = self.get_item_df(item, i - 1)\n df = self.fixes[i].apply(df)\n self.save_fix_df(item, df, i)\n\n def output_files(self):\n files = []\n for item in self.items:\n files.append(self.get_item_file(item, len(self.fixes) - 1))\n return files\n\n def handle(self, item):\n self.items.append(item)\n for i in range(len(self.fixes)):\n self.apply_fix(item, i)\n\n def fix_done(self, item, i):\n path = os.path.join(self.output, self.fix_name(i), item['name'])\n return os.path.exists(path)\n" }, { "alpha_fraction": 0.5145998001098633, "alphanum_fraction": 0.5183785557746887, "avg_line_length": 28.704082489013672, "blob_id": "6b5c264ccc9e83473dcfe0aa9b36b32f23eefece", "content_id": "fe8aa5fc10633b0b77ccec89096d873134662ca9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2911, "license_type": "no_license", "max_line_length": 108, "num_lines": 98, "path": "/web/cepesp/athena/builders/base.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.utils.request import escape\nfrom web.cepesp.utils.data import guess_match_type\nfrom web.cepesp.athena.builders.utils import opt, arg\n\n\nclass AthenaBuilder:\n int_columns = ['ID_CANDIDATO', 'ID_LEGENDA', 'v.ID_CANDIDATO', 'v.ID_LEGENDA']\n\n def __init__(self, **options):\n self.options = options\n self.selector = None\n\n def opt(self, key, default=None):\n return opt(self.options, key, default)\n\n def arg(self, key):\n return arg(self.options, key)\n\n # region Options\n def selected_columns(self):\n selected = self.opt('selected_columns', [])\n all_columns = self.columns()\n if len(selected) == 0:\n return all_columns\n else:\n return [c for c in selected if c in all_columns]\n\n def table_name(self, prefix):\n reg = self.arg('reg')\n if reg == 9:\n return f\"{prefix}_votsec\"\n elif reg == 8:\n return f\"{prefix}_zona\"\n elif reg == 7:\n return f\"{prefix}_munzona\"\n elif reg == 6:\n return f\"{prefix}_mun\"\n elif reg == 5:\n return f\"{prefix}_micro\"\n elif reg == 4:\n return f\"{prefix}_meso\"\n else:\n return f\"{prefix}_uf\"\n\n def columns(self):\n return self.selector.columns()\n\n # endregion\n\n # region Custom Statement Builders\n def _map_column(self, column):\n return column\n\n def _build_filter_job(self):\n job = self.arg('job')\n if job == 7:\n return \"(p_cargo = '7' OR p_cargo = '8')\"\n else:\n return f\"(p_cargo = '{job}')\"\n\n def _build_order_by(self):\n selected = self.selected_columns()\n order_by = [f\"{self._map_column(c)} ASC\" for c in self.selector.order_by_columns() if c in selected]\n\n if len(order_by) > 0:\n order_by = \", \".join(order_by)\n return f\"ORDER BY {order_by}\"\n else:\n return \"\"\n\n def _build_base_filters(self):\n filters = self.opt('filters', {})\n columns = self.columns()\n\n where = []\n for column, value in filters.items():\n if value and column in columns:\n match_type = guess_match_type(value)\n column = self._map_column(column)\n value = escape(value)\n\n if column in self.int_columns:\n where.append(f\"{column} = {value}\")\n if match_type == \"int\":\n where.append(f\"{column} = '{value}'\")\n elif match_type == \"list\":\n value = \"', '\".join(value)\n where.append(f\"{column} IN ('{value}')\")\n else:\n value = str(value).lower()\n where.append(f\"REGEXP_LIKE(LOWER({column}), '{value}')\")\n\n return where\n\n # endregion\n\n def build(self):\n raise NotImplemented()\n" }, { "alpha_fraction": 
0.5808363556861877, "alphanum_fraction": 0.6025683283805847, "avg_line_length": 24.96581268310547, "blob_id": "5d3968104572139d6813d14328ddcc7db7811267", "content_id": "c7934b1260f1021d5eeeb3156ab052fda6a40a0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3037, "license_type": "no_license", "max_line_length": 105, "num_lines": 117, "path": "/web/resources/js/cepesp.js", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import axios from 'axios';\n\nasync function getQuery(params) {\n let response = await axios.get('/api/consulta/athena/query', {\n params,\n paramsSerializer: (p) => $.param(p),\n });\n\n return {\n id: response.data.id,\n sql: response.data.sql,\n name: response.data.name,\n table: params.table,\n start: params.start,\n length: params.length\n };\n}\n\nasync function getStatus(query_id) {\n let params = {id: query_id};\n let response = await axios.get('/api/consulta/athena/status', {params});\n let {status, message} = response.data;\n\n return [status, message];\n}\n\nasync function getResult(query_id, start, length, format) {\n let params = {\n id: query_id,\n start: start,\n length: length,\n format: format || 'json',\n ignore_version: true\n };\n let response = await axios.get('/api/consulta/athena/result', {params});\n\n return response.data;\n}\n\nasync function getColumns(params) {\n let response = await axios.get('/api/consulta/athena/columns', {\n params,\n paramsSerializer: (p) => $.param(p),\n });\n\n return {\n 'columns': response.data.columns || [],\n 'translated_columns': response.data.translated_columns || {},\n 'default_columns': response.data.default_columns || [],\n 'descriptions': response.data.descriptions || {},\n }\n}\n\n\nasync function runQuery(params, onStatusUpdateCallback, sleepDelay) {\n let info = await startQuery(params, onStatusUpdateCallback, sleepDelay);\n let results = await getResult(info.id, info.start, info.length, params.format);\n return {info, results};\n}\n\n\nasync function startQuery(params, onStatusUpdateCallback, sleepDelay) {\n let info = await getQuery(params);\n let status = \"QUEUED\";\n let message = \"\";\n let sleep = sleepDelay || 1000;\n let total = 0;\n\n while (status === \"RUNNING\" || status === \"QUEUED\") {\n if (onStatusUpdateCallback) onStatusUpdateCallback(status, message, total);\n\n await wait(sleep); total += sleep;\n\n let [newStatus, newMessage] = await getStatus(info.id);\n status = newStatus;\n message = newMessage;\n }\n\n if (onStatusUpdateCallback) onStatusUpdateCallback(status, message, total);\n\n return info;\n}\n\n\nasync function lambdaQuery(params) {\n let response = await axios.get('https://api.cepespdata.io/api/query/result', {\n params,\n paramsSerializer: (p) => $.param(p),\n });\n\n return response.data;\n}\n\n\nfunction getYears(job) {\n switch (parseInt(job)) {\n case 1:\n case 2:\n case 3:\n case 4:\n case 5:\n case 6:\n case 7:\n case 8:\n case 9:\n case 10:\n return [2018, 2014, 2010, 2006, 2002, 1998];\n case 11:\n case 12:\n case 13:\n return [2016, 2012, 2008, 2004, 2000];\n default:\n return [];\n }\n}\n\nexport default {getQuery, getStatus, getResult, getYears, getColumns, runQuery, startQuery, lambdaQuery};" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 23, "blob_id": "15ea020affdee031e4b8e68cabe9a4ad54a76f59", "content_id": "b6a37b91aefc4785e6d2dc52767995c1e0ea2eca", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 92, "num_lines": 10, "path": "/web/tests/run.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.tests.responses import test_repeated_macro, test_duplicated_votes, test_response_ok\n\n\ndef test():\n #test_repeated_macro.test()\n #test_duplicated_votes.test()\n test_response_ok.test()\n\nif __name__ == \"__main__\":\n test()\n" }, { "alpha_fraction": 0.7633587718009949, "alphanum_fraction": 0.7633587718009949, "avg_line_length": 31.75, "blob_id": "6427065fcc75e244a454189327d504b9df6aeb35", "content_id": "128d81806b1a6cba1aa7a5a53c3d2e6500787083", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 676, "license_type": "no_license", "max_line_length": 95, "num_lines": 20, "path": "/.github/ISSUE_TEMPLATE/feature_request.md", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "---\nname: Nova Feature\nabout: Descreva uma ideia para esse projeto\ntitle: 'Descrição clara e concisa da feature'\nlabels: ''\nassignees: ''\n\n---\n\n**A sua feature é relacionada a um problema? Descreva-o**\nUma descrição clara e concisa do que é o problema. Ex.: Eu sempre fico frustrado quando [...]\n\n**Descreva a solução que você gostaria**\nUma descrição clara e concisa do que você quer que aconteça.\n\n**Descreva alternativas que você considerou**\nUma descrição clara e concisa de quaisquer outras soluções ou alternativas que você considerou.\n\n**Contexto adicional**\nVocê pode adicionar mais contexto e / ou screenshots sobre a feature aqui, se aplicável.\n" }, { "alpha_fraction": 0.5467328429222107, "alphanum_fraction": 0.5500413775444031, "avg_line_length": 26.477272033691406, "blob_id": "37b376cfc305b9f9cb11d16543761b8043e086ee", "content_id": "33f3d657a04243dc206b7c78e1306def09a5616f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 103, "num_lines": 44, "path": "/etl/process/CoalitionsProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom etl.process.DimensionProcess import DimensionsProcess\n\n\nclass CoalitionsProcess(DimensionsProcess):\n columns = [\n \"ID_LEGENDA\",\n \"ANO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"COMPOSICAO_COLIGACAO\",\n \"DATA_GERACAO\",\n \"DESCRICAO_CARGO\",\n \"DESCRICAO_ELEICAO\",\n \"DESCRICAO_UE\",\n \"HORA_GERACAO\",\n \"NOME_COLIGACAO\",\n \"NOME_PARTIDO\",\n \"NUMERO_PARTIDO\",\n \"NUM_TURNO\",\n \"SEQUENCIA_COLIGACAO\",\n \"SIGLA_COLIGACAO\",\n \"SIGLA_PARTIDO\",\n \"SIGLA_UE\",\n \"SIGLA_UF\",\n \"TIPO_LEGENDA\"\n ]\n\n def get_id_column(self):\n return \"ID_LEGENDA\"\n\n def check(self, item):\n return item['database'] == \"legendas\"\n\n def get_columns(self):\n return self.columns\n\n def get_brancos_df(self, item, job):\n return pd.DataFrame([\n {'ANO_ELEICAO': str(item['year']), 'CODIGO_CARGO': str(job), 'NOME_PARTIDO': 'VOTO BRANCO',\n 'NUMERO_PARTIDO': '95'},\n {'ANO_ELEICAO': str(item['year']), 'CODIGO_CARGO': str(job), 'NOME_PARTIDO': 'VOTO NULO',\n 'NUMERO_PARTIDO': '96'}\n ], columns=self.columns).fillna('#NE#')\n" }, { "alpha_fraction": 0.5456033945083618, "alphanum_fraction": 0.5760056376457214, "avg_line_length": 26.58709716796875, "blob_id": "f689531622b412e8841dbfe744255965c6d4d715", "content_id": "8dbbd41cd4f5732a6b0b358c199b589a6b075087", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 4276, "license_type": "no_license", "max_line_length": 116, "num_lines": 155, "path": "/web/cepesp/routes/queries.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from flask import session, request, render_template\n\nfrom web.cepesp.athena.options import AthenaQueryOptions\nfrom web.cepesp.utils.data import get_years\nfrom web.cepesp.utils.mun import get_uf_list, get_mun_list, get_nomes_secretarios_list\nfrom web.cepesp.utils.session import get_locale\n\n\ndef consulta_tse_2():\n session['back'] = request.path\n options = AthenaQueryOptions('tse')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"tse2.html\",\n options=options,\n page=2,\n show=show,\n mode=mode,\n years=[2018, 2016, 2014, 2012, 2010, 2008, 2006, 2004, 2002, 2000, 1998],\n uf_list=get_uf_list(),\n mun_list=get_mun_list(),\n lang=get_locale()\n )\n\n\ndef consulta_tse():\n session['back'] = request.path\n options = AthenaQueryOptions('tse')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"tse.html\",\n options=options,\n page=2,\n show=show,\n mode=mode,\n years=get_years(options.job),\n uf_list=get_uf_list(),\n mun_list=get_mun_list(),\n lang=get_locale()\n )\n\n\ndef consulta_candidatos():\n session['back'] = request.path\n options = AthenaQueryOptions('candidatos')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"candidatos.html\",\n options=options,\n page=3,\n show=show,\n mode=mode,\n years=get_years(options.job),\n uf_list=get_uf_list(),\n lang=get_locale()\n )\n\n\ndef consulta_legendas():\n session['back'] = request.path\n options = AthenaQueryOptions('legendas')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"legendas.html\",\n options=options,\n page=4,\n show=show,\n mode=mode,\n years=get_years(options.job),\n uf_list=get_uf_list(),\n lang=get_locale()\n )\n\n\ndef consulta_votos():\n session['back'] = request.path\n options = AthenaQueryOptions('votos')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"votos.html\",\n options=options,\n page=5,\n show=show,\n mode=mode,\n years=get_years(options.job),\n uf_list=get_uf_list(),\n lang=get_locale()\n )\n\n\ndef consulta_bem_candidato():\n session['back'] = request.path\n options = AthenaQueryOptions('bem_candidato')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"bem_candidato.html\",\n options=options,\n page=6,\n show=show,\n mode=mode,\n years=[2018, 2016, 2014, 2012, 2010, 2008, 2006],\n uf_list=get_uf_list(),\n lang=get_locale()\n )\n\n\ndef consulta_filiados():\n session['back'] = request.path\n options = AthenaQueryOptions('filiados')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"filiados.html\",\n options=options,\n page=7,\n show=show,\n mode=mode,\n parties=[\"avante\", \"dc\", \"dem\", \"mdb\", \"novo\", \"patri\", \"pc_do_b\", \"pcb\", \"pco\", \"pdt\", \"phs\", \"pmb\", \"pmn\",\n \"pode\", \"pp\", \"ppl\", \"pps\", \"pr\", \"prb\", \"pros\", \"prp\", \"prtb\", \"psb\", \"psc\", \"psd\", \"psdb\", \"psl\",\n \"psol\", \"pstu\", \"pt\", 
\"ptb\", \"ptc\", \"pv\", \"rede\", \"solidariedade\"],\n uf_list=get_uf_list(),\n lang=get_locale()\n )\n\n\ndef consulta_secretarios():\n session['back'] = request.path\n options = AthenaQueryOptions('secretarios')\n show = len(list(request.args.values())) > 0\n mode = request.args.get('mode', \"athenas\")\n\n return render_template(\n \"secretarios.html\",\n options=options,\n page=8,\n show=show,\n mode=mode,\n names_list=get_nomes_secretarios_list(),\n uf_list=get_uf_list(),\n periods=[\"1998-2002\", \"2002-2006\", \"2006-2010\", \"2010-2014\", \"2014-2018\"],\n lang=get_locale()\n )\n" }, { "alpha_fraction": 0.6554471254348755, "alphanum_fraction": 0.6609756350517273, "avg_line_length": 63.05208206176758, "blob_id": "de78d5e7ac71f230de15b5ccedc81edca1626dc7", "content_id": "81083a2d7137bd97c01e3788588fe22465d2232e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 6212, "license_type": "no_license", "max_line_length": 360, "num_lines": 96, "path": "/bookdown/06-como_usar_a_API_Rest.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# Como usar a API REST\n\nA [API Rest](https://github.com/Cepesp-Fgv/cepesp-rest) é outra forma de acessar os dados do CepespData e pode ser utilizada em diferentes _softwares_ ou programas.\n\nAqui vamos mostrar como fazer a requisição usando apenas o navegador, o _web browser_, e usando o **R**.\n\n## Estrutura das resquisições\n\nA estrutura da consulta no browser é: \n\n cepesp.io/api/consulta/athena/query?table=<TABELA>&<ARGUMENTOS>\n\nO argumento `table` indica qual é a base de dados que se deseja acessar: `tse` (banco **Resultado de eleições por cargo**), `candidatos` (banco **Perfil de candidatos**), `legendas` (**Coligações**), `votos` (banco de **Votos**), `bem_candidato` (**Bens de candidatos**), `secretarios` (**Secretários**) ou `filiados` (**Filiados**).\n\nOs demais argumentos (`<ARGUMENTOS>` acima) possíveis são: \n\n| Argumentos | Bases de dados que suportam este argumento |\n|-------------------------------|-------------------------------------------------|\n| anos | tse, candidatos, legendas, votos, bem_candidato |\n| cargo | tse, candidatos, legendas, votos |\n| agregacao_regional | tse, votos |\n| agregacao_politica | tse |\n| uf_filter | tse, votos, bem_candidato, filiados |\n| mun_filter | tse, votos |\n| only_elected | tse, candidatos |\n| brancos | tse, votos |\n| nulos | tse, votos |\n| name_filter | secretarios |\n| goverment_period | secretarios |\n| party | filiados |\n\nAlém disso, caso as colunas desejadas não estajam na lista de colunas-padrão da consulta de determinado banco, é possível selecionar colunas acrescentando o texto `&c[]=<COLUNA>` ao final do link de requisição, e/ou filtrar colunas, acrescentando o texto `&filters[<COLUNA>]=<VALOR>`. 
*Atenção: garanta que a coluna a ser filtrada foi devidamente selecionada.*\n\nPor exemplo, para filtrar candidatos(as) à Presidência em 2018 que se declaram pretos(as), utilizamos o seguinte link: \n\n cepesp.io/api/consulta/athena/query?table=candidatos&anos=2014&cargo=1&c[]=ANO_ELEICAO&c[]=NUM_TURNO&c[]=SIGLA_UE&c[]=DESCRICAO_CARGO&c[]=SIGLA_PARTIDO&c[]=NUMERO_CANDIDATO&c[]=CPF_CANDIDATO&c[]=NOME_URNA_CANDIDATO&c[]=DESCRICAO_SEXO&c[]=DESCRICAO_COR_RACA&c[]=DESC_SIT_TOT_TURNO&filters[DESCRICAO_COR_RACA]=PRETA\n \nÉ possível acessar a lista de colunas disponíveis para cada banco de dados no nosso [dicionário de variáveis](http://www.cepespdata.io/static/docs/cepespdata_dicionario_publico.xlsx) ou no [nosso GitHub](https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas).\n\n### API Rest no navegador\n\nQuando se insere o link conforme a estrutura descrita acima no navegador, a requisição deve ser bem-sucedida. Isso pode ser verificado por meio do aviso `last_status:\"SUCCEEDED\"` que deverá aparecer no canto superior esquerdo tela. \n\nNeste caso, guarde o número do `id` devolvido pela consulta. No caso da consulta do exemplo cima: `id: 7738`.\n\nFinalmente, faça o download da requisição em formato CSV inserindo o link `cepesp.io/api/consulta/athena/result?id=<ID>&ignore_version=true` em seu navegador, mas substituindo o termo `<ID>` pelo número de `id` recuperado na consulta acima. No nosso exemplo: `7738`. Ou seja, neste caso, o link a ser inserido seria:\n\n cepesp.io/api/consulta/athena/result?id=7738&ignore_version=true\n\n### API Rest no R\n\nUtilizar a API Rest no R permite importar dados de forma mais automática e estruturada, diretamente para o ambiente **R**, onde eles podem ser manipulados de acordo com seu interesse. \n\nPara usar a API Rest por meio do R, você vai precisar instalar os pacotes `httr` e `jasonlite`.\n\n```{r, eval=FALSE}\ninstall.packages(\"httr\")\ninstall.packages(\"jsonlite\")\n```\n\nEm seguida, vamos requerir a utilização dos pacotes:\n\n```{r, eval=FALSE}\nrequire(\"httr\")\nrequire(\"jsonlite\")\n```\n\nE depois fazer a requisição utilizando a estrutura descrita acima em **Estrutura das resquisições**. 
Vamos usar um exemplo:\n\n```{r, eval=FALSE}\n\n# Definindo link da requisição:\nlink <- \"cepesp.io/api/consulta/athena/query?table=candidatos&anos=2014&cargo=1&c[]=ANO_ELEICAO&c[]=NUM_TURNO&c[]=SIGLA_UE&c[]=DESCRICAO_CARGO&c[]=SIGLA_PARTIDO&c[]=NUMERO_CANDIDATO&c[]=CPF_CANDIDATO&c[]=NOME_URNA_CANDIDATO&c[]=DESCRICAO_SEXO&c[]=DESCRICAO_COR_RACA&c[]=DESC_SIT_TOT_TURNO&filters[DESCRICAO_COR_RACA]=PRETA\"\n\n# Fazendo requisição:\ncall <- httr::GET(\"cepesp.io/api/consulta/athena/query?table=candidatos&anos=2014&cargo=1&c[]=ANO_ELEICAO&c[]=NUM_TURNO&c[]=SIGLA_UE&c[]=DESCRICAO_CARGO&c[]=SIGLA_PARTIDO&c[]=NUMERO_CANDIDATO&c[]=CPF_CANDIDATO&c[]=NOME_URNA_CANDIDATO&c[]=DESCRICAO_SEXO&c[]=DESCRICAO_COR_RACA&c[]=DESC_SIT_TOT_TURNO&filters[DESCRICAO_COR_RACA]=PRETA\")\n\n# Transformando a lista em texto:\ncall_text <- httr::content(call, 'text')\n\n# Abrindo a nossa lista JSON:\ncall_json <- fromJSON(call_text, flatten = TRUE) \nView(call_json) # Aqui conseguimos acessar o id que contém a nossa requisição, que pode ser acessado no:\ncall_json$id\n\n# Assim vamos importar o banco desejado inserindo o nosso id na requisição do resultado da consulta:\nrequis <- httr::GET(paste0('cepesp.io/api/consulta/athena/result?id=',call_json$id,'&ignore_version=true'))\n\n# Transformando a requisição em formato de banco de dados:\nrequis_df <- httr::content(requis, 'parsed')\n\n```\n\nAssim, o objeto final `requis_df` deve conter o banco de dados desejado, que pode ser manipulado dentro do seu ambiente R e salvo conforme interesse. Veja as seções 3 e 4 deste tutorial para mais detalhes sobre a utilização do R.\n\nPara mais detalhes sobre a API Rest do CepespData/FGV, consulte nossa página no [GitHub](https://github.com/Cepesp-Fgv/cepesp-rest/).\n\n" }, { "alpha_fraction": 0.7855311036109924, "alphanum_fraction": 0.7879889011383057, "avg_line_length": 93.52525329589844, "blob_id": "900e9477359effbd555a3f66e97df68f8d50ca75", "content_id": "460951b44ca23f793516ca5b14509958333540cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 9634, "license_type": "no_license", "max_line_length": 599, "num_lines": 99, "path": "/bookdown/01-como_usar_o_site.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# Como usar o site\n\nO site do [Cepespdata/FGV](cepespdata.io) permite acessar manualmente, em uma interface gráfica e amigável, os dados eleitorais. \n\nÉ possível selecionar colunas, agregar por diferentes níveis regionais (de seção eleitoral a Brasil) ou de agregação política (candidato, partido, coligação) e filtrar os dados de acordo com o interesse.\n\nA seguir, apresentamos os bancos de dados hoje disponíveis no [cepespdata.io](cepespdata.io) e os que serão disponibilizados em breve, e as principais ferramentas para manipulação dos dados no próprio site.\n\n## Bancos de dados disponíveis no site do Cepespdata\n\n### Resultados de eleições por cargo\n\nEste é o banco mais completo do Cepespdata/FGV. Nele estão mesclados os bancos de **Votos**, **Perfil dos candidatos** e **Coligações** do TSE, em um longo trabalho para a criação de identificador único de candidatos, partidos e coligações que permitissem unir os bancos sem perder ou multiplicar votos. 
Assim contém todas as colunas desses três bancos de dados, conforme descrito nos três tópicos a seguir.\n\nAcesse aqui os [Resultados de eleições por cargo](http://cepespdata.io/consulta/tse).\n\n### Perfil dos candidatos\n\nO banco [Perfil dos candidatos](http://cepespdata.io/consulta/candidatos) contém dados referentes à identificação do candidato (Nome, número de do título eleitoral, CPF, e-mail, Nome de urna), ao perfil etário (idade e data de nascimento), perfil de gênero (sexo), étnico-racial, de escolaridade (grau de instrução) e ocupacional (profissão), à filiação partidária e ao local de de nascimento. Veja a lista completa das variáveis no [Dicionário de Variáveis do Cepespdata/FGV](http://cepespdata.io/static/docs/cepespdata_dicionario_publico.xlsx).\n\n### Coligações\n\n[A consulta às coligações](http://cepespdata.io/consulta/legendas) permite relacionar cada partido à coligação de que participou, bem como à composição dessa coligação, ou seja, quais partidos a compunham.\n\n### Votos\n\n[A consulta aos votos](http://cepespdata.io/consulta/votos) liga o número do candidato à quantidade de votos. Não é possível ver outros dados pessoais do candidato, como nome ou nome de urna neste banco. Para isso, acesse a consulta mais completa: [Resultados de eleições por cargo](http://cepespdata.io/consulta/tse).\n\n### Resumo das eleições\n\nO [Resumo da eleição](http://cepespdata.io/consulta/tse?agregacao_politica=4) é uma consulta parecida com os [Resultados de eleições por cargo](http://cepespdata.io/consulta/tse), porém mostra algumas medidas síntese da eleição, como quantidade de comparecimentos e de abstenções, de votos nominais e de legenda, quantidade de votos nulos e brancos. Está disponível para todos os cargos e para as diferentes agregações regionais.\n\n### Bens de Candidatos\n\nO banco de [bens de candidatos](http://cepespdata.io/consulta/bem_candidato) permite acessar todos os bens declarados pelos candidatos ao TSE de 2006 a 2018, isto é, descrição do bem, tipo de bem, valor declarado etc. \n\nÉ necessário escolher o(s) ano(s) desejado(s) para realizar a consulta. Ainda não é possível buscar diretamente por CPF ou Título de Eleitor. Assim, antes de consultar, é bom ter em mãos os anos no qual um candidato concorreu. \n\nO filtro por UF é opcional. Neste caso, tenha em mãos também o estado no qual determinado candidato concorreu.\n\nÉ possível filtrar o cargo desejado. Para encontrar candidatos que concorreram a Presidente, por exemplo, basta filtrar diretamente a coluna SIGLA_UE, inserindo no campo de busca o texto `BR`, ou, na coluna DESCRICAO_CARGO, o texto `PRESIDENTE`.\n\n### Filiados\n\n[O banco de filiados](http://cepespdata.io/consulta/filiados) contém a lista completa de filiados por Partido e Unidade Federativa (UF). É possível identificar o filiado (pelo nome e número de eleitor -- neste banco chamado de \"número de inscrição\"), localizar o filiado até o nível da Seção Eleitoral em que o filiado está registrado, município (inclusive com o código do IBGE) etc., e saber as datas de filiação, desfiliação, regularização da filiação e cancelamento. \n\nHá problemas de consistência dos dados, como pessoas filiadas a dois partidos ao mesmo tempo, o que já vem do banco original do TSE. A equipe Cepespdata/FGV está trabalhando no aperfeiçoamento deste banco para a correção destas inconsistências.\n\n### Secretários\n\n[O banco de secretários](http://cepespdata.io/consulta/secretarios) contém atualmente dados de secretários estaduais e do Distrito Federal. 
Estão disponíveis dados de data de entrada e saída do governo, se o secretário era já funcionário público no momento da nomeação, nome da pasta que assumiu, se era filiado a algum partido, e outras informações pessoais como profissão anterior.\n\nEste é o único banco que não estava originalmente disponível no repositório do TSE. Este é um banco original do Cepespdata/FGV, construído a partir da coleta feita por pesquisadores associados e colaboradores da Rede Federativa de Pesquisadores em Diários Oficiais e outras fontes documentais, além de juntar estes com dados disponíveis na Relação Anual de Informações Sociais (RAIS) do Ministério da Economia. Você pode ler mais sobre o projeto [aqui](http://cepespdata.io/about-state-secretaries).\n\n### Bancos a serem disponibilizados\n\nOs bancos de doações de campanha, despesas eleitorais e de diretórios dos partidos serão disponibilizados em breve.\n\n## Ferramentas\n\n### Seleção dos Anos\n\nÉ possível selecionar mais de um ano ao mesmo tempo, pressionando a tecla `Shift` ou `Ctrl`.\n\n### Seleção de colunas\n\nAs consultas no [cepespdata.io](cepespdata.io) permitem selecionar colunas de interesse do(a) usuário(a). Cada banco de dados já vem com algumas colunas-padrão selecionadas, mas não todas. \n\nPor exemplo, a consulta [Resultado de eleições por cargo](http://cepespdata.io/consulta/tse) não vem com as colunas grau de instrução ou estado civil do candidato automaticamente selecionadas. Caso queira acessar tais informações, basta ir até o botão branco e azul **Selecionar colunas** no canto inferior esquerdo dos painel com os parâmetros da consulta, selecionar as colunas de interesse e clicar em **Atualizar tabela** (botão verde no canto inferior direito).\n\n### Filtro de colunas\n\nSelecionadas as colunas de interesse, é possível também filtrar o banco de dados a partir dos atributos de um candidato, de um partido, da região geográfica de interesse etc. Basta escrever o que deseja filtrar no campo abaixo do nome da coluna do atributo a filtrar e clicar no símbolo da lupa. Por exemplo, se desejo filtrar candidatos do PSOL a Deputado Federal em 2018, basta escrever `PSOL` no campo abaixo de SIGLA_PARTIDO.\n\nÉ possível **filtrar mais de uma categoria** de uma mesma coluna. Por exemplo, candidatos negros (isto é, pretos e pardos) a Deputado Federal em 2018 podem ser filtrados no banco [Perfil dos candidatos](http://cepespdata.io/consulta/candidatos) ou [Resultados de eleições por cargo](http://cepespdata.io/consulta/tse) da seguinte forma:\n\n- selecionar o cargo `Deputado Federal` e o ano `2018` \n\n- caso esteja trabalhando com a consulta [Resultados de eleições por cargo](http://cepespdata.io/consulta/tse) e não [Perfil dos candidatos](http://cepespdata.io/consulta/candidatos), selecionar a agregação regional `Brasil` ou `UF`, a Agregação Política `Candidato`\n\n- incluir a coluna DESCRICAO_COR_RACA por meio do botão **Selecionar colunas**\n\n- feita a consulta desta forma, filtrar a coluna DESCRICAO_COR_RACA inserindo o texto `(PRETA|PARDA)` logo abaixo do nome da coluna.\n\n### Agregação Política\n\nEsta função está disponível apenas nas consultas [Resumo da eleição](http://cepespdata.io/consulta/tse?agregacao_politica=4) e [Resultado de eleições por cargo](http://cepespdata.io/consulta/tse). Ela permite que os dados sejam agregados por `Candidato`, `Partido` ou `Coligação`. 
Assim, é possível, por exemplo, somar os votos de uma determinada coligação (somando os votos nominais recebidos por candidatos dos partidos participantes da coligação e os votos na legenda desses mesmos partidos) em todo o Brasil, em determinado estado ou município, conforme o tópico **Agregação Regional** a seguir.\n\n### Agregação Regional \n\nA agregação regional dos dados está disponível nas consultas [Resumo da eleição](http://cepespdata.io/consulta/tse?agregacao_politica=4), [Votos](http://cepespdata.io/consulta/votos) e [Resultado de eleições por cargo](http://cepespdata.io/consulta/tse). Ela permite agregar os votos de um candidato, partido ou coligação por seção de votação (`Votação Seção`), `Micro Região`, `Macro Região`, zona elitoral (`Zona`), `Município`, estado (`UF`) ou para o Brasil inteiro (`Brasil`).\n\nÉ possível, por exemplo, obter todos os votos do PSL para Deputado Estadual ao selecionar, na consulta **Resultado de eleições por cargo**, Agregação Regional `Brasil` e Agregação Política `Partido` e, em seguida, filtrar a coluna SIGLA_PARTIDO com o texto `PSL`.\n\n### Download dos dados\n\nÉ possível baixar os dados em CSV, já agregados e filtrados para tê-los salvos, ou para trabalhar localmente, em seu próprio computador. Basta clicar no botão verde **CSV** no canto superior direito da tabela, depois que a consulta já estiver pronta e aparecendo na sua tela.\n\nO arquivo CSV pode ser aberto por diversos *softwares* (programas), como MS Excel, R, Python, e editores de texto como Notepad (Windows) ou Gedit (Linux).\n" }, { "alpha_fraction": 0.552312970161438, "alphanum_fraction": 0.5579012632369995, "avg_line_length": 45.02857208251953, "blob_id": "4bdf39a0f5d9d835fd33fef00fdb1fe34dccfe27", "content_id": "c775b6f932541cdd3f853f7d6f5c9460adc24fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3276, "license_type": "no_license", "max_line_length": 127, "num_lines": 70, "path": "/web/templates/about.pt.html", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block content %}\n\n <div class=\"row mt-4\">\n <div class=\"col-md-12\">\n\n <h1>Sobre</h1>\n\n <p class=\"text-muted\">\n O CepespData é uma plataforma de acesso a dados eleitorais desenvolvida pelo Centro de Política e\n Economia do Setor Público (CEPESP) da Fundação Getulio Vargas (FGV) com apoio da FAPESP ao projeto As\n Instituições Políticas Subnacionais: Um Estudo Comparativo dos Estados Brasileiros (processo:\n 2013/15658-1) e do Tribunal Superior Eleitoral - TSE.\n </p>\n\n <p class=\"text-muted\">\n As bases de dados utilizadas são disponibilizadas pelo TSE em seu repositório de dados eleitorais. 
" }, { "alpha_fraction": 0.552312970161438, "alphanum_fraction": 0.5579012632369995, "avg_line_length": 45.02857208251953, "blob_id": "4bdf39a0f5d9d835fd33fef00fdb1fe34dccfe27", "content_id": "c775b6f932541cdd3f853f7d6f5c9460adc24fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3276, "license_type": "no_license", "max_line_length": 127, "num_lines": 70, "path": "/web/templates/about.pt.html", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block content %}\n\n    <div class=\"row mt-4\">\n        <div class=\"col-md-12\">\n\n            <h1>Sobre</h1>\n\n            <p class=\"text-muted\">\n                O CepespData é uma plataforma de acesso a dados eleitorais desenvolvida pelo Centro de Política e\n                Economia do Setor Público (CEPESP) da Fundação Getulio Vargas (FGV) com apoio da FAPESP ao projeto As\n                Instituições Políticas Subnacionais: Um Estudo Comparativo dos Estados Brasileiros (processo:\n                2013/15658-1) e do Tribunal Superior Eleitoral - TSE.\n            </p>\n\n            <p class=\"text-muted\">\n                As bases de dados utilizadas são disponibilizadas pelo TSE em seu repositório de dados eleitorais. O\n                tratamento dos arquivos originais está publicamente disponível no GitHub (<a\n                        href=\"https://github.com/Cepesp-Fgv/tse-dados\" target=\"_blank\">https://github.com/Cepesp-Fgv/tse-dados</a>),\n                o que garante a integridade dos arquivos, a transparência e a colaboração no processo de aperfeiçoamento\n                das bases.\n            </p>\n\n            <p class=\"text-muted\">\n                Pessoas que contribuíram:\n            </p>\n\n            <ul>\n                <!-- <li>Luiz Gabriel</li> -->\n                <li>Abraão Lacerda</li>\n                <li>Arthur Fisch</li>\n                <li>Ciro Biderman</li>\n                <li>Eliana Lins Morandi</li>\n                <li>Frederico Ramos</li>\n                <li>Gabriela Campos</li>\n                <li>George Avelino</li>\n                <li>Guilherme Russo</li>\n                <li>Jairo Pimentel</li>\n                <li>Jonathan Phillips</li>\n                <li>Lara Mesquita</li>\n                <li>Marina Merlo</li>\n                <li>Mauricio Izumi</li>\n                <li>Natália Salgado Bueno</li>\n                <li>Rafael Coelho</li>\n                <li>Rebeca Carvalho</li>\n                <li>Wesley Seidel</li>\n                <li>William Colen</li>\n            </ul>\n\n            <h3>Nota metodológica</h3>\n\n            <p class=\"text-muted\">\n                Quanto às bases de resultados eleitorais, optou-se por trabalhar com a votação por seção eleitoral,\n                privilegiando a vontade do eleitor, uma vez que esse é o único arquivo em que o TSE não atualiza o\n                resultado conforme decisões judiciais posteriores ao dia da eleição. Esses dados, portanto, são a\n                representação mais próxima do que ocorreu nas eleições. Esta opção implica diferenças entre os\n                resultados nas bases do Cepespdata/FGV e análises utilizando os resultados agregados por município e\n                zona, pois, neste último caso, o TSE frequentemente atualiza os resultados com base em julgamentos\n                posteriores à publicação dos resultados da eleição (por exemplo, com o indeferimento de uma\n                candidatura). Um persistente trabalho para reforçar a consistência dos dados garante que as diferenças\n                de votos reportadas pelas bases do Cepespdata/FGV nunca sejam superiores a 1% em relação aos resultados\n                reportados pelo TSE, tomando como base a disponibilização dos resultados eleitorais por seção no\n                repositório de dados eleitorais.\n            </p>\n\n        </div>\n    </div>\n\n{% endblock %}" }, { "alpha_fraction": 0.456878125667572, "alphanum_fraction": 0.456878125667572, "avg_line_length": 26.509803771972656, "blob_id": "65061a03b8072aa04cd42fa544a4f711659c4467", "content_id": "7f560f0710d2fbe9b3530bf6a24e99b9b7b31313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1403, "license_type": "no_license", "max_line_length": 66, "num_lines": 51, "path": "/web/cepesp/columns/bem_candidato.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "class CandidateAssetsColumnsSelector:\n\n    def columns(self):\n        return [\n            # BEM CANDIDATO\n            \"DATA_GERACAO\",\n            \"HORA_GERACAO\",\n            \"ANO_ELEICAO\",\n            \"DESCRICAO_ELEICAO\",\n            \"SIGLA_UF\",\n            \"SIGLA_UE\",\n            \"SEQUENCIAL_CANDIDATO\",\n            \"CD_TIPO_BEM_CANDIDATO\",\n            \"DS_TIPO_BEM_CANDIDATO\",\n            \"DETALHE_BEM\",\n            \"VALOR_BEM\",\n            \"DATA_ULTIMA_ATUALIZACAO\",\n            \"HORA_ULTIMA_ATUALIZACAO\",\n            \"ID_CANDIDATO\",\n\n            # CANDIDATO\n            \"CODIGO_CARGO\",\n            \"DESCRICAO_CARGO\",\n            \"NOME_CANDIDATO\",\n            \"NUMERO_CANDIDATO\",\n            \"NUMERO_PARTIDO\",\n            \"SIGLA_PARTIDO\",\n            \"CPF_CANDIDATO\",\n            \"NUM_TITULO_ELEITORAL_CANDIDATO\",\n            \"COD_SIT_TOT_TURNO\",\n            \"DESC_SIT_TOT_TURNO\"\n        ]\n\n    def visible_columns(self):\n        return [\n            \"ANO_ELEICAO\",\n            \"SIGLA_UF\",\n            \"SIGLA_UE\",\n            \"DS_TIPO_BEM_CANDIDATO\",\n            \"DETALHE_BEM\",\n            \"VALOR_BEM\",\n            \"DESCRICAO_CARGO\",\n            \"NOME_CANDIDATO\",\n            \"NUMERO_CANDIDATO\",\n            \"SIGLA_PARTIDO\",\n            \"CPF_CANDIDATO\",\n
\"DESC_SIT_TOT_TURNO\",\n ]\n\n def order_by_columns(self):\n return ['ANO_ELEICAO', 'SIGLA_UF', 'SEQUENCIAL_CANDIDATO']\n" }, { "alpha_fraction": 0.7053942084312439, "alphanum_fraction": 0.7053942084312439, "avg_line_length": 25.77777862548828, "blob_id": "db5d8fae12cd95822890ef17f266583a9391b58f", "content_id": "70b70797cd0757b80df24d048c0a710233a50a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/web/cepesp/athena/builders/utils.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "def opt(dictionary, key, default=None):\n return dictionary[key] if key in dictionary else default\n\n\ndef arg(dictionary, key):\n if key not in dictionary:\n raise KeyError(f'No argument {key} supplied')\n\n return dictionary[key]\n" }, { "alpha_fraction": 0.522089421749115, "alphanum_fraction": 0.529206395149231, "avg_line_length": 31.238094329833984, "blob_id": "6ee3e5717e6f81471fdd028c0261218724dfbb7c", "content_id": "5789f498e00dde86c90e92fd439284268b14585d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7447, "license_type": "no_license", "max_line_length": 110, "num_lines": 231, "path": "/etl/crawler/pipelines.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport zipfile\nfrom _csv import QUOTE_ALL\n\nimport pandas as pd\nfrom scrapy.exceptions import DropItem\nfrom scrapy.pipelines.files import FilesPipeline\n\nfrom etl.crawler.items import TSEFileItem\n\n\nclass TSEFilesPipeline(FilesPipeline):\n\n def file_path(self, request, response=None, info=None):\n item = TSEFileItem.create(request.url)\n return item['name']\n\n\nclass ProcessItemPipeline:\n columns = [\n \"DATA_GERACAO\",\n \"HORA_GERACAO\",\n \"ANO_ELEICAO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"SIGLA_UF\",\n \"SIGLA_UE\",\n \"COD_MUN_TSE\",\n \"NOME_MUNICIPIO\",\n \"NUM_ZONA\",\n \"NUM_SECAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ]\n\n rename_2018 = {\n \"DT_GERACAO\": \"DATA_GERACAO\",\n \"HH_GERACAO\": \"HORA_GERACAO\",\n \"DS_ELEICAO\": \"DESCRICAO_ELEICAO\",\n \"NR_TURNO\": \"NUM_TURNO\",\n \"SG_UF\": \"SIGLA_UF\",\n \"SG_UE\": \"SIGLA_UE\",\n \"NM_UE\": \"NOME_UE\",\n \"CD_MUNICIPIO\": \"COD_MUN_TSE\",\n \"NM_MUNICIPIO\": \"NOME_MUNICIPIO\",\n \"NR_ZONA\": \"NUM_ZONA\",\n \"NR_SECAO\": \"NUM_SECAO\",\n \"CD_CARGO\": \"CODIGO_CARGO\",\n \"DS_CARGO\": \"DESCRICAO_CARGO\",\n \"NR_VOTAVEL\": \"NUMERO_CANDIDATO\",\n \"QT_VOTOS\": \"QTDE_VOTOS\",\n \"NM_VOTAVEL\": \"NOME_CANDIDATO\"\n }\n\n bem_candidato = [\n \"DATA_GERACAO\",\n \"HORA_GERACAO\",\n \"ANO_ELEICAO\",\n \"DESCRICAO_ELEICAO\",\n \"SIGLA_UF\",\n \"SEQUENCIAL_CANDIDATO\",\n \"CD_TIPO_BEM_CANDIDATO\",\n \"DS_TIPO_BEM_CANDIDATO\",\n \"DETALHE_BEM\",\n \"VALOR_BEM\",\n \"DATA_ULTIMA_ATUALIZACAO\",\n \"HORA_ULTIMA_ATUALIZACAO\",\n ]\n\n bem_candidato_rename_2018 = {\n \"DT_GERACAO\": \"DATA_GERACAO\",\n \"HH_GERACAO\": \"HORA_GERACAO\",\n \"DS_ELEICAO\": \"DESCRICAO_ELEICAO\",\n \"SG_UF\": \"SIGLA_UF\",\n \"SG_UE\": \"SIGLA_UE\",\n \"NM_UE\": \"NOME_UE\",\n \"VR_BEM_CANDIDATO\": \"VALOR_BEM\",\n \"DS_BEM_CANDIDATO\": \"DETALHE_BEM\",\n \"DT_ULTIMA_ATUALIZACAO\": \"DATA_ULTIMA_ATUALIZACAO\",\n \"HH_ULTIMA_ATUALIZACAO\": \"HORA_ULTIMA_ATUALIZACAO\",\n \"SQ_CANDIDATO\": \"SEQUENCIAL_CANDIDATO\"\n }\n\n detalhe = [\n 
\"DATA_GERACAO\",\n \"HORA_GERACAO\",\n \"ANO_ELEICAO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"SIGLA_UF\",\n \"SIGLA_UE\",\n \"COD_MUN_TSE\",\n \"NOME_MUNICIPIO\",\n \"NUM_ZONA\",\n \"NUM_SECAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"QTD_APTOS\",\n \"QTD_COMPARECIMENTO\",\n \"QTD_ABSTENCOES\",\n \"QT_VOTOS_NOMINAIS\",\n \"QT_VOTOS_BRANCOS\",\n \"QT_VOTOS_NULOS\",\n \"QT_VOTOS_LEGENDA\",\n \"QT_VOTOS_ANULADOS_APU_SEP\",\n ]\n\n detalhe_2018 = {\n \"DT_GERACAO\": \"DATA_GERACAO\",\n \"HH_GERACAO\": \"HORA_GERACAO\",\n \"DT_ELEICAO\": \"DATA_ELEICAO\",\n \"DS_ELEICAO\": \"DESCRICAO_ELEICAO\",\n \"NR_TURNO\": \"NUM_TURNO\",\n \"SG_UF\": \"SIGLA_UF\",\n \"SG_UE\": \"SIGLA_UE\",\n \"NM_UE\": \"DESCRICAO_UE\",\n \"CD_MUNICIPIO\": \"COD_MUN_TSE\",\n \"NM_MUNICIPIO\": \"NOME_MUNICIPIO\",\n \"NR_ZONA\": \"NUM_ZONA\",\n \"NR_SECAO\": \"NUM_SECAO\",\n \"CD_CARGO\": \"CODIGO_CARGO\",\n \"DS_CARGO\": \"DESCRICAO_CARGO\",\n \"QT_APTOS\": \"QTD_APTOS\",\n \"QT_COMPARECIMENTO\": \"QTD_COMPARECIMENTO\",\n \"QT_ABSTENCOES\": \"QTD_ABSTENCOES\",\n \"QT_VOTOS_PENDENTES\": \"QT_VOTOS_ANULADOS_APU_SEP\",\n }\n\n rename_filiados = {\n \"DATA DA EXTRACAO\": \"DATA_EXTRACAO\",\n \"HORA DA EXTRACAO\": \"HORA_EXTRACAO\",\n \"NUMERO DA INSCRICAO\": \"NUMERO_INSCRICAO\",\n \"NOME DO FILIADO\": \"NOME_FILIADO\",\n \"SIGLA DO PARTIDO\": \"SIGLA_PARTIDO\",\n \"NOME DO PARTIDO\": \"NOME_PARTIDO\",\n \"UF\": \"UF\",\n \"CODIGO DO MUNICIPIO\": \"COD_MUN_TSE\",\n \"NOME DO MUNICIPIO\": \"NOME_MUNICIPIO\",\n \"ZONA ELEITORAL\": \"NUM_ZONA\",\n \"SECAO ELEITORAL\": \"NUM_SECAO\",\n \"DATA DA FILIACAO\": \"DATA_FILIACAO\",\n \"SITUACAO DO REGISTRO\": \"SITUACAO_REGISTRO\",\n \"TIPO DO REGISTRO\": \"TIPO_REGISTRO\",\n \"DATA DO PROCESSAMENTO\": \"DATA_PROCESSAMENTO\",\n \"DATA DA DESFILIACAO\": \"DATA_DESFILIACAO\",\n \"DATA DO CANCELAMENTO\": \"DATA_CANCELAMENTO\",\n \"DATA DA REGULARIZACAO\": \"DATA_REGULARIZACAO\",\n \"MOTIVO DO CANCELAMENTO\": \"MOTIVO_CANCELAMENTO\"\n }\n\n def __init__(self, source, output, years):\n self.source = source\n self.output = output\n self.years = years\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n source=crawler.settings.get('FILES_STORE'),\n output=crawler.settings.get('PROCESSED_STORE'),\n years=crawler.settings.get('YEARS'),\n )\n\n def process_item(self, item, spider):\n if len(item['files']) == 0:\n raise DropItem(\"No file downloaded\")\n\n file_path = os.path.join(self.source, item['files'][0]['path'])\n\n if 'bem_candidato' in item['name']:\n\n if item['year'] in [2014, 2016, 2018]:\n self._extract_files(file_path, rename=self.bem_candidato_rename_2018)\n else:\n self._extract_files(file_path, columns=self.bem_candidato)\n\n elif 'detalhe' in item['name']:\n\n if item['year'] == 2018:\n self._extract_files(file_path, rename=self.detalhe_2018)\n else:\n self._extract_files(file_path, columns=self.detalhe)\n\n elif 'votacao' in item['name']:\n\n if item['year'] == 2018:\n self._extract_files(file_path, rename=self.rename_2018)\n else:\n self._extract_files(file_path, columns=self.columns)\n\n elif 'filiados' in item['name']:\n\n self._extract_files(file_path, rename=self.rename_filiados, filter_contains='filiados_')\n\n else:\n self._extract_files(file_path)\n\n return item\n\n def _extract_files(self, file_path, sep=';', columns=None, rename=None, filter_contains=None):\n with zipfile.ZipFile(file_path) as z:\n for file in z.namelist():\n if (filter_contains is None or filter_contains in file) \\\n and (file.endswith('.txt') or 
file.endswith('.csv')):\n\n                    file_new = re.sub(r'(\\.txt|\\.csv)', '.gz', file)\n                    file_new = file_new.split('/')[-1]  # skip inner directories\n                    output_path = os.path.join(self.output, file_new)\n\n                    if not os.path.exists(output_path):\n\n                        directory = os.path.dirname(output_path)\n                        if not os.path.isdir(directory):\n                            os.makedirs(directory)\n\n                        with z.open(file) as f:\n\n                            if columns is None or len(columns) == 0:\n                                df = pd.read_csv(f, sep=sep, dtype=str, encoding='latin1', header=0)\n                            else:\n                                df = pd.read_csv(f, sep=sep, dtype=str, encoding='latin1', names=columns)\n\n                            if rename is not None:\n                                df.rename(columns=rename, inplace=True)\n\n                            df.to_csv(output_path, compression='gzip', sep=';', encoding='utf-8', index=False,\n                                      quoting=QUOTE_ALL)\n" }, { "alpha_fraction": 0.5511022210121155, "alphanum_fraction": 0.5517702102661133, "avg_line_length": 25.263158798217773, "blob_id": "ed43fc9bd22b0943e9f9049e95c179c54bfb65fe", "content_id": "9cf3b85617c85d858bacc910f3b29118f39db5de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1497, "license_type": "no_license", "max_line_length": 103, "num_lines": 57, "path": "/etl/process/PartitioningFiliadosProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nfrom _csv import QUOTE_ALL\n\n\nclass PartitioningFiliadosProcess:\n    columns = [\n        \"DATA_EXTRACAO\",\n        \"HORA_EXTRACAO\",\n        \"NUMERO_INSCRICAO\",\n        \"NOME_FILIADO\",\n        \"SIGLA_PARTIDO\",\n        \"NOME_PARTIDO\",\n        \"UF\",\n        \"COD_MUN_TSE\",\n        \"NOME_MUNICIPIO\",\n        \"NUM_ZONA\",\n        \"NUM_SECAO\",\n        \"DATA_FILIACAO\",\n        \"SITUACAO_REGISTRO\",\n        \"TIPO_REGISTRO\",\n        \"DATA_PROCESSAMENTO\",\n        \"DATA_DESFILIACAO\",\n        \"DATA_CANCELAMENTO\",\n        \"DATA_REGULARIZACAO\",\n        \"MOTIVO_CANCELAMENTO\"\n    ]\n\n    def __init__(self, output):\n        self.output = output\n\n    def check(self, item):\n        return item['database'] == 'filiados'\n\n    def done(self, item):\n        return os.path.exists(self._output(item))\n\n    def handle(self, item):\n        df = pd.read_csv(item['path'], sep=';', dtype=str)\n        df = df.fillna('#NE#')  # fillna is not in-place; keep the returned frame\n\n        if not os.path.exists(self._output(item)):\n            self._save(df, item)\n\n    def _save(self, df, item):\n        output_path = self._output(item)\n\n        directory = os.path.dirname(output_path)\n        if not os.path.isdir(directory):\n            os.makedirs(directory)\n\n        df = df[self.columns]\n        df.to_csv(output_path, header=True, compression='gzip', sep=';', encoding='utf-8', index=False,\n                  quoting=QUOTE_ALL)\n\n    def _output(self, item):\n        return os.path.join(self.output, str(item['party']), item['uf'], item['name'])\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 18.5, "blob_id": "e68c42f7570a2145db4ee27a5b4224bb65111d07", "content_id": "27b3570130e04c081dc51a5c4d45dfdc368327e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 77, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/.env.example", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "APP_DEBUG = True\nAPP_SECRET_KEY =\nAWS_ACCESS_KEY_ID =\nAWS_SECRET_ACCESS_KEY =" }, { "alpha_fraction": 0.6737451553344727, "alphanum_fraction": 0.6882239580154419, "avg_line_length": 26.263158798217773, "blob_id": "951172530645aceb5f2dae9bb5d64c5aa1307ab5", "content_id": "d0a8a8ff07f70d86bea51cf8f2ccb676e679dc1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 283, "num_lines": 38, "path": "/bookdown/05-como_visualizar_dados_no_R.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# Como visualizar dados no R\n\n```{r, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE,\n                      eval = FALSE)\n```\n\n## Gráficos\n\n### Gráfico de Barras\n\nPrimeiro, instale os pacotes necessários:\n\n```{r}\ninstall.packages(c(\"ggplot2\", \"sf\", \"ggthemes\"))\n```\n\nEm seguida, precisamos fazer a requisição da nossa tabela.\n\n```{r}\nlibrary(cepespR)\nlibrary(dplyr)\nlibrary(ggplot2)\n\ncand_2014 <- get_candidates(2014, 6)\n```\n\nFeito isso, iremos filtrar o nosso banco para utilizar a função `ggplot`. Dentro da função `ggplot`, repare no parâmetro `mapping`, que recebe a função `aes`. Dentro de `aes`, o parâmetro `x` recebe o nome da variável que queremos utilizar para montar o nosso __gráfico de barras__.\n\n```{r}\ncand_2014 %>% \n  filter(DES_SITUACAO_CANDIDATURA %in% c(\"DEFERIDO\", \"DEFERIDO COM RECURSO\")) %>% \n  ggplot(mapping = aes(x = DESCRICAO_SEXO)) +\n  geom_bar() +\n  theme_minimal()\n```\n\nVocê pode substituir `DESCRICAO_SEXO` por outras variáveis: `DESCRICAO_COR_RACA`, `DESCRICAO_GRAU_INSTRUCAO`, `DESCRICAO_ESTADO_CIVIL`.\n
" }, { "alpha_fraction": 0.5264639854431152, "alphanum_fraction": 0.5524578094482422, "avg_line_length": 27.39583396911621, "blob_id": "73f89bceb366d1221a01e1a8514dc7a302be1039", "content_id": "9d1c8431b345387edaa833f3fb34a772d9a3dc77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 103, "num_lines": 48, "path": "/etl/process/PartitioningDimensionsProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\nfrom _csv import QUOTE_ALL\n\nimport pandas as pd\n\n\nclass PartitioningDimensionsProcess:\n\n    def __init__(self, jobs, output):\n        self.output = output\n        self.jobs = jobs\n\n    def check(self, item):\n        return item['table'] in [\"candidatos\", \"legendas\"]\n\n    def done(self, item):\n        for job in self._get_jobs(item):\n            if not os.path.exists(self._output(item, job)):\n                return False\n\n        return True\n\n    def handle(self, item):\n        df = pd.read_csv(item['path'], sep=';', dtype=str)\n\n        for job in self.jobs:\n            job_df = df[df['CODIGO_CARGO'] == str(job)]\n            if not job_df.empty:\n                self._save(job_df, item, job)\n\n    def _output(self, item, job):\n        return os.path.join(self.output, str(item['year']), str(job), item['name'])\n\n    def _save(self, df, item, job):\n        output_path = self._output(item, job)\n\n        directory = os.path.dirname(output_path)\n        if not os.path.isdir(directory):\n            os.makedirs(directory)\n\n        df.to_csv(output_path, header=True, compression='gzip', sep=';', encoding='utf-8', index=False,\n                  quoting=QUOTE_ALL)\n\n    def _get_jobs(self, item):\n        if item['year'] in [1998, 2002, 2006, 2010, 2014]:\n            return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n        else:\n            return [11, 13]\n" }, { "alpha_fraction": 0.5260455012321472, "alphanum_fraction": 0.568130612373352, "avg_line_length": 39.3636360168457, "blob_id": "6c1b0a81e742206fb2cde692438e5da279aa6709", "content_id": "0c88e402b034974e28a9b57b60a5ce5bed20768b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1778, "license_type": "no_license", "max_line_length": 112, "num_lines": 44, "path": "/etl/fixes/DescricaoEleicaoFix2010.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass DescricaoEleicaoFix2010:\n\n    def __init__(self):\n        self.descriptions = [\n            \"ELEIÇÕES 2010\",\n            \"ELEICOES\"\n        ]\n\n    def check(self, item):\n        return item['year'] == 2010 and item['database'] in ['votos', 'candidatos', 'legendas']\n\n    def apply(self, df: pd.DataFrame):\n        for desc in self.descriptions:\n            # .loc (and not the scalar accessor .at) is required for boolean-mask assignment\n            df.loc[df[\"DESCRICAO_ELEICAO\"] == desc, \"DESCRICAO_ELEICAO\"] = \"Eleicoes Gerais 2010\"\n\n        return df\n\n    def _count_df(self, df):\n        total = 0\n        for desc in self.descriptions:\n            total += len(df[df['DESCRICAO_ELEICAO'] == desc])\n\n        return total\n\n    def test(self, client):\n        for j in [1, 3, 5, 6, 7, 8]:\n            cand_df = client.get_candidates(year=2010, job=j, columns=['NOME_CANDIDATO', 'DESCRICAO_ELEICAO'])\n            legendas_df = client.get_coalitions(year=2010, job=j, columns=['NOME_PARTIDO', 'DESCRICAO_ELEICAO'])\n            votos_df = client.get_votes(year=2010, job=j, regional_aggregation=0,\n                                        columns=['DESCRICAO_ELEICAO', 'QTDE_VOTOS'])\n\n            assert self._count_df(cand_df) == 0, \"candidatos, 2010, job %d\" % j\n            assert self._count_df(legendas_df) == 0, \"legendas, 2010, job %d\" % j\n            assert self._count_df(votos_df) == 0, \"votos, 2010, job %d\" % j\n\n        for j in [2, 4, 9, 10]:\n            cand_df = client.get_candidates(year=2010, job=j, columns=['NOME_CANDIDATO', 'DESCRICAO_ELEICAO'])\n            legendas_df = client.get_coalitions(year=2010, job=j, columns=['NOME_PARTIDO', 'DESCRICAO_ELEICAO'])\n\n            assert self._count_df(cand_df) == 0, \"candidatos, 2010, job %d\" % j\n            assert self._count_df(legendas_df) == 0, \"legendas, 2010, job %d\" % j\n" }, { "alpha_fraction": 0.5460789799690247, "alphanum_fraction": 0.5678305625915527, "avg_line_length": 29.64912223815918, "blob_id": "48c749402f77f9b0d153f4161e26c61a6bb1dad0", "content_id": "a210dd870990c4b829f8bf88709a719b6a23c1bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1747, "license_type": "no_license", "max_line_length": 125, "num_lines": 57, "path": "/etl/fixes/FixSequenciaColigacao2010.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom web.cepesp.utils.data import resolve_conflicts\n\n\nclass FixSequenciaColigacao2010:\n\n    columns = [\n        \"ANO_ELEICAO\",\n        \"CODIGO_CARGO\",\n        \"COMPOSICAO_COLIGACAO\",\n        \"DATA_GERACAO\",\n        \"DESCRICAO_CARGO\",\n        \"DESCRICAO_ELEICAO\",\n        \"DESCRICAO_UE\",\n        \"HORA_GERACAO\",\n        \"NOME_COLIGACAO\",\n        \"NOME_PARTIDO\",\n        \"NUMERO_PARTIDO\",\n        \"NUM_TURNO\",\n        \"SEQUENCIA_COLIGACAO\",\n        \"SIGLA_COLIGACAO\",\n        \"SIGLA_PARTIDO\",\n        \"SIGLA_UE\",\n        \"SIGLA_UF\",\n        \"TIPO_LEGENDA\"\n    ]\n\n    def __init__(self, cand_2010_path):\n        self.cand_2010_path = cand_2010_path\n        self.idx = ['ANO_ELEICAO', 'CODIGO_CARGO', 'NUMERO_PARTIDO']\n\n    def check(self, item):\n        return item['year'] == 2010 and item['database'] == 'legendas'\n\n    def apply(self, df: pd.DataFrame):\n        if len(df[df['CODIGO_CARGO'] == '1']) > 0:\n            cand = pd.read_csv(self.cand_2010_path, sep=';', dtype=str).set_index(self.idx)\n\n            before = len(df)\n\n            df = df.set_index(self.idx)\n            df = df.merge(cand, left_index=True, right_index=True).reset_index()\n            df = resolve_conflicts(df)\n            df['SEQUENCIA_COLIGACAO'] = df['CODIGO_LEGENDA']\n            df = df[self.columns]\n\n            if len(df) > before:\n                raise Exception('Duplicating values')\n\n        return df\n\n    def test(self, client):\n        df = client.get_coalitions(year=2010, job=1, columns=['SEQUENCIAL_COLIGACAO'])\n        df.SEQUENCIAL_COLIGACAO = pd.to_numeric(df['SEQUENCIAL_COLIGACAO'], errors='coerce')\n\n        # combine the two conditions in a single boolean mask instead of chained indexing\n        assert len(df[(df['SEQUENCIAL_COLIGACAO'] > 0) & (df['SEQUENCIAL_COLIGACAO'] < 10000)]) == 0, \"wrong SEQUENCIAL_COLIGACAO\"\n" }, { "alpha_fraction": 0.431010365486145, "alphanum_fraction": 0.43436887860298157, "avg_line_length": 52.3283576965332, "blob_id": "bf69693c98647cce918fb6a2af104b4d6a17ea82", "content_id": "4d495fe72b3ff82fb1297c0df023e27e4c9b7fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3573, "license_type": "no_license", "max_line_length": 134, "num_lines": 67, "path": "/web/templates/partials/columns-modal.html", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "<div id=\"columns-modal\" class=\"modal\" tabindex=\"-1\" role=\"dialog\">\n    <div class=\"modal-dialog modal-lg\" role=\"document\">\n        <div class=\"modal-content\">\n            <div class=\"modal-header\">\n                <h5 class=\"modal-title\">{{ gettext('pages.query.add-columns') }}</h5>\n                <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\">\n                    <span aria-hidden=\"true\">&times;</span>\n                </button>\n            </div>\n\n            <div class=\"modal-body\">\n\n                <div id=\"selected-columns-message\"></div>\n\n                <div class=\"row columns-modal-list\">\n                    {% for column in options.all_columns %}\n                        <div class=\"col-md-4\">\n                            <div class=\"custom-control custom-checkbox mt-2\" style=\"overflow-wrap: break-word\">\n                                <input type=\"checkbox\" class=\"custom-control-input\" name=\"c[]\"\n                                       value=\"{{ column }}\"\n                                       id=\"columns-{{ loop.index0 }}\"\n                                       {% if column in options.selected_columns %}checked{% endif %}>\n                                <label class=\"custom-control-label d-block\"\n                                       for=\"columns-{{ loop.index0 }}\">{{ gettext('columns.' + column) }}</label>\n                                {% if gettext('descriptions.' + column) != 'descriptions.' + column %}\n                                    <small class=\"text-muted d-block\">{{ gettext('descriptions.'
+ column) }}</small>\n                                {% endif %}\n                            </div>\n                        </div>\n                    {% endfor %}\n                </div>\n            </div>\n            <div class=\"modal-footer d-flex flex-row\">\n                <button type=\"button\" data-dismiss=\"modal\" class=\"btn btn-outline-default\">\n                    {{ gettext('pages.query.cancel') }}\n                </button>\n                <button type=\"button\" class=\"btn btn-outline-info\" style=\"width: 250px\" id=\"select-default\">\n                    {{ gettext('pages.query.select-default') }}\n                </button>\n                {% if options.all_columns|length < 30 %}\n                    <button type=\"button\" class=\"btn btn-outline-success\" id=\"select-all\">\n                        {{ gettext('pages.query.select-all') }}\n                    </button>\n                {% endif %}\n\n                <div class=\"input-group align-end\" style=\"width: auto\">\n                    <div class=\"input-group-append\">\n                        <span class=\"input-group-text d-none d-md-block\"> <i class=\"fa fa-play\"></i></span>\n                        <span class=\"input-group-text d-none d-lg-block\"> <small>{{ gettext('pages.query.preview') }}:</small> </span>\n                    </div>\n                    <div class=\"input-group-append\">\n                        <button class=\"btn {% if mode == \"athenas\" %} btn-primary {% else %} btn-outline-primary {% endif %}\"\n                                type=\"submit\" name=\"mode\" value=\"athenas\">\n                            AWS Athena\n                        </button>\n\n                        <button class=\"btn {% if mode == \"lambda\" %} btn-success {% else %} btn-outline-success {% endif %}\"\n                                type=\"submit\" name=\"mode\" value=\"lambda\">\n                            AWS Lambda\n                        </button>\n                    </div>\n                </div>\n\n            </div>\n        </div>\n    </div>\n</div>\n" }, { "alpha_fraction": 0.6825242638587952, "alphanum_fraction": 0.7047572731971741, "avg_line_length": 41.3868293762207, "blob_id": "b98cc698f1af15c7f469fd7f35648f73e7d7a778", "content_id": "0ebf3d9a75eec8438c9bd77c8573268d50d713e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 10482, "license_type": "no_license", "max_line_length": 503, "num_lines": 243, "path": "/bookdown/03-exercicios_API_R.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# API R: Exercícios para praticar\n\nVocê também pode [clicar aqui](http://www.cepespdata.io/static/docs/API_CEPESPData_abraji2019.pdf) para fazer download do documento PDF com os exercícios abaixo.\n\n**Questão 1.** Para cada uma das questões numeradas abaixo, responda:\n\n- Qual função do __cepespR__ você utilizaria?\n\n- Quais são os parâmetros que você deve informar?\n\n- Seria preciso fazer outras operações no banco? Quais? \n\n    **1.1.** Quantos votos na legenda o PSL teve em 2018 para Deputado Federal? E em 2014? Houve aumento?\n    \n    **1.2.** Quantas governadoras foram eleitas nas últimas quatro eleições?\n    \n    **1.3.** Quantas mulheres negras (pretas ou pardas) concorreram ao cargo de prefeita em 2016 no Brasil?\n    \n    **1.4.** Quantas pessoas que eram filiadas ao PCO no Estado de Alagoas se desfiliaram do partido ou tiveram sua filiação cancelada?\n    \n    **1.5.** Considerando os candidatos a vereador no Rio Grande do Sul em 2012, qual é o partido com o maior valor total de bens declarados? \n\n\n**Trabalhando com outras bases de dados a partir do código do IBGE**\n\nUma das vantagens de utilizar os dados eleitorais do CepespData é já ter pronta a compatibilização entre o código do município do TSE e o código das regiões administrativas do IBGE.\n\nAssim, podemos juntar os dados eleitorais do CepespData com qualquer outro que tenha o código do IBGE em poucas linhas de programação, como no esboço a seguir. 
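Um esboço ilustrativo desse padrão de junção, em que `outro_banco` é um banco hipotético qualquer que contenha a coluna `COD_MUN_IBGE` (os valores abaixo são meramente ilustrativos):

```{r}
library(cepespR)
library(dplyr)

# banco real, obtido via cepespR
votos <- get_votes(year = 2016, position = "Prefeito",
                   columns_list = list("NUMERO_CANDIDATO", "QTDE_VOTOS", "COD_MUN_IBGE"))

# "outro_banco" é hipotético: qualquer base com o código do IBGE serve;
# pode ser preciso uniformizar o tipo da coluna, como discutido nos exercícios
outro_banco <- data.frame(COD_MUN_IBGE = c(3550308, 3304557),
                          indicador = c(1, 2))

votos_completos <- votos %>%
  left_join(outro_banco, by = "COD_MUN_IBGE")
```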
\n\nNesse exemplo, vamos explorar como juntar os dados do CepespData com os [dados dos beneficiários do Programa Bolsa Família em 2016](http://dados.gov.br/dataset/bolsa-familia-misocial) e das [estimativas populacionais do IBGE enviadas ao TCU de 2016](https://www.ibge.gov.br/estatisticas/sociais/populacao/9103-estimativas-de-populacao.html?=&t=downloads). \n\nOs dados pré-processados em `.csv` podem ser baixados [clicando aqui](https://drive.google.com/open?id=1lZUEJrmlaOTONBGI_3aUZPnMlA_rp3jD).\n\nA pergunta que queremos responder é: \n\n**Questão 2.** Existe correlação entre a porcentagem de votos do PT num município e a porcentagem de famílias do Programa Bolsa Família em 2016?\n\n*Lembrando: correlação não é a mesma coisa que causa! O foco aqui é o trabalho com os dados e não as conclusões. Para afirmar que o Programa Bolsa Família melhora o resultado eleitoral do PT, precisaríamos de uma análise muito mais sofisticada que a proposta nesse exercício.*\n\nPasso a passo:\n\n2.1. Baixe os dados do Programa Bolsa Família (`pbf_2016.csv`) e abra no R. Dica: veja como utilizar a função `read.csv2()`. Inspecione quais são as variáveis no banco.\n \n2.2. Baixe os dados da população do IBGE (`pop_ibge_2016.csv`) e abra no R. Inspecione quais são as variáveis no banco.\n \n2.3. Utilizando o **cepespR**, construa um banco de dados que tenha a quantidade de votos por município por partido para o cargo de prefeito no ano de 2016.\n \n2.4. Crie uma variável que indique a porcentagem de votos recebida por partido por município.\n \n2.5. Junte o banco de população com os dados do Bolsa Família utilizando o código do IBGE. *Dica: Verifique se o tipo da variável do código do IBGE é o mesmo nos dois bancos. Caso contrário, uniformize-o. Também veja quantos dígitos o código possui -- em alguns bancos, o código do IBGE vem com o dígito verificador, que pode ser descartado sem problemas. Se for preciso descartá-lo, procure como usar a função* `substr()`. *Guarde em uma nova variável este código de município com um dígito a menos.* \n \n2.6. Verifique se o *join* foi feito corretamente explorando o banco com o comando `summary()`. Não podemos ter NA's!\n \n2.7. Nesse novo banco, crie uma variável que indique o número de famílias beneficiárias a cada 1000 habitantes.\n \n2.8. Agora, junte o banco com a variável do item 7 ao banco com as porcentagens de voto. *Dica: lembre-se de usar a variável do código do IBGE com o mesmo número de dígitos.*\n \n2.9. Faça um gráfico de dispersão simples usando a função `plot()` entre as variáveis de famílias beneficiárias e a porcentagem de votos para candidatos do PT. Parece haver correlação?\n \n## Respostas aos exercícios\n\nVocê também pode [clicar aqui](http://www.cepespdata.io/static/docs/API_CEPESPData_abraji2019_respostas.pdf) para fazer download do documento PDF com as respostas abaixo.\n \n1.1. Quantos votos na legenda o PSL teve em 2018 para Deputado Federal? E em 2014? Houve aumento?\n\n```{r,eval = FALSE}\nlista.colunas <- list(\"ANO_ELEICAO\",\"NUMERO_CANDIDATO\",\"QTDE_VOTOS\")\n\nvotosPSL <- get_elections(year=\"2014,2018\",\n                          position=\"Deputado Federal\",\n                          regional_aggregation=\"Brasil\",\n                          candidate_number = \"17\",\n                          columns_list = lista.colunas)\nprint(votosPSL)\n```\n\n1.2. 
Quantas governadoras foram eleitas nas últimas quatro eleições?\n\n```{r,eval = FALSE}\nlista.colunas <- list(\"ANO_ELEICAO\",\"SIGLA_UE\",\"NUMERO_CANDIDATO\",\"QTDE_VOTOS\",\"DESCRICAO_SEXO\")\n\ngovernadoras <- get_elections(year=\"2006,2010,2014,2018\",\n                              position=\"Governador\",\n                              regional_aggregation=\"Brasil\",\n                              only_elected = T,\n                              columns_list = lista.colunas)\n\ngovernadoras %>%\n  group_by(DESCRICAO_SEXO) %>%\n  summarise(n = n())\n\n```\n\n1.3. Quantas mulheres negras (pretas ou pardas) concorreram ao cargo de prefeita em 2016 no Brasil?\n\n```{r,eval = FALSE}\nlista.colunas <- list(\"ANO_ELEICAO\",\"DESCRICAO_COR_RACA\",\"QTDE_VOTOS\",\"DESCRICAO_SEXO\")\n\nprefeitasnegras <- get_elections(year=\"2016\",\n                                 position=\"Prefeito\",\n                                 columns_list = lista.colunas)\n\nprefeitasnegras %>%\n  filter(DESCRICAO_COR_RACA %in% c(\"PRETA\",\"PARDA\")) %>%\n  group_by(DESCRICAO_SEXO,DESCRICAO_COR_RACA) %>%\n  summarise(n = n())\n\n```\n\n\n1.4. Quantas pessoas que eram filiadas ao PCO no Estado de Alagoas se desfiliaram do partido ou tiveram sua filiação cancelada?\n\n```{r,eval = FALSE}\n\nfiliadosAL <- get_filiates(state='AL',party ='PCO')\n\nfiliadosAL %>%\n  group_by(SITUACAO_REGISTRO) %>%\n  summarise(n = n())\n \n\n```\n\n\n1.5. Considerando os candidatos a vereador no Rio Grande do Sul em 2012, qual é o partido com o maior valor total de bens declarados? \n\n```{r,eval = FALSE}\nlista.colunas <- list(\"NUMERO_PARTIDO\",\"SIGLA_PARTIDO\",\"VALOR_BEM\")\n\nbens_vereadores <- get_assets(year=2012,\n                              state = \"RS\",\n                              columns_list = lista.colunas)\n\nbens_vereadores %>%\n  mutate(valor = gsub(\"[.]\",\"\",VALOR_BEM), # remove o ponto do numeral\n         valor = as.numeric(sub(\",\", \".\", valor))) %>% # transforma a vírgula em ponto \n  group_by(SIGLA_PARTIDO) %>%\n  summarise(total = sum(valor)) %>%\n  top_n(1,total)\n \n```\n\n**2. Trabalhando com outras bases de dados a partir do código do IBGE**\n\n2.1. Baixe os dados do Programa Bolsa Família (`pbf_2016.csv`) e abra no R. Dica: veja como utilizar a função `read.csv2()`. Inspecione quais são as variáveis no banco.\n \n```{r,eval=F}\npbf <- read.csv2(\"pbf_2016.csv\",stringsAsFactors = F)\nglimpse(pbf)\n```\n\n2.2. Baixe os dados da população do IBGE (`pop_ibge_2016.csv`) e abra no R. Inspecione quais são as variáveis no banco.\n \n```{r,eval=F}\npop <- read.csv2(\"pop_ibge_2016.csv\",stringsAsFactors = F)\nglimpse(pop)\n```\n\n2.3. Utilizando o **cepespR**, construa um banco de dados que tenha a quantidade de votos por município por partido para o cargo de prefeito no ano de 2016.\n \n```{r,eval=F}\nlista.colunas <- list(\"NUMERO_CANDIDATO\",\"QTDE_VOTOS\",\"COD_MUN_IBGE\")\n\nvotos <- get_votes(year=2016,\n                   position = \"Prefeito\",\n                   columns_list = lista.colunas)\n```\n\n2.4. Crie uma variável que indique a porcentagem de votos recebida por partido por município.\n\n```{r,eval=F}\n# Fazendo os totais de votos por município:\nvotos_totais <- votos %>%\n  group_by(COD_MUN_IBGE) %>%\n  summarise(votosmun = sum(QTDE_VOTOS))\n\n# Juntando os totais e criando a variável com a porcentagem:\nvotos <- votos %>%\n  left_join(votos_totais,by=\"COD_MUN_IBGE\") %>%\n  mutate(pct_votos = 100*(QTDE_VOTOS/votosmun))\n\nsummary(votos$pct_votos)\n```\n \n2.5. Junte o banco de população com os dados do Bolsa Família utilizando o código do IBGE. *Dica: Verifique se o tipo da variável do código do IBGE é o mesmo nos dois bancos. Caso contrário, uniformize-o.
Também veja quantos dígitos o código possui -- em alguns bancos, o código do IBGE vem com o dígito verificador, que pode ser descartado sem problemas. Se for preciso descartá-lo, procure como usar a função* `substr()` *e guarde o resultado numa nova variável.* \n\n```{r,eval=F}\n# Vendo como é a variável do código do IBGE de cada um dos bancos:\nsummary(pbf$cod_ibge)\nsummary(pop$cod_mun)\n# Ambas são numéricas, mas há diferença no número de dígitos.\n# O banco com os dados da população possui um dígito a mais. \n\n# Criando uma nova variável do código com 6 dígitos, para ficar igual ao do PBF:\npop <- pop %>%\n  mutate(cod_mun2 = as.numeric(substr(cod_mun, start = 1, stop = 6)))\n\n# Fazendo o join:\nbd_join <- pop %>%\n  left_join(pbf, by = c(\"cod_mun2\" = \"cod_ibge\"))\n\n```\n \n2.6. Verifique se o *join* foi feito corretamente explorando o banco com o comando `summary()`. Não podemos ter NA's!\n \n```{r,eval=F}\n# Testando se não perdemos ou ganhamos linhas no join:\nnrow(bd_join) == nrow(pbf)\nnrow(bd_join) == nrow(pop)\n\n# Checando se todas as variáveis estão completas:\nsummary(bd_join)\n```\n \n2.7. Nesse novo banco, crie uma variável que indique o número de famílias beneficiárias a cada 1000 habitantes.\n \n```{r,eval=F}\nbd_join <- bd_join %>%\n  mutate(familias_1000hab = (qtd_familias_beneficiarias_bolsa_familia/pop_estimada)*1000)\n\nsummary(bd_join$familias_1000hab)\n```\n \n2.8. Agora, junte o banco com a variável do item 7 ao banco com as porcentagens de voto. *Dica: lembre-se de usar a variável do código do IBGE com o mesmo número de dígitos.*\n \n```{r,eval=F}\n\nvotos_completo <- votos %>%\n  mutate(cod_mun = COD_MUN_IBGE) %>% \n  left_join(bd_join, by=\"cod_mun\")\n\nnrow(votos_completo) == nrow(votos)\n```\n \n2.9. Faça um gráfico de dispersão simples usando a função `plot()` entre as variáveis de famílias beneficiárias e a porcentagem de votos para candidatos do PT.
Parece haver correlação?\n \n```{r,eval=F}\nvotosPT <- votos_completo %>%\n filter(NUMERO_CANDIDATO == 13)\n\nplot(votosPT$familias_1000hab,votosPT$pct_votos)\n\n```\n" }, { "alpha_fraction": 0.7280471920967102, "alphanum_fraction": 0.7313237190246582, "avg_line_length": 43.882354736328125, "blob_id": "8319a379f0b73960e8f7b0ddf5164dbee73cf3cb", "content_id": "13d751a7318c76c8e947601f8e406511f8b3b9a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1526, "license_type": "no_license", "max_line_length": 103, "num_lines": 34, "path": "/web/cepesp/athena/builders/factory.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.utils.request import trim\nfrom web.cepesp.athena.builders.candidates_assets import CandidateAssetsQueryBuilder\nfrom web.cepesp.athena.builders.elections import ElectionsQueryBuilder, SummaryElectionsQueryBuilder, \\\n ElectionsCoalitionsQueryBuilder\nfrom web.cepesp.athena.builders.others import VotesQueryBuilder, CandidatesCoalitionsQueryBuilder\nfrom web.cepesp.athena.builders.party_affiliations import PartyAffiliationsQueryBuilder\nfrom web.cepesp.athena.builders.secretaries import SecretariesQueryBuilder\nfrom web.cepesp.athena.builders.utils import arg, opt\n\n\ndef build_query(**options):\n table = arg(options, 'table')\n pol = opt(options, 'pol', 0)\n\n if table == 'tse' and pol != 4 and pol != 3:\n builder = ElectionsQueryBuilder(**options)\n elif table == 'tse' and pol == 3:\n builder = ElectionsCoalitionsQueryBuilder(**options)\n elif table == 'tse' and pol == 4:\n builder = SummaryElectionsQueryBuilder(**options)\n elif table == 'votos':\n builder = VotesQueryBuilder(**options)\n elif table == 'bem_candidato':\n builder = CandidateAssetsQueryBuilder(**options)\n elif table == 'filiados':\n builder = PartyAffiliationsQueryBuilder(**options)\n elif table == 'secretarios':\n builder = SecretariesQueryBuilder(**options)\n elif table in ['candidatos', 'legendas']:\n builder = CandidatesCoalitionsQueryBuilder(**options)\n else:\n raise KeyError(f'Invalid table {table} supplied')\n\n return trim(builder.build())\n" }, { "alpha_fraction": 0.49204832315444946, "alphanum_fraction": 0.498191237449646, "avg_line_length": 27.39341163635254, "blob_id": "bb484415a0f3845aa1cf2419c1c3bcb2cf29ef65", "content_id": "b124ba57b2cb04ac4a68374b1a338f1c05971149", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14651, "license_type": "no_license", "max_line_length": 100, "num_lines": 516, "path": "/web/cepesp/athena/builders/elections.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.utils.request import trim\nfrom web.cepesp.athena.builders.base import AthenaBuilder\nfrom web.cepesp.columns.tse import ElectionsColumnsSelector\n\nvotos_cols = {\n\n # BR\n 0: [\n \"ANO_ELEICAO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\",\n ],\n\n # MACRO\n 1: [\n \"ANO_ELEICAO\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # UF\n 2: [\n \"ANO_ELEICAO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # MESO\n 4: [\n \"ANO_ELEICAO\",\n \"CODIGO_MESO\",\n 
\"NOME_MESO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # MICRO\n 5: [\n \"ANO_ELEICAO\",\n \"CODIGO_MICRO\",\n \"NOME_MICRO\",\n \"CODIGO_MESO\",\n \"NOME_MESO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # MUNICIPIO\n 6: [\n \"ANO_ELEICAO\",\n \"COD_MUN_TSE\",\n \"COD_MUN_IBGE\",\n \"NOME_MUNICIPIO\",\n \"CODIGO_MICRO\",\n \"NOME_MICRO\",\n \"CODIGO_MESO\",\n \"NOME_MESO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # MUNZONA\n 7: [\n \"ANO_ELEICAO\",\n \"NUM_ZONA\",\n \"COD_MUN_TSE\",\n \"COD_MUN_IBGE\",\n \"NOME_MUNICIPIO\",\n \"CODIGO_MICRO\",\n \"NOME_MICRO\",\n \"CODIGO_MESO\",\n \"NOME_MESO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # ZONA\n 8: [\n \"ANO_ELEICAO\",\n \"NUM_ZONA\",\n \"CODIGO_MICRO\",\n \"NOME_MICRO\",\n \"CODIGO_MESO\",\n \"NOME_MESO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ],\n\n # VOTACAO SECAO\n 9: [\n \"ANO_ELEICAO\",\n \"NUM_SECAO\",\n \"NUM_ZONA\",\n \"CODIGO_MICRO\",\n \"NOME_MICRO\",\n \"CODIGO_MESO\",\n \"NOME_MESO\",\n \"UF\",\n \"NOME_UF\",\n \"CODIGO_MACRO\",\n \"NOME_MACRO\",\n \"COD_MUN_TSE\",\n \"COD_MUN_IBGE\",\n \"NOME_MUNICIPIO\",\n \"NUM_TURNO\",\n \"DESCRICAO_ELEICAO\",\n \"CODIGO_CARGO\",\n \"DESCRICAO_CARGO\",\n \"NUMERO_CANDIDATO\",\n \"QTDE_VOTOS\"\n ]\n\n}\n\n\nclass ElectionsQueryBuilder(AthenaBuilder):\n # region Columns\n\n candidatos = [\n 'SIGLA_UE',\n 'DESCRICAO_UE',\n 'NOME_CANDIDATO',\n 'SEQUENCIAL_CANDIDATO',\n 'CPF_CANDIDATO',\n 'NOME_URNA_CANDIDATO',\n 'COD_SITUACAO_CANDIDATURA',\n 'DES_SITUACAO_CANDIDATURA',\n 'CODIGO_LEGENDA',\n 'SIGLA_LEGENDA',\n 'COMPOSICAO_LEGENDA',\n 'CODIGO_OCUPACAO',\n 'DESCRICAO_OCUPACAO',\n 'DATA_NASCIMENTO',\n 'NUM_TITULO_ELEITORAL_CANDIDATO',\n 'IDADE_DATA_ELEICAO',\n 'CODIGO_SEXO',\n 'DESCRICAO_SEXO',\n 'COD_GRAU_INSTRUCAO',\n 'DESCRICAO_GRAU_INSTRUCAO',\n 'CODIGO_ESTADO_CIVIL',\n 'DESCRICAO_ESTADO_CIVIL',\n 'CODIGO_COR_RACA',\n 'DESCRICAO_COR_RACA',\n 'CODIGO_NACIONALIDADE',\n 'DESCRICAO_NACIONALIDADE',\n 'SIGLA_UF_NASCIMENTO',\n 'CODIGO_MUNICIPIO_NASCIMENTO',\n 'NOME_MUNICIPIO_NASCIMENTO',\n 'DESPESA_MAX_CAMPANHA',\n 'COD_SIT_TOT_TURNO',\n 'DESC_SIT_TOT_TURNO',\n 'EMAIL_CANDIDATO',\n ]\n\n legendas = [\n \"TIPO_LEGENDA\",\n \"SIGLA_PARTIDO\",\n \"NOME_PARTIDO\",\n 'NUMERO_PARTIDO',\n \"SIGLA_COLIGACAO\",\n \"NOME_COLIGACAO\",\n \"COMPOSICAO_COLIGACAO\",\n \"SEQUENCIA_COLIGACAO\"\n ]\n\n # endregion\n\n def __init__(self, **options):\n super().__init__(**options)\n self.selector = ElectionsColumnsSelector(self.arg('pol'), self.arg('reg'), self.arg('job'))\n\n def build(self):\n pol = self.arg('pol')\n inner_query = trim(self._build_inner_query())\n\n columns = \", \".join(self._get_outer_query_columns())\n candidates_join = 'LEFT JOIN candidatos as c ' \\\n 'ON v.ID_CANDIDATO = c.ID_CANDIDATO ' \\\n 'AND v.ANO_ELEICAO = c.ANO_ELEICAO' 
if pol == 2 else ''\n\n return f'''\n SELECT {columns}\n FROM ({inner_query}) as v\n {candidates_join}\n LEFT JOIN legendas as l ON v.ID_LEGENDA = l.ID_LEGENDA AND v.ANO_ELEICAO = l.ANO_ELEICAO\n {self._build_where()}\n {self._build_order_by()}\n '''\n\n def _build_inner_query(self):\n reg = self.arg('reg')\n pol = self.arg('pol')\n years = \"', '\".join(map(str, self.arg('years')))\n\n columns = [c for c in votos_cols[reg] if c != 'QTDE_VOTOS']\n if pol != 2:\n columns = [c for c in columns if c != 'NUMERO_CANDIDATO']\n\n group_columns = \", \".join(map(str, range(1, len(columns) + 2)))\n columns = \", \".join([f\"{c} AS {c}\" for c in columns])\n extra_columns = [\n \"SUBSTRING(NUMERO_CANDIDATO, 1, 2) AS NUMERO_PARTIDO\",\n \"MAX(ID_LEGENDA) AS ID_LEGENDA\",\n \"SUM(QTDE_VOTOS) AS QTDE_VOTOS\"\n ]\n if pol == 2:\n extra_columns = [\"ID_CANDIDATO\"] + extra_columns\n\n extra_columns = \", \".join(extra_columns)\n\n return f'''\n SELECT {columns}, {extra_columns}\n FROM {self.table_name('votos')}\n WHERE {self._build_filter_job()}\n AND p_ano IN (\\'{years}\\')\n {self._build_filter_uf()}\n GROUP BY {group_columns}\n '''\n\n def _build_filter_uf(self):\n uf = self.opt('uf_filter')\n if self.arg('reg') >= 2 and uf:\n years = self.arg('years')\n if 2018 in years or 2014 in years or 2002 in years:\n return f\"AND UF = '{uf}'\"\n else:\n return f\"AND p_uf = '{uf}'\"\n else:\n return \"\"\n\n def _get_outer_query_columns(self):\n pol = self.arg('pol')\n\n def nome_candidato_replace(c):\n if c == \"c.NOME_CANDIDATO AS NOME_CANDIDATO\":\n nulo = \"IF(v.NUMERO_PARTIDO = '96', 'VOTO NULO', c.NOME_CANDIDATO)\"\n return f\"IF(v.NUMERO_PARTIDO = '95', 'VOTO BRANCO', {nulo}) AS NOME_CANDIDATO\"\n if c == \"l.NOME_PARTIDO AS NOME_PARTIDO\":\n nulo = \"IF(v.NUMERO_PARTIDO = '96', 'VOTO NULO', l.NOME_PARTIDO)\"\n return f\"IF(v.NUMERO_PARTIDO = '95', 'VOTO BRANCO', {nulo}) AS NOME_PARTIDO\"\n if c == \"l.SIGLA_PARTIDO AS SIGLA_PARTIDO\":\n nulo = \"IF(v.NUMERO_PARTIDO = '96', 'VOTO NULO', l.SIGLA_PARTIDO)\"\n return f\"IF(v.NUMERO_PARTIDO = '95', 'VOTO BRANCO', {nulo}) AS SIGLA_PARTIDO\"\n if c == \"l.NUMERO_PARTIDO AS NUMERO_PARTIDO\" and pol != 2:\n return \"v.NUMERO_PARTIDO AS NUMERO_PARTIDO\"\n else:\n return c\n\n columns = map(lambda c: f\"{self._map_column(c)} AS {c}\", self.selected_columns())\n columns = map(nome_candidato_replace, columns)\n\n return columns\n\n def _map_column(self, column):\n reg = self.arg('reg')\n pol = self.arg('pol')\n if column in (votos_cols[reg] + ['ID_CANDIDATO', 'ID_LEGENDA']):\n return f\"v.{column}\"\n elif column in self.candidatos and pol == 2:\n return f\"c.{column}\"\n elif column in self.legendas:\n return f\"l.{column}\"\n else:\n return column\n\n # region def _build_where(self): [...]\n def _build_where(self):\n where = self._build_base_filters()\n pol = self.arg('pol')\n\n where.append(\"v.NUMERO_PARTIDO <> '97'\")\n\n if not self.opt('brancos', True):\n where.append(\"v.NUMERO_PARTIDO <> '95'\")\n\n if not self.opt('nulos', True):\n where.append(\"v.NUMERO_PARTIDO <> '96'\")\n\n if self.opt('turno'):\n where.append(f\"v.NUM_TURNO = \\'{self.options['turno']}\\'\")\n\n if self.opt('mun_filter'):\n where.append(f\"v.COD_MUN_TSE = \\'{self.options['mun_filter']}\\'\")\n\n if self.opt('only_elected', False) and pol == 2:\n where.append(f\"c.COD_SIT_TOT_TURNO IN ('1', '2', '3')\")\n\n if len(where) > 0:\n return \"WHERE \" + \"\\n AND \".join(where)\n else:\n return \"\"\n # endregion\n\n\nclass ElectionsCoalitionsQueryBuilder(AthenaBuilder):\n # region 
Columns\n\n    coligacao = [\n        \"SIGLA_UE\",\n        \"DESCRICAO_UE\",\n        \"SIGLA_COLIGACAO\",\n        \"NOME_COLIGACAO\",\n        \"COMPOSICAO_COLIGACAO\",\n    ]\n\n    # endregion\n\n    def __init__(self, **options):\n        super().__init__(**options)\n        self.selector = ElectionsColumnsSelector(3, self.arg('reg'), self.arg('job'))\n\n    def build(self):\n        vot_table = self.table_name('votos')\n        columns = self._build_select()\n        group_columns = \", \".join(map(str, range(1, len(columns))))\n        columns = \", \".join(columns)\n\n        return f'''\n            SELECT {columns}\n            FROM {vot_table} as v\n            LEFT JOIN legendas as l ON v.ID_LEGENDA = l.ID_LEGENDA AND v.ANO_ELEICAO = l.ANO_ELEICAO\n            {self._build_where()}\n            GROUP BY {group_columns}\n            {self._build_order_by()}\n        '''\n\n    def _build_select(self):\n        columns = self.selected_columns()\n\n        for i in range(len(columns)):\n            if columns[i] == \"QTDE_VOTOS\":\n                columns[i] = \"SUM(v.QTDE_VOTOS) AS QTDE_VOTOS\"\n            else:\n                columns[i] = f\"{self._map_column(columns[i])} AS {columns[i]}\"\n\n        return columns\n\n    def _map_column(self, column):\n        reg = self.arg('reg')\n        if column in (votos_cols[reg] + ['ID_LEGENDA']):\n            return f\"v.{column}\"\n        elif column in self.coligacao:\n            return f\"l.{column}\"\n        else:\n            return column\n\n    # region def _build_where(self): [...]\n    def _build_where(self):\n        where = self._build_base_filters()\n\n        years = \"', '\".join(map(str, self.arg('years')))\n        where.append(f\"v.p_ano IN (\\'{years}\\')\")\n\n        job = self.arg('job')\n        if job == 7:\n            where.append(\"(v.p_cargo = '7' OR v.p_cargo = '8')\")\n        else:\n            where.append(f\"(v.p_cargo = '{job}')\")\n\n        reg = self.arg('reg')\n        uf = self.opt('uf_filter')\n        if reg >= 2 and uf:\n            years = self.arg('years')\n            # do not return here: list.append() yields None and the remaining filters below must still be added\n            if 2018 in years or 2014 in years or 2002 in years:\n                where.append(f\"UF = '{uf}'\")\n            else:\n                where.append(f\"p_uf = '{uf}'\")\n\n        where.append(\"v.NUMERO_CANDIDATO <> '97'\")\n\n        if not self.opt('brancos', True):\n            where.append(\"v.NUMERO_CANDIDATO <> '95'\")\n\n        if not self.opt('nulos', True):\n            where.append(\"v.NUMERO_CANDIDATO <> '96'\")\n\n        if self.opt('turno'):\n            where.append(f\"v.NUM_TURNO = \\'{self.options['turno']}\\'\")\n\n        if self.opt('mun_filter'):\n            where.append(f\"v.COD_MUN_TSE = \\'{self.options['mun_filter']}\\'\")\n\n        return \"WHERE \" + \"\\n AND \".join(where)\n    # endregion\n\n\nclass SummaryElectionsQueryBuilder(AthenaBuilder):\n    mun_columns = [\"SIGLA_UF\", \"COD_MUN_IBGE\", \"NOME_UF\", \"CODIGO_MESO\", \"NOME_MESO\",\n                   \"CODIGO_MICRO\", \"NOME_MICRO\", \"NOME_MUNICIPIO\", \"CODIGO_MACRO\", \"NOME_MACRO\"]\n\n    def __init__(self, **options):\n        super().__init__(**options)\n        self.selector = ElectionsColumnsSelector(4, self.arg('reg'), self.arg('job'))\n\n    def build(self):\n        table = self.table_name('detalhe')\n        years = \"', '\".join(map(str, self.arg('years')))\n        selected_columns = self.selected_columns()\n        sum_columns = [c for c in self.selector.sum_columns() if c in selected_columns]\n        columns = [c for c in self.selected_columns() if c not in sum_columns]\n\n        sum_columns = \", \".join([f\"SUM({c}) AS {c}\" for c in sum_columns])\n        group_columns = \", \".join(map(str, range(1, len(columns) + 1)))\n        columns = \", \".join([f\"{c} AS {c}\" for c in columns])\n\n        if len(sum_columns) > 0:\n            sum_columns = \", \" + sum_columns\n\n        return f'''\n            SELECT {columns}{sum_columns} \n            FROM {table}\n            WHERE p_ano IN (\\'{years}\\') \n            AND {self._build_filter_job()}\n            {self._build_filter_uf()}\n            GROUP BY {group_columns}\n            {self._build_filters('HAVING')}\n            {self._build_order_by()}\n        '''\n\n    def _detalhe_table(self):\n        reg
= self.arg('reg')\n if reg == 9:\n return \"detalhe_votsec\"\n elif reg == 8:\n return \"detalhe_zona\"\n elif reg == 7:\n return \"detalhe_munzona\"\n elif reg == 6:\n return \"detalhe_mun\"\n elif reg == 5:\n return \"votos_micro\"\n elif reg == 4:\n return \"votos_meso\"\n else:\n return \"votos_uf\"\n\n def _build_filter_uf(self):\n uf = self.opt('uf_filter')\n if self.arg('reg') >= 2 and uf:\n return f\"AND UF = '{uf}'\"\n else:\n return \"\"\n\n # region def _build_filters(self, start): [...]\n def _build_filters(self, start):\n where = self._build_base_filters()\n\n if self.opt('mun_filter'):\n where.append(f\"COD_MUN_TSE = '{self.options['mun_filter']}'\")\n\n if self.opt('turno'):\n where.append(f\"NUM_TURNO = '{self.options['turno']}'\")\n\n if len(where) > 0:\n return f\"{start} \" + \"\\n AND \".join(where)\n else:\n return \"\"\n # endregion\n" }, { "alpha_fraction": 0.6574585437774658, "alphanum_fraction": 0.6574585437774658, "avg_line_length": 15.454545021057129, "blob_id": "f32402ebc85e989dca6eb0591dd592ef14660edf", "content_id": "8b109a3b5c04f664103f90a00c7cb590a3592ded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 47, "num_lines": 11, "path": "/etl/tests.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from etl.config import FIXES\nfrom etl.process.TestProcess import TestProcess\n\n\ndef run():\n tester = TestProcess(FIXES)\n tester.handle()\n\n\nif __name__ == \"__main__\":\n run()\n" }, { "alpha_fraction": 0.6599265933036804, "alphanum_fraction": 0.6807848811149597, "avg_line_length": 70.03115844726562, "blob_id": "c98777baae33a2e9fe91a2ce77cae215f3f24bb5", "content_id": "d8dbbf7edeb880113b8b59c3acb88a6f172ed08e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 25175, "license_type": "no_license", "max_line_length": 519, "num_lines": 353, "path": "/docs/como-usar-a-api-rest.html", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html >\n\n<head>\n\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <title>8 Como usar a API REST | Como usar o CepespData/FGV</title>\n <meta name=\"description\" content=\"8 Como usar a API REST | Como usar o CepespData/FGV\">\n <meta name=\"generator\" content=\"bookdown and GitBook 2.6.7\">\n\n <meta property=\"og:title\" content=\"8 Como usar a API REST | Como usar o CepespData/FGV\" />\n <meta property=\"og:type\" content=\"book\" />\n \n \n \n \n\n <meta name=\"twitter:card\" content=\"summary\" />\n <meta name=\"twitter:title\" content=\"8 Como usar a API REST | Como usar o CepespData/FGV\" />\n \n \n \n\n<meta name=\"author\" content=\"Equipe CepespData/FGV\">\n\n\n\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black\">\n \n \n<link rel=\"prev\" href=\"como-visualizar-dados-no-r.html\">\n<link rel=\"next\" href=\"documentos-auxiliares.html\">\n<script src=\"libs/jquery-2.2.3/jquery.min.js\"></script>\n<link href=\"libs/gitbook-2.6.7/css/style.css\" rel=\"stylesheet\" />\n<link href=\"libs/gitbook-2.6.7/css/plugin-table.css\" rel=\"stylesheet\" />\n<link href=\"libs/gitbook-2.6.7/css/plugin-bookdown.css\" rel=\"stylesheet\" />\n<link href=\"libs/gitbook-2.6.7/css/plugin-highlight.css\" rel=\"stylesheet\" 
/>\n<link href=\"libs/gitbook-2.6.7/css/plugin-search.css\" rel=\"stylesheet\" />\n<link href=\"libs/gitbook-2.6.7/css/plugin-fontsettings.css\" rel=\"stylesheet\" />\n\n\n\n\n\n\n\n\n\n<style type=\"text/css\">\ndiv.sourceCode { overflow-x: auto; }\ntable.sourceCode, tr.sourceCode, td.lineNumbers, td.sourceCode {\n margin: 0; padding: 0; vertical-align: baseline; border: none; }\ntable.sourceCode { width: 100%; line-height: 100%; }\ntd.lineNumbers { text-align: right; padding-right: 4px; padding-left: 4px; color: #aaaaaa; border-right: 1px solid #aaaaaa; }\ntd.sourceCode { padding-left: 5px; }\ncode > span.kw { color: #007020; font-weight: bold; } /* Keyword */\ncode > span.dt { color: #902000; } /* DataType */\ncode > span.dv { color: #40a070; } /* DecVal */\ncode > span.bn { color: #40a070; } /* BaseN */\ncode > span.fl { color: #40a070; } /* Float */\ncode > span.ch { color: #4070a0; } /* Char */\ncode > span.st { color: #4070a0; } /* String */\ncode > span.co { color: #60a0b0; font-style: italic; } /* Comment */\ncode > span.ot { color: #007020; } /* Other */\ncode > span.al { color: #ff0000; font-weight: bold; } /* Alert */\ncode > span.fu { color: #06287e; } /* Function */\ncode > span.er { color: #ff0000; font-weight: bold; } /* Error */\ncode > span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */\ncode > span.cn { color: #880000; } /* Constant */\ncode > span.sc { color: #4070a0; } /* SpecialChar */\ncode > span.vs { color: #4070a0; } /* VerbatimString */\ncode > span.ss { color: #bb6688; } /* SpecialString */\ncode > span.im { } /* Import */\ncode > span.va { color: #19177c; } /* Variable */\ncode > span.cf { color: #007020; font-weight: bold; } /* ControlFlow */\ncode > span.op { color: #666666; } /* Operator */\ncode > span.bu { } /* BuiltIn */\ncode > span.ex { } /* Extension */\ncode > span.pp { color: #bc7a00; } /* Preprocessor */\ncode > span.at { color: #7d9029; } /* Attribute */\ncode > span.do { color: #ba2121; font-style: italic; } /* Documentation */\ncode > span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */\ncode > span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */\ncode > span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */\n</style>\n\n</head>\n\n<body>\n\n\n\n <div class=\"book without-animation with-summary font-size-2 font-family-1\" data-basepath=\".\">\n\n <div class=\"book-summary\">\n <nav role=\"navigation\">\n\n<ul class=\"summary\">\n<li class=\"chapter\" data-level=\"1\" data-path=\"index.html\"><a href=\"index.html\"><i class=\"fa fa-check\"></i><b>1</b> O CepespData</a></li>\n<li class=\"chapter\" data-level=\"2\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html\"><i class=\"fa fa-check\"></i><b>2</b> Como usar o site</a><ul>\n<li class=\"chapter\" data-level=\"2.1\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#bancos-de-dados-disponiveis-no-site-do-cepespdata\"><i class=\"fa fa-check\"></i><b>2.1</b> Bancos de dados disponíveis no site do Cepespdata</a><ul>\n<li class=\"chapter\" data-level=\"2.1.1\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#resultados-de-eleicoes-por-cargo\"><i class=\"fa fa-check\"></i><b>2.1.1</b> Resultados de eleições por cargo</a></li>\n<li class=\"chapter\" data-level=\"2.1.2\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#perfil-dos-candidatos\"><i class=\"fa fa-check\"></i><b>2.1.2</b> Perfil dos 
candidatos</a></li>\n<li class=\"chapter\" data-level=\"2.1.3\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#coligacoes\"><i class=\"fa fa-check\"></i><b>2.1.3</b> Coligações</a></li>\n<li class=\"chapter\" data-level=\"2.1.4\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#votos\"><i class=\"fa fa-check\"></i><b>2.1.4</b> Votos</a></li>\n<li class=\"chapter\" data-level=\"2.1.5\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#resumo-das-eleicoes\"><i class=\"fa fa-check\"></i><b>2.1.5</b> Resumo das eleições</a></li>\n<li class=\"chapter\" data-level=\"2.1.6\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#bens-de-candidatos\"><i class=\"fa fa-check\"></i><b>2.1.6</b> Bens de Candidatos</a></li>\n<li class=\"chapter\" data-level=\"2.1.7\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#filiados\"><i class=\"fa fa-check\"></i><b>2.1.7</b> Filiados</a></li>\n<li class=\"chapter\" data-level=\"2.1.8\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#secretarios\"><i class=\"fa fa-check\"></i><b>2.1.8</b> Secretários</a></li>\n<li class=\"chapter\" data-level=\"2.1.9\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#bancos-a-serem-disponibilizados\"><i class=\"fa fa-check\"></i><b>2.1.9</b> Bancos a serem disponibilizados</a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"2.2\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#ferramentas\"><i class=\"fa fa-check\"></i><b>2.2</b> Ferramentas</a><ul>\n<li class=\"chapter\" data-level=\"2.2.1\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#selecao-dos-anos\"><i class=\"fa fa-check\"></i><b>2.2.1</b> Seleção dos Anos</a></li>\n<li class=\"chapter\" data-level=\"2.2.2\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#selecao-de-colunas\"><i class=\"fa fa-check\"></i><b>2.2.2</b> Seleção de colunas</a></li>\n<li class=\"chapter\" data-level=\"2.2.3\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#filtro-de-colunas\"><i class=\"fa fa-check\"></i><b>2.2.3</b> Filtro de colunas</a></li>\n<li class=\"chapter\" data-level=\"2.2.4\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#agregacao-politica\"><i class=\"fa fa-check\"></i><b>2.2.4</b> Agregação Política</a></li>\n<li class=\"chapter\" data-level=\"2.2.5\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#agregacao-regional\"><i class=\"fa fa-check\"></i><b>2.2.5</b> Agregação Regional</a></li>\n<li class=\"chapter\" data-level=\"2.2.6\" data-path=\"como-usar-o-site.html\"><a href=\"como-usar-o-site.html#download-dos-dados\"><i class=\"fa fa-check\"></i><b>2.2.6</b> Download dos dados</a></li>\n</ul></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"3\" data-path=\"o-que-e-o-r.html\"><a href=\"o-que-e-o-r.html\"><i class=\"fa fa-check\"></i><b>3</b> O que é o R?</a><ul>\n<li class=\"chapter\" data-level=\"3.1\" data-path=\"o-que-e-o-r.html\"><a href=\"o-que-e-o-r.html#instalando-o-r-e-o-rstudio\"><i class=\"fa fa-check\"></i><b>3.1</b> Instalando o R e o RStudio</a></li>\n<li class=\"chapter\" data-level=\"3.2\" data-path=\"o-que-e-o-r.html\"><a href=\"o-que-e-o-r.html#onde-estudar-r\"><i class=\"fa fa-check\"></i><b>3.2</b> Onde estudar R?</a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"4\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html\"><i class=\"fa fa-check\"></i><b>4</b> Como usar a API 
R</a><ul>\n<li class=\"chapter\" data-level=\"4.1\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#instalando-o-cepespr\"><i class=\"fa fa-check\"></i><b>4.1</b> Instalando o CepespR</a></li>\n<li class=\"chapter\" data-level=\"4.2\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#explorando-as-funcoes-do-cepespr\"><i class=\"fa fa-check\"></i><b>4.2</b> Explorando as funções do <em>cepespR</em></a><ul>\n<li class=\"chapter\" data-level=\"4.2.1\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#perfil-dos-candidatos-get_candidates\"><i class=\"fa fa-check\"></i><b>4.2.1</b> Perfil dos candidatos <code>get_candidates</code></a></li>\n<li class=\"chapter\" data-level=\"4.2.2\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#votos-por-eleicao-get_votes\"><i class=\"fa fa-check\"></i><b>4.2.2</b> Votos por eleição <code>get_votes</code></a></li>\n<li class=\"chapter\" data-level=\"4.2.3\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#coligacoes-get_coalitions\"><i class=\"fa fa-check\"></i><b>4.2.3</b> Coligações <code>get_coalitions</code></a></li>\n<li class=\"chapter\" data-level=\"4.2.4\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#resultado-de-eleicoes-por-cargo-get_elections\"><i class=\"fa fa-check\"></i><b>4.2.4</b> Resultado de eleições por cargo <code>get_elections</code></a></li>\n<li class=\"chapter\" data-level=\"4.2.5\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#filiados-get_filiates\"><i class=\"fa fa-check\"></i><b>4.2.5</b> Filiados <code>get_filiates</code></a></li>\n<li class=\"chapter\" data-level=\"4.2.6\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#bens-de-candidatos-get_assets\"><i class=\"fa fa-check\"></i><b>4.2.6</b> Bens de candidatos <code>get_assets</code></a></li>\n<li class=\"chapter\" data-level=\"4.2.7\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#secretarios-get_secretaries\"><i class=\"fa fa-check\"></i><b>4.2.7</b> Secretários <code>get_secretaries</code></a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"4.3\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#utilizando-codigos-ao-inves-de-nomes\"><i class=\"fa fa-check\"></i><b>4.3</b> Utilizando códigos ao invés de nomes</a></li>\n<li class=\"chapter\" data-level=\"4.4\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#filtrando-resultados\"><i class=\"fa fa-check\"></i><b>4.4</b> Filtrando resultados</a><ul>\n<li class=\"chapter\" data-level=\"4.4.1\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#selecionando-partidos-candidatos-e-estados\"><i class=\"fa fa-check\"></i><b>4.4.1</b> Selecionando partidos, candidatos e Estados</a></li>\n<li class=\"chapter\" data-level=\"4.4.2\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#selecionando-colunas\"><i class=\"fa fa-check\"></i><b>4.4.2</b> Selecionando colunas</a></li>\n<li class=\"chapter\" data-level=\"4.4.3\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#informacoes-para-mais-de-um-ano\"><i class=\"fa fa-check\"></i><b>4.4.3</b> Informações para mais de um ano</a></li>\n<li class=\"chapter\" data-level=\"4.4.4\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#informacoes-para-mais-de-um-cargo\"><i class=\"fa fa-check\"></i><b>4.4.4</b> Informações para mais de um cargo</a></li>\n<li class=\"chapter\" 
data-level=\"4.4.5\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#cache-das-consultas\"><i class=\"fa fa-check\"></i><b>4.4.5</b> Cache das consultas</a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"4.5\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#sobrevoando-os-dados\"><i class=\"fa fa-check\"></i><b>4.5</b> Sobrevoando os dados</a></li>\n<li class=\"chapter\" data-level=\"4.6\" data-path=\"como-usar-a-api-r.html\"><a href=\"como-usar-a-api-r.html#salvando-os-resultados\"><i class=\"fa fa-check\"></i><b>4.6</b> Salvando os resultados</a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"5\" data-path=\"api-r-exercicios-para-praticar.html\"><a href=\"api-r-exercicios-para-praticar.html\"><i class=\"fa fa-check\"></i><b>5</b> API R: Exercícios para praticar</a><ul>\n<li class=\"chapter\" data-level=\"5.1\" data-path=\"api-r-exercicios-para-praticar.html\"><a href=\"api-r-exercicios-para-praticar.html#respostas-aos-exercicios\"><i class=\"fa fa-check\"></i><b>5.1</b> Respostas aos exercícios</a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"6\" data-path=\"como-manipular-os-dados-no-r.html\"><a href=\"como-manipular-os-dados-no-r.html\"><i class=\"fa fa-check\"></i><b>6</b> Como manipular os dados no R</a><ul>\n<li class=\"chapter\" data-level=\"6.1\" data-path=\"como-manipular-os-dados-no-r.html\"><a href=\"como-manipular-os-dados-no-r.html#filtrar\"><i class=\"fa fa-check\"></i><b>6.1</b> Filtrar</a></li>\n<li class=\"chapter\" data-level=\"6.2\" data-path=\"como-manipular-os-dados-no-r.html\"><a href=\"como-manipular-os-dados-no-r.html#selecionar-e-ordenar\"><i class=\"fa fa-check\"></i><b>6.2</b> Selecionar e Ordenar</a></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"7\" data-path=\"como-visualizar-dados-no-r.html\"><a href=\"como-visualizar-dados-no-r.html\"><i class=\"fa fa-check\"></i><b>7</b> Como visualizar dados no R</a><ul>\n<li class=\"chapter\" data-level=\"7.1\" data-path=\"como-visualizar-dados-no-r.html\"><a href=\"como-visualizar-dados-no-r.html#graficos\"><i class=\"fa fa-check\"></i><b>7.1</b> Gráficos</a><ul>\n<li class=\"chapter\" data-level=\"7.1.1\" data-path=\"como-visualizar-dados-no-r.html\"><a href=\"como-visualizar-dados-no-r.html#grafico-de-barras\"><i class=\"fa fa-check\"></i><b>7.1.1</b> Gráfico de Barras</a></li>\n</ul></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"8\" data-path=\"como-usar-a-api-rest.html\"><a href=\"como-usar-a-api-rest.html\"><i class=\"fa fa-check\"></i><b>8</b> Como usar a API REST</a><ul>\n<li class=\"chapter\" data-level=\"8.1\" data-path=\"como-usar-a-api-rest.html\"><a href=\"como-usar-a-api-rest.html#estrutura-das-resquisicoes\"><i class=\"fa fa-check\"></i><b>8.1</b> Estrutura das resquisições</a><ul>\n<li class=\"chapter\" data-level=\"8.1.1\" data-path=\"como-usar-a-api-rest.html\"><a href=\"como-usar-a-api-rest.html#api-rest-no-navegador\"><i class=\"fa fa-check\"></i><b>8.1.1</b> API Rest no navegador</a></li>\n<li class=\"chapter\" data-level=\"8.1.2\" data-path=\"como-usar-a-api-rest.html\"><a href=\"como-usar-a-api-rest.html#api-rest-no-r\"><i class=\"fa fa-check\"></i><b>8.1.2</b> API Rest no R</a></li>\n</ul></li>\n</ul></li>\n<li class=\"chapter\" data-level=\"9\" data-path=\"documentos-auxiliares.html\"><a href=\"documentos-auxiliares.html\"><i class=\"fa fa-check\"></i><b>9</b> Documentos auxiliares</a></li>\n</ul>\n\n </nav>\n </div>\n\n <div class=\"book-body\">\n <div class=\"body-inner\">\n <div class=\"book-header\" 
role=\"navigation\">\n <h1>\n <i class=\"fa fa-circle-o-notch fa-spin\"></i><a href=\"./\">Como usar o CepespData/FGV</a>\n </h1>\n </div>\n\n <div class=\"page-wrapper\" tabindex=\"-1\" role=\"main\">\n <div class=\"page-inner\">\n\n <section class=\"normal\" id=\"section-\">\n<div id=\"como-usar-a-api-rest\" class=\"section level1\">\n<h1><span class=\"header-section-number\">8</span> Como usar a API REST</h1>\n<p>A <a href=\"https://github.com/Cepesp-Fgv/cepesp-rest\">API Rest</a> é outra forma de acessar os dados do CepespData e pode ser utilizada em diferentes <em>softwares</em> ou programas.</p>\n<p>Aqui vamos mostrar como fazer a requisição usando apenas o navegador, o <em>web browser</em>, e usando o <strong>R</strong>.</p>\n<div id=\"estrutura-das-resquisicoes\" class=\"section level2\">\n<h2><span class=\"header-section-number\">8.1</span> Estrutura das resquisições</h2>\n<p>A estrutura da consulta no browser é:</p>\n<pre><code>cepesp.io/api/consulta/athena/query?table=&lt;TABELA&gt;&amp;&lt;ARGUMENTOS&gt;</code></pre>\n<p>O argumento <code>table</code> indica qual é a base de dados que se deseja acessar: <code>tse</code> (banco <strong>Resultado de eleições por cargo</strong>), <code>candidatos</code> (banco <strong>Perfil de candidatos</strong>), <code>legendas</code> (<strong>Coligações</strong>), <code>votos</code> (banco de <strong>Votos</strong>), <code>bem_candidato</code> (<strong>Bens de candidatos</strong>), <code>secretarios</code> (<strong>Secretários</strong>) ou <code>filiados</code> (<strong>Filiados</strong>).</p>\n<p>Os demais argumentos (<code>&lt;ARGUMENTOS&gt;</code> acima) possíveis são:</p>\n<table>\n<thead>\n<tr class=\"header\">\n<th>Argumentos</th>\n<th>Bases de dados que suportam este argumento</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd\">\n<td>anos</td>\n<td>tse, candidatos, legendas, votos, bem_candidato</td>\n</tr>\n<tr class=\"even\">\n<td>cargo</td>\n<td>tse, candidatos, legendas, votos</td>\n</tr>\n<tr class=\"odd\">\n<td>agregacao_regional</td>\n<td>tse, votos</td>\n</tr>\n<tr class=\"even\">\n<td>agregacao_politica</td>\n<td>tse</td>\n</tr>\n<tr class=\"odd\">\n<td>uf_filter</td>\n<td>tse, votos, bem_candidato, filiados</td>\n</tr>\n<tr class=\"even\">\n<td>mun_filter</td>\n<td>tse, votos</td>\n</tr>\n<tr class=\"odd\">\n<td>only_elected</td>\n<td>tse, candidatos</td>\n</tr>\n<tr class=\"even\">\n<td>brancos</td>\n<td>tse, votos</td>\n</tr>\n<tr class=\"odd\">\n<td>nulos</td>\n<td>tse, votos</td>\n</tr>\n<tr class=\"even\">\n<td>name_filter</td>\n<td>secretarios</td>\n</tr>\n<tr class=\"odd\">\n<td>goverment_period</td>\n<td>secretarios</td>\n</tr>\n<tr class=\"even\">\n<td>party</td>\n<td>filiados</td>\n</tr>\n</tbody>\n</table>\n<p>Além disso, caso as colunas desejadas não estajam na lista de colunas-padrão da consulta de determinado banco, é possível selecionar colunas acrescentando o texto <code>&amp;c[]=&lt;COLUNA&gt;</code> ao final do link de requisição, e/ou filtrar colunas, acrescentando o texto <code>&amp;filters[&lt;COLUNA&gt;]=&lt;VALOR&gt;</code>. 
<em>Atenção: garanta que a coluna a ser filtrada foi devidamente selecionada.</em></p>\n<p>Por exemplo, para filtrar candidatos(as) à Presidência em 2014 que se declararam pretos(as), utilizamos o seguinte link:</p>\n<pre><code>cepesp.io/api/consulta/athena/query?table=candidatos&amp;anos=2014&amp;cargo=1&amp;c[]=ANO_ELEICAO&amp;c[]=NUM_TURNO&amp;c[]=SIGLA_UE&amp;c[]=DESCRICAO_CARGO&amp;c[]=SIGLA_PARTIDO&amp;c[]=NUMERO_CANDIDATO&amp;c[]=CPF_CANDIDATO&amp;c[]=NOME_URNA_CANDIDATO&amp;c[]=DESCRICAO_SEXO&amp;c[]=DESCRICAO_COR_RACA&amp;c[]=DESC_SIT_TOT_TURNO&amp;filters[DESCRICAO_COR_RACA]=PRETA</code></pre>\n<p>É possível acessar a lista de colunas disponíveis para cada banco de dados no nosso <a href=\"http://www.cepespdata.io/static/docs/cepespdata_dicionario_publico.xlsx\">dicionário de variáveis</a> ou no <a href=\"https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas\">nosso GitHub</a>.</p>\n<div id=\"api-rest-no-navegador\" class=\"section level3\">\n<h3><span class=\"header-section-number\">8.1.1</span> API Rest no navegador</h3>\n<p>Quando se insere o link conforme a estrutura descrita acima no navegador, a requisição deve ser bem-sucedida. Isso pode ser verificado por meio do aviso <code>last_status:&quot;SUCCEEDED&quot;</code> que deverá aparecer no canto superior esquerdo da tela.</p>\n<p>Neste caso, guarde o número do <code>id</code> devolvido pela consulta. No caso da consulta do exemplo acima: <code>id: 7738</code>.</p>\n<p>Finalmente, faça o download da requisição em formato CSV inserindo o link <code>cepesp.io/api/consulta/athena/result?id=&lt;ID&gt;&amp;ignore_version=true</code> em seu navegador, mas substituindo o termo <code>&lt;ID&gt;</code> pelo número de <code>id</code> recuperado na consulta acima. No nosso exemplo: <code>7738</code>. Ou seja, neste caso, o link a ser inserido seria:</p>\n<pre><code>cepesp.io/api/consulta/athena/result?id=7738&amp;ignore_version=true</code></pre>\n</div>\n<div id=\"api-rest-no-r\" class=\"section level3\">\n<h3><span class=\"header-section-number\">8.1.2</span> API Rest no R</h3>\n<p>Utilizar a API Rest no R permite importar dados de forma mais automática e estruturada, diretamente para o ambiente <strong>R</strong>, onde eles podem ser manipulados de acordo com seu interesse.</p>\n<p>Para usar a API Rest por meio do R, você vai precisar instalar os pacotes <code>httr</code> e <code>jsonlite</code>.</p>\n<div class=\"sourceCode\"><pre class=\"sourceCode r\"><code class=\"sourceCode r\"><span class=\"kw\">install.packages</span>(<span class=\"st\">&quot;httr&quot;</span>)\n<span class=\"kw\">install.packages</span>(<span class=\"st\">&quot;jsonlite&quot;</span>)</code></pre></div>\n<p>Em seguida, vamos carregar os pacotes:</p>\n<div class=\"sourceCode\"><pre class=\"sourceCode r\"><code class=\"sourceCode r\"><span class=\"kw\">require</span>(<span class=\"st\">&quot;httr&quot;</span>)\n<span class=\"kw\">require</span>(<span class=\"st\">&quot;jsonlite&quot;</span>)</code></pre></div>\n<p>E depois fazer a requisição utilizando a estrutura descrita acima em <strong>Estrutura das requisições</strong>. 
Vamos usar um exemplo:</p>\n<div class=\"sourceCode\"><pre class=\"sourceCode r\"><code class=\"sourceCode r\"><span class=\"co\"># Definindo link da requisição:</span>\nlink &lt;-<span class=\"st\"> &quot;cepesp.io/api/consulta/athena/query?table=candidatos&amp;anos=2014&amp;cargo=1&amp;c[]=ANO_ELEICAO&amp;c[]=NUM_TURNO&amp;c[]=SIGLA_UE&amp;c[]=DESCRICAO_CARGO&amp;c[]=SIGLA_PARTIDO&amp;c[]=NUMERO_CANDIDATO&amp;c[]=CPF_CANDIDATO&amp;c[]=NOME_URNA_CANDIDATO&amp;c[]=DESCRICAO_SEXO&amp;c[]=DESCRICAO_COR_RACA&amp;c[]=DESC_SIT_TOT_TURNO&amp;filters[DESCRICAO_COR_RACA]=PRETA&quot;</span>\n\n<span class=\"co\"># Fazendo requisição:</span>\ncall &lt;-<span class=\"st\"> </span>httr<span class=\"op\">::</span><span class=\"kw\">GET</span>(<span class=\"st\">&quot;cepesp.io/api/consulta/athena/query?table=candidatos&amp;anos=2014&amp;cargo=1&amp;c[]=ANO_ELEICAO&amp;c[]=NUM_TURNO&amp;c[]=SIGLA_UE&amp;c[]=DESCRICAO_CARGO&amp;c[]=SIGLA_PARTIDO&amp;c[]=NUMERO_CANDIDATO&amp;c[]=CPF_CANDIDATO&amp;c[]=NOME_URNA_CANDIDATO&amp;c[]=DESCRICAO_SEXO&amp;c[]=DESCRICAO_COR_RACA&amp;c[]=DESC_SIT_TOT_TURNO&amp;filters[DESCRICAO_COR_RACA]=PRETA&quot;</span>)\n\n<span class=\"co\"># Transformando a lista em texto:</span>\ncall_text &lt;-<span class=\"st\"> </span>httr<span class=\"op\">::</span><span class=\"kw\">content</span>(call, <span class=\"st\">&#39;text&#39;</span>)\n\n<span class=\"co\"># Abrindo a nossa lista JSON:</span>\ncall_json &lt;-<span class=\"st\"> </span><span class=\"kw\">fromJSON</span>(call_text, <span class=\"dt\">flatten =</span> <span class=\"ot\">TRUE</span>) \n<span class=\"kw\">View</span>(call_json) <span class=\"co\"># Aqui conseguimos acessar o id que contém a nossa requisição, que pode ser acessado no:</span>\ncall_json<span class=\"op\">$</span>id\n\n<span class=\"co\"># Assim vamos importar o banco desejado inserindo o nosso id na requisição do resultado da consulta:</span>\nrequis &lt;-<span class=\"st\"> </span>httr<span class=\"op\">::</span><span class=\"kw\">GET</span>(<span class=\"kw\">paste0</span>(<span class=\"st\">&#39;cepesp.io/api/consulta/athena/result?id=&#39;</span>,call_json<span class=\"op\">$</span>id,<span class=\"st\">&#39;&amp;ignore_version=true&#39;</span>))\n\n<span class=\"co\"># Transformando a requisição em formato de banco de dados:</span>\nrequis_df &lt;-<span class=\"st\"> </span>httr<span class=\"op\">::</span><span class=\"kw\">content</span>(requis, <span class=\"st\">&#39;parsed&#39;</span>)</code></pre></div>\n<p>Assim, o objeto final <code>requis_df</code> deve conter o banco de dados desejado, que pode ser manipulado dentro do seu ambiente R e salvo conforme interesse. 
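Como ilustração, um esboço mínimo para gravar esse banco em disco (supomos que <code>requis_df</code> já esteja estruturado como <em>data.frame</em>; o nome do arquivo é apenas ilustrativo):</p>\n<div class=\"sourceCode\"><pre class=\"sourceCode r\"><code class=\"sourceCode r\"><span class=\"co\"># Esboço ilustrativo: convertendo o resultado em data.frame e salvando em CSV (separador &quot;;&quot;)</span>\nrequis_df &lt;-<span class=\"st\"> </span><span class=\"kw\">as.data.frame</span>(requis_df)\n<span class=\"kw\">write.csv2</span>(requis_df, <span class=\"st\">&quot;resultado_cepesp.csv&quot;</span>, <span class=\"dt\">row.names =</span> <span class=\"ot\">FALSE</span>)</code></pre></div>\n<p>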
Veja as seções 3 e 4 deste tutorial para mais detalhes sobre a utilização do R.</p>\n<p>Para mais detalhes sobre a API Rest do CepespData/FGV, consulte nossa página no <a href=\"https://github.com/Cepesp-Fgv/cepesp-rest/\">GitHub</a>.</p>\n\n</div>\n</div>\n</div>\n </section>\n\n </div>\n </div>\n </div>\n<a href=\"como-visualizar-dados-no-r.html\" class=\"navigation navigation-prev \" aria-label=\"Previous page\"><i class=\"fa fa-angle-left\"></i></a>\n<a href=\"documentos-auxiliares.html\" class=\"navigation navigation-next \" aria-label=\"Next page\"><i class=\"fa fa-angle-right\"></i></a>\n </div>\n </div>\n<script src=\"libs/gitbook-2.6.7/js/app.min.js\"></script>\n<script src=\"libs/gitbook-2.6.7/js/lunr.js\"></script>\n<script src=\"libs/gitbook-2.6.7/js/plugin-search.js\"></script>\n<script src=\"libs/gitbook-2.6.7/js/plugin-sharing.js\"></script>\n<script src=\"libs/gitbook-2.6.7/js/plugin-fontsettings.js\"></script>\n<script src=\"libs/gitbook-2.6.7/js/plugin-bookdown.js\"></script>\n<script src=\"libs/gitbook-2.6.7/js/jquery.highlight.js\"></script>\n<script>\ngitbook.require([\"gitbook\"], function(gitbook) {\ngitbook.start({\n\"sharing\": {\n\"github\": false,\n\"facebook\": true,\n\"twitter\": true,\n\"google\": false,\n\"linkedin\": false,\n\"weibo\": false,\n\"instapaper\": false,\n\"vk\": false,\n\"all\": [\"facebook\", \"google\", \"twitter\", \"linkedin\", \"weibo\", \"instapaper\"]\n},\n\"fontsettings\": {\n\"theme\": \"white\",\n\"family\": \"sans\",\n\"size\": 2\n},\n\"edit\": {\n\"link\": null,\n\"text\": null\n},\n\"history\": {\n\"link\": null,\n\"text\": null\n},\n\"download\": null,\n\"toc\": {\n\"collapse\": \"subsection\"\n}\n});\n});\n</script>\n\n</body>\n\n</html>\n" }, { "alpha_fraction": 0.7220398187637329, "alphanum_fraction": 0.7585905194282532, "avg_line_length": 55.407405853271484, "blob_id": "204f7c36ba0aa3990cb2a81516a7e9b7abec412a", "content_id": "2c528faf21c3a342f4381734c78bfe0249af0623", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4648, "license_type": "no_license", "max_line_length": 511, "num_lines": 81, "path": "/README.md", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# TSE Dados\n\nEsse projeto contém os códigos e análises para criação do CEPESPData (cepesp.io). O CEPESPData é um repositório que permite o acesso, de forma fácil e confiável, aos resultados e dados eleitorais. O CEPESPData permite a consulta e fácil visualização de resultados e dados eleitorais para todos os cargos, a partir de diferentes agregações geográficas (Brasil, UF, município, micro-região, entre outras), de 1998 a 2014. \n\nAlém de facilitar o acesso aos dados eleitorais, temos o objetivo de criar um repositório aberto e transparente. Dessa forma, nossos códigos, análises e testes de consistências dos dados estão disponíveis neste repositório. 
Estamos desenvolvendo também um R API (https://github.com/Cepesp-Fgv/cepesp-r), Python API (https://github.com/Cepesp-Fgv/cepesp-python) e Rest API (https://github.com/Cepesp-Fgv/cepesp-rest) para auxiliar na consulta dos dados no nosso repositório a partir de linguagens de programação.\n\nSugestões, correções e demais contribuições são bem-vindas.\n\n## Pré-requisitos\n- Python 3.6 ou mais recente\n\n## Instalação\n\nPrimeiro, você precisa instalar a ferramenta [virtualenv](https://virtualenv.pypa.io/en/latest/installation/):\n```bash\npip install virtualenv\n```\n\nE então, crie um ambiente virtual.\n```bash\ncd /caminho/para/o/tse-dados\nvirtualenv .venv --python=python3\n```\n\nPara ativar o seu ambiente virtual recém-criado:\n\n- No Linux/MacOS:\n ```bash\n source .venv/bin/activate\n ```\n- No Windows:\n ```bash\n ./.venv/Scripts/activate\n ```\n\nInstale os módulos requeridos pelo sistema.\n```bash\npip install -r requirements.txt\n```\n\nCrie um arquivo `.env` e edite-o.\n```bash\ncp .env.example .env\n```\n\nPronto! Agora já pode iniciar a aplicação.\n\n## Rodando o Servidor Web (CepespData)\n```bash\nFLASK_APP=web/application.py python -m flask run\n```\n\n## Rodando o ETL\nAntes, edite o arquivo `etl/config.py` com as configurações necessárias.\n```bash\npython -c \"from etl.run import run; run()\"\n```\n\n### Fonte Original dos Dados:\n\nMuitos dos dados originais utilizados aqui podem ser encontrados no Repositório de Dados Eleitorais do TSE: http://www.tse.jus.br/eleicoes/estatisticas/repositorio-de-dados-eleitorais\n\n### Wiki\n\n - [Estrutura do Projeto](https://github.com/Cepesp-Fgv/tse-dados/wiki/Estrutura)\n - [Seletores de Colunas](https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas)\n - [Eleições por Cargo](https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas#reposit%C3%B3rio-elei%C3%A7%C3%B5es-por-cargo-libtsecolumnspy)\n - [Resultados da Eleição](https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas#reposit%C3%B3rio-resultados-da-elei%C3%A7%C3%A3o-libvotoscolumnspy)\n - [Candidatos](https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas#reposit%C3%B3rio-candidatos-libcandidatoscolumnspy)\n - [Legendas](https://github.com/Cepesp-Fgv/tse-dados/wiki/Colunas#reposit%C3%B3rio-legendas-liblegendascolumnspy)\n - [Leitores de Dados](https://github.com/Cepesp-Fgv/tse-dados/wiki/Leitores)\n - [Caching](https://github.com/Cepesp-Fgv/tse-dados/wiki/Leitores#caching)\n - [Correções em Pré-Processamento](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento)\n - [Adicionar Cargos Extras (2014)](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#adicionar-cargos-extras-2014)\n - [Correção Descrição Eleição (2014)](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#corre%C3%A7%C3%A3o-descri%C3%A7%C3%A3o-elei%C3%A7%C3%A3o-2014)\n - [Correção Descrição Eleição (2010)](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#corre%C3%A7%C3%A3o-descri%C3%A7%C3%A3o-elei%C3%A7%C3%A3o-2010)\n - [Padronização do Código Situação Turno](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#padroniza%C3%A7%C3%A3o-do-c%C3%B3digo-situa%C3%A7%C3%A3o-turno)\n - [Corrigir Sequencia Coligação (2014)](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#corrigir-sequencia-coliga%C3%A7%C3%A3o-2014)\n - [Corrigir Sequencia Coligação 
(2010)](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#corrigir-sequencia-coliga%C3%A7%C3%A3o-2010)\n - [Corrigir Código Cor Raça](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#corrigir-c%C3%B3digo-cor-ra%C3%A7a)\n - [Corrigir Email Candidato](https://github.com/Cepesp-Fgv/tse-dados/wiki/Corre%C3%A7%C3%B5es-em-Pr%C3%A9-Processamento#corrigir-email-candidato)\n" }, { "alpha_fraction": 0.5349084138870239, "alphanum_fraction": 0.5418104529380798, "avg_line_length": 32.63393020629883, "blob_id": "5e1843018574267c6775a4021fb52fea4b4cffc9", "content_id": "2d4584d45ab5b502f14794214e138c3c2a820b2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3767, "license_type": "no_license", "max_line_length": 95, "num_lines": 112, "path": "/web/cepesp/athena/builders/others.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.columns.votos import VotesColumnsSelector\nfrom web.cepesp.athena.builders.base import AthenaBuilder\nfrom web.cepesp.columns.candidatos import CandidatesColumnsSelector\nfrom web.cepesp.columns.legendas import CoalitionsColumnsSelector\n\n\nclass CandidatesCoalitionsQueryBuilder(AthenaBuilder):\n\n def __init__(self, **options):\n super().__init__(**options)\n table = self.arg('table')\n\n if table == 'candidatos':\n self.selector = CandidatesColumnsSelector()\n elif table == 'legendas':\n self.selector = CoalitionsColumnsSelector()\n\n def build(self):\n table = self.arg('table')\n years = \"', '\".join(map(str, self.arg('years')))\n\n columns = \", \".join([f\"{self._map_column(c)} AS {c}\" for c in self.selected_columns()])\n return f'''\n SELECT {columns} FROM {table} AS v\n WHERE p_ano IN (\\'{years}\\') \n AND {self._build_filter_job()}\n {self._build_filters('AND')}\n {self._build_order_by()}\n '''\n\n # region def _build_filters(self, start): [...]\n def _build_filters(self, start):\n where = self._build_base_filters()\n\n if self.opt('turno'):\n where.append(f\"v.NUM_TURNO = '{self.options['turno']}'\")\n\n if self.opt('only_elected', False) and self.arg('table') == 'candidatos':\n where.append(f\"v.COD_SIT_TOT_TURNO IN ('1', '2', '3')\")\n\n if len(where) > 0:\n return f\"{start} \" + \"\\n AND \".join(where)\n else:\n return \"\"\n # endregion\n\n\nclass VotesQueryBuilder(AthenaBuilder):\n\n def __init__(self, **options):\n super().__init__(**options)\n self.selector = VotesColumnsSelector(self.arg('reg'))\n\n def build(self):\n table = self.table_name(\"votos\")\n years = \"', '\".join(map(str, self.arg('years')))\n selected_columns = self.selected_columns()\n sum_columns = [c for c in self.selector.sum_columns() if c in selected_columns]\n columns = [c for c in self.selected_columns() if c not in sum_columns]\n\n sum_columns = \", \".join([f\"SUM({c}) AS {c}\" for c in sum_columns])\n group_columns = \", \".join(map(str, range(1, len(columns) + 1)))\n columns = \", \".join([f\"{c} AS {c}\" for c in columns])\n\n if sum_columns:\n sum_columns = \", \" + sum_columns\n\n return f'''\n SELECT {columns}{sum_columns} \n FROM {table}\n WHERE p_ano IN (\\'{years}\\') \n AND {self._build_filter_job()}\n {self._build_filter_uf()}\n GROUP BY {group_columns}\n {self._build_filters('HAVING')}\n {self._build_order_by()}\n '''\n\n def _build_filter_uf(self):\n uf = self.opt('uf_filter')\n if self.arg('reg') >= 2 and uf:\n years = self.arg('years')\n if 2018 in years or 2014 in years or 2002 in years:\n 
return f\"AND UF = '{uf}'\"\n else:\n return f\"AND p_uf = '{uf}'\"\n else:\n return \"\"\n\n # region def _build_filters(self, start): [...]\n def _build_filters(self, start):\n where = self._build_base_filters()\n\n where.append(\"NUMERO_CANDIDATO <> '97'\")\n\n if not self.opt('brancos', True):\n where.append(\"NUMERO_CANDIDATO <> '95'\")\n\n if not self.opt('nulos', True):\n where.append(\"NUMERO_CANDIDATO <> '96'\")\n\n if self.opt('mun_filter'):\n where.append(f\"COD_MUN_TSE = '{self.options['mun_filter']}'\")\n\n if self.opt('turno'):\n where.append(f\"NUM_TURNO = '{self.options['turno']}'\")\n\n if len(where) > 0:\n return f\"{start} \" + \"\\n AND \".join(where)\n else:\n return \"\"\n # endregion\n" }, { "alpha_fraction": 0.5186625123023987, "alphanum_fraction": 0.5435459017753601, "avg_line_length": 21.946428298950195, "blob_id": "9b42609f7a68a57a32df2157eacbd93c2e781910", "content_id": "6e049da841d9379af448f94d5791ac1d34f9d317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "no_license", "max_line_length": 104, "num_lines": 56, "path": "/etl/crawler/items.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport scrapy\n\n\ndef extract_year(url):\n reg = r'(19[0-8][0-9]|199[0-9]|20[0-8][0-9]|209[0-9])'\n try:\n return int(re.search(reg, url).group(0))\n except:\n return 0\n\n\ndef extract_uf(url):\n reg = r'(BR|ZZ|VT|AC|AL|AP|AM|BA|CE|DF|ES|GO|MA|MT|MS|MG|PA|PB|PR|PE|PI|RJ|RN|RS|RO|RR|SC|SP|SE|TO)'\n try:\n return re.search(reg, url).group(0)\n except:\n return None\n\n\ndef extract_turn(url):\n reg = r'(1t|2t)'\n try:\n return re.search(reg, url).group(0)\n except:\n return None\n\n\nclass TSEFileItem(scrapy.Item):\n file = scrapy.Field()\n path = scrapy.Field()\n year = scrapy.Field()\n name = scrapy.Field()\n uf = scrapy.Field()\n turn = scrapy.Field()\n\n file_urls = scrapy.Field()\n files = scrapy.Field()\n\n @classmethod\n def create(cls, href):\n item = cls()\n item['file'] = href\n item['name'] = os.path.split(href)[-1]\n item['path'] = '/'.join(os.path.split(href)[-4:-1])\n item['year'] = extract_year(item['name'])\n if item['year'] == 0:\n item['year'] = extract_year(item['file'])\n\n item['uf'] = extract_uf(item['name'])\n item['turn'] = extract_turn(item['name'])\n\n item['file_urls'] = [href]\n\n return item\n\n" }, { "alpha_fraction": 0.5163511037826538, "alphanum_fraction": 0.5249569416046143, "avg_line_length": 35.3125, "blob_id": "aa29a71328d278e7b40239078599eb0df6c5d9e4", "content_id": "361c77baa72b12b0a3f46981767fb5ce92de6e92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/etl/fixes/SiglaUEFix.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "class SiglaUEFix:\n\n def check(self, item):\n if item['database'] == 'votos' and item['uf'] == 'ZZ':\n return False\n\n return item['database'] in ['votos', 'legendas', 'candidatos']\n\n def apply(self, df):\n df.loc[df.CODIGO_CARGO == '1', 'SIGLA_UE'] = 'BR'\n df.loc[df.CODIGO_CARGO == '3', 'SIGLA_UE'] = df['SIGLA_UF']\n df.loc[df.CODIGO_CARGO == '5', 'SIGLA_UE'] = df['SIGLA_UF']\n df.loc[df.CODIGO_CARGO == '6', 'SIGLA_UE'] = df['SIGLA_UF']\n df.loc[df.CODIGO_CARGO == '7', 'SIGLA_UE'] = df['SIGLA_UF']\n\n return df\n" }, { "alpha_fraction": 0.7475247383117676, "alphanum_fraction": 0.7475247383117676, "avg_line_length": 
30.076923370361328, "blob_id": "214d7731a89efb42bcfa9bbf79d5e483709e4e9a", "content_id": "ecb693446373512a876b4104ee6889a35ec1e620", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 88, "num_lines": 13, "path": "/web/cepesp/routes/sql.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from flask import make_response\n\nfrom web.cepesp.athena.builders.factory import build_query\nfrom web.cepesp.athena.options import AthenaQueryOptions\n\n\ndef sql():\n options = AthenaQueryOptions()\n response = make_response(build_query(**options.__dict__))\n response.headers['Content-Disposition'] = f'attachment; filename={options.name}.sql'\n response.mimetype = 'text/txt'\n\n return response\n" }, { "alpha_fraction": 0.5844336152076721, "alphanum_fraction": 0.6574009656906128, "avg_line_length": 48.620689392089844, "blob_id": "552a80c8310ba984a6c500b9876dbdbcbb680654", "content_id": "65876dab34dc5d470d5524279436747587308107", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1439, "license_type": "no_license", "max_line_length": 108, "num_lines": 29, "path": "/etl/fixes/FixCodigoCorRaca.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass FixCodigoCorRaca:\n\n def check(self, item):\n return item['database'] == 'candidatos' and item['year'] < 2014\n\n def apply(self, df: pd.DataFrame):\n df[\"CODIGO_COR_RACA\"] = \"-1\"\n df[\"DESCRICAO_COR_RACA\"] = \"#NE#\"\n\n return df\n\n def test(self, client):\n df_2010 = client.get_candidates(year=2010, job=1, columns=['CODIGO_COR_RACA', 'DESCRICAO_COR_RACA'])\n df_2006 = client.get_candidates(year=2006, job=1, columns=['CODIGO_COR_RACA', 'DESCRICAO_COR_RACA'])\n df_2002 = client.get_candidates(year=2002, job=1, columns=['CODIGO_COR_RACA', 'DESCRICAO_COR_RACA'])\n df_1998 = client.get_candidates(year=1998, job=1, columns=['CODIGO_COR_RACA', 'DESCRICAO_COR_RACA'])\n\n assert \"CODIGO_COR_RACA\" in df_2010.columns, \"CODIGO_COR_RACA not in 2010\"\n assert \"CODIGO_COR_RACA\" in df_2006.columns, \"CODIGO_COR_RACA not in 2006\"\n assert \"CODIGO_COR_RACA\" in df_2002.columns, \"CODIGO_COR_RACA not in 2002\"\n assert \"CODIGO_COR_RACA\" in df_1998.columns, \"CODIGO_COR_RACA not in 1998\"\n\n assert \"DESCRICAO_COR_RACA\" in df_2010.columns, \"DESCRICAO_COR_RACA not in 2010\"\n assert \"DESCRICAO_COR_RACA\" in df_2006.columns, \"DESCRICAO_COR_RACA not in 2006\"\n assert \"DESCRICAO_COR_RACA\" in df_2002.columns, \"DESCRICAO_COR_RACA not in 2002\"\n assert \"DESCRICAO_COR_RACA\" in df_1998.columns, \"DESCRICAO_COR_RACA not in 1998\"\n" }, { "alpha_fraction": 0.6237499713897705, "alphanum_fraction": 0.625, "avg_line_length": 20.052631378173828, "blob_id": "4488e91b741034ea8f7f99aa12397fcd0b65dbe5", "content_id": "2376d8aae75fc76b3b29cbd3b564f705888ce0f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 66, "num_lines": 38, "path": "/web/cepesp/utils/session.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from flask import session, request\n\n\ndef get_selected_columns_session_key():\n base = request.path.split('/')[-1]\n reg = str(request.args.get('agregacao_regional'))\n pol = str(request.args.get('agregacao_politica'))\n\n key = 'columns_' + base\n if reg:\n key += '_' + reg\n\n if pol:\n 
key += '_' + pol\n\n return key\n\n\ndef set_session_selected_columns(columns):\n key = get_selected_columns_session_key()\n session[key] = columns\n\n\ndef session_selected_columns(available_columns):\n key = get_selected_columns_session_key()\n\n if key in session:\n return [c for c in session[key] if c in available_columns]\n else:\n return []\n\n\ndef back(default='/'):\n return session.get('back', default)\n\n\ndef get_locale():\n return session.get('locale', 'pt')\n" }, { "alpha_fraction": 0.5930736064910889, "alphanum_fraction": 0.6017315983772278, "avg_line_length": 24.66666603088379, "blob_id": "e3309f9f6354d9df8d2023ca6f88f7bf628d7878", "content_id": "a7c1972c9be72079397d065705bcd4d1e4028be0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 87, "num_lines": 18, "path": "/etl/fixes/FixTituloEleitor.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass FixTituloEleitor:\n titulo = 'NUM_TITULO_ELEITORAL_CANDIDATO'\n cpf = 'CPF_CANDIDATO'\n\n def check(self, item):\n return item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n df.at[df[self.titulo] != '#NULO#', self.titulo] = df[self.titulo].str.zfill(12)\n df.at[df[self.cpf] != '#NULO#', self.cpf] = df[self.cpf].str.zfill(11)\n\n return df\n\n def test(self, client):\n pass\n" }, { "alpha_fraction": 0.668367326259613, "alphanum_fraction": 0.6751700639724731, "avg_line_length": 27, "blob_id": "30d2e63b38e8d57a09194414fb305f347b8ca5b8", "content_id": "fccaa7b0f48162ae0c65653d5ff1d6e619d41890", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1176, "license_type": "no_license", "max_line_length": 97, "num_lines": 42, "path": "/web/cepesp/routes/static.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from flask import render_template, request, session, redirect, send_from_directory\n\nfrom web.cepesp.config import APP_ENV\nfrom web.cepesp.utils.session import get_locale\n\n\ndef home():\n host = request.headers['Host']\n\n if APP_ENV == 'master' and 'cepesp.io' in host:\n return redirect('http://www.cepesp.io')\n else:\n session['back'] = request.path\n return render_template(\"home.html\", page=0, lang=get_locale())\n\n\ndef others():\n session['back'] = request.path\n return render_template(\"others.html\", page=0, lang=get_locale())\n\n\ndef about():\n session['back'] = request.path\n return render_template(\"about.%s.html\" % get_locale(), page=1, lang=get_locale())\n\n\ndef about_state_secretaries():\n session['back'] = request.path\n return render_template(\"about_secretarios.%s.html\" % get_locale(), page=1, lang=get_locale())\n\n\ndef documentation():\n session['back'] = request.path\n return render_template(\"documentation.%s.html\" % get_locale(), page=1, lang=get_locale())\n\n\ndef spatial2_docs():\n return redirect('http://docs.spatial2.cepesp.io')\n\n\ndef static_from_root():\n return send_from_directory('static', request.path[1:])\n" }, { "alpha_fraction": 0.5361663699150085, "alphanum_fraction": 0.5411392450332642, "avg_line_length": 25.650602340698242, "blob_id": "8a007818da846e59ac0fb01b752c7d421b705be3", "content_id": "b61f451e316993ce5c43ff9aab987f31f6ea860b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2212, "license_type": "no_license", "max_line_length": 116, "num_lines": 83, "path": 
"/etl/process/DetalheVotSecProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\nfrom _csv import QUOTE_ALL\nfrom glob import glob\n\nimport pandas as pd\n\nfrom web.cepesp.utils.data import resolve_conflicts\n\n\nclass DetalheVotSecProcess:\n columns = [\n 'ANO_ELEICAO',\n 'NUM_TURNO',\n 'NUM_ZONA',\n 'NUM_SECAO',\n 'COD_MUN_TSE',\n 'COD_MUN_IBGE',\n 'NOME_MUNICIPIO',\n 'CODIGO_MICRO',\n 'NOME_MICRO',\n 'CODIGO_MESO',\n 'NOME_MESO',\n 'UF',\n 'NOME_UF',\n 'CODIGO_MACRO',\n 'NOME_MACRO',\n 'DESCRICAO_ELEICAO',\n 'CODIGO_CARGO',\n 'DESCRICAO_CARGO',\n 'QTD_APTOS',\n 'QTD_COMPARECIMENTO',\n 'QTD_ABSTENCOES',\n 'QT_VOTOS_NOMINAIS',\n 'QT_VOTOS_BRANCOS',\n 'QT_VOTOS_NULOS',\n 'QT_VOTOS_LEGENDA',\n 'QT_VOTOS_ANULADOS_APU_SEP',\n ]\n\n def __init__(self, mun_df_path, output):\n self.output = output\n self.aux_mun = pd.read_csv(mun_df_path, sep=',', dtype=str)\n\n def check(self, item):\n return item['database'] == \"detalhe\"\n\n def done(self, item):\n return os.path.exists(self._output(item))\n\n def handle(self, item):\n chunk = 0\n\n for df in pd.read_csv(item['path'], sep=';', dtype=str, chunksize=100000):\n df = self.join_mun(df)\n df = df[self.columns]\n self._save(df, item, chunk)\n\n chunk += 1\n\n def join_mun(self, vot):\n df = vot.merge(self.aux_mun, on='COD_MUN_TSE', how='left', sort=False)\n df = resolve_conflicts(df, prefer='_y', drop='_x')\n df = df.rename(columns={'SIGLA_UF': 'UF'})\n\n return df\n\n def _output(self, item):\n return os.path.join(self.output, item['name'])\n\n def _save(self, df, item, chunk):\n output_path = self._output(item)\n\n directory = os.path.dirname(output_path)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n header = chunk == 0\n mode = 'a' if chunk > 0 else 'w+'\n df.to_csv(output_path, mode=mode, header=header, compression='gzip', sep=';', encoding='utf-8', index=False,\n quoting=QUOTE_ALL)\n\n def output_files(self):\n return glob(os.path.join(self.output, '*.gz'))\n" }, { "alpha_fraction": 0.5488424897193909, "alphanum_fraction": 0.5505363941192627, "avg_line_length": 29.55172348022461, "blob_id": "778640f3f33b74421a157278d74cfcc36701ca31", "content_id": "84dd280968c9ee0d42fe2974a2207134685f4f60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1771, "license_type": "no_license", "max_line_length": 103, "num_lines": 58, "path": "/web/cepesp/athena/builders/candidates_assets.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.athena.builders.base import AthenaBuilder\nfrom web.cepesp.columns.bem_candidato import CandidateAssetsColumnsSelector\n\n\nclass CandidateAssetsQueryBuilder(AthenaBuilder):\n bens = [\n \"DATA_GERACAO\",\n \"HORA_GERACAO\",\n \"ANO_ELEICAO\",\n \"DESCRICAO_ELEICAO\",\n \"SIGLA_UF\",\n \"SEQUENCIAL_CANDIDATO\",\n \"CD_TIPO_BEM_CANDIDATO\",\n \"DS_TIPO_BEM_CANDIDATO\",\n \"DETALHE_BEM\",\n \"VALOR_BEM\",\n \"DATA_ULTIMA_ATUALIZACAO\",\n \"HORA_ULTIMA_ATUALIZACAO\",\n \"ID_CANDIDATO\"\n ]\n\n def __init__(self, **options):\n super().__init__(**options)\n self.selector = CandidateAssetsColumnsSelector()\n\n def build(self):\n years = \"', '\".join(map(str, self.arg('years')))\n columns_renamed = \", \".join([f\"{self._map_column(c)} AS {c}\" for c in self.selected_columns()])\n\n return f'''\n SELECT DISTINCT {columns_renamed}\n FROM bem_candidato as b\n LEFT JOIN candidatos as c ON b.ID_CANDIDATO = c.ID_CANDIDATO AND c.NUM_TURNO = '1'\n WHERE b.p_ano IN (\\'{years}\\') \n 
{self._build_filters('AND')}\n {self._build_order_by()}\n '''\n\n def _map_column(self, column):\n if column in self.bens:\n return f\"b.{column}\"\n else:\n return f\"c.{column}\"\n\n # region def _build_filters(self, start): [...]\n def _build_filters(self, start):\n where = self._build_base_filters()\n\n where.append(\"b.DESCRICAO_ELEICAO <> '2'\")\n\n if self.opt('uf_filter'):\n return f\"AND b.SIGLA_UF = '{self.opt('uf_filter')}'\"\n\n if len(where) > 0:\n return f\"{start} \" + \"\\n AND \".join(where)\n else:\n return \"\"\n # endregion" }, { "alpha_fraction": 0.7541064023971558, "alphanum_fraction": 0.75606769323349, "avg_line_length": 49.98749923706055, "blob_id": "1c22e2e183628b31dd8cbc19dace774a57b88b2d", "content_id": "d337087e2a68461e06ee8fe9eeef0e9add6cb8a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4079, "license_type": "no_license", "max_line_length": 117, "num_lines": 80, "path": "/web/application.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_babel import Babel\n\nfrom web.cepesp.config import APP_SECRET_KEY, APP_DEBUG, BUGSNAG_API_KEY, FLASK_ENV\nfrom web.cepesp.routes import lang\nfrom web.cepesp.routes.api import athena_query_api, athena_status_api, athena_result_api, athena_api, columns_api\nfrom web.cepesp.routes.error import handle_error\nfrom web.cepesp.routes.filters import asset_filter\nfrom web.cepesp.routes.lang import lang\nfrom web.cepesp.routes.queries import consulta_tse, consulta_candidatos, consulta_legendas, consulta_votos, \\\n consulta_bem_candidato, consulta_filiados, consulta_secretarios, consulta_tse_2\nfrom web.cepesp.routes.sql import sql\nfrom web.cepesp.routes.static import home, about, others, about_state_secretaries, static_from_root, documentation, \\\n spatial2_docs\nfrom web.cepesp.utils.session import get_locale\n\napplication = Flask(__name__)\napplication.env = FLASK_ENV\napplication.secret_key = APP_SECRET_KEY\napplication.testing = APP_DEBUG\n\nif BUGSNAG_API_KEY:\n import bugsnag\n import bugsnag.flask\n\n bugsnag.configure(\n api_key=BUGSNAG_API_KEY,\n project_root=\"/web\",\n release_stage=FLASK_ENV\n )\n bugsnag.flask.handle_exceptions(application)\n\nbabel = Babel(application)\nbabel.localeselector(get_locale)\napplication.register_error_handler(Exception, handle_error)\napplication.add_template_filter(lambda fl: asset_filter(fl, application.root_path), \"asset\")\n\napplication.add_url_rule(\"/\", \"home\", home)\napplication.add_url_rule(\"/sobre\", \"about\", about)\napplication.add_url_rule(\"/others\", \"others\", others)\napplication.add_url_rule(\"/about-state-secretaries\", \"about_state_secretaries\", about_state_secretaries)\napplication.add_url_rule(\"/documentacao\", \"documentation\", documentation)\napplication.add_url_rule(\"/docs/cepesp-data\", \"docs_cepesp_data\", documentation)\napplication.add_url_rule(\"/docs/spatial2\", \"docs_spatial2\", spatial2_docs)\n\napplication.add_url_rule(\"/api/consulta/tse\", \"api_tse\", lambda: athena_api(\"tse\"))\napplication.add_url_rule(\"/api/consulta/candidatos\", \"api_candidatos\", lambda: athena_api(\"candidatos\"))\napplication.add_url_rule(\"/api/consulta/legendas\", \"api_legendas\", lambda: athena_api(\"legendas\"))\napplication.add_url_rule(\"/api/consulta/votos\", \"api_votos\", lambda: athena_api(\"votos\"))\napplication.add_url_rule(\"/api/consulta/bem_candidato\", \"api_bem_candidato\", lambda: 
athena_api(\"bem_candidato\"))\napplication.add_url_rule(\"/api/consulta/filiados\", \"api_filiados\", lambda: athena_api(\"filiados\"))\napplication.add_url_rule(\"/api/consulta/secretarios\", \"api_secretarios\", lambda: athena_api(\"secretarios\"))\n\napplication.add_url_rule(\"/api/consulta/athena/columns\", \"athena_columns_api\", columns_api)\napplication.add_url_rule(\"/api/consulta/athena/query\", \"athena_query_api\", athena_query_api)\napplication.add_url_rule(\"/api/consulta/athena/status\", \"athena_status_api\", athena_status_api)\napplication.add_url_rule(\"/api/consulta/athena/result\", \"athena_result_api\", athena_result_api)\n\napplication.add_url_rule(\"/consulta/tse\", \"query_tse\", consulta_tse)\napplication.add_url_rule(\"/consulta/tse2\", \"query_tse_2\", consulta_tse_2)\napplication.add_url_rule(\"/consulta/candidatos\", \"query_candidatos\", consulta_candidatos)\napplication.add_url_rule(\"/consulta/legendas\", \"query_legendas\", consulta_legendas)\napplication.add_url_rule(\"/consulta/votos\", \"query_votos\", consulta_votos)\napplication.add_url_rule(\"/consulta/bem_candidato\", \"query_bem_candidato\", consulta_bem_candidato)\napplication.add_url_rule(\"/consulta/filiados\", \"query_filiados\", consulta_filiados)\napplication.add_url_rule(\"/consulta/secretarios\", \"query_secretarios\", consulta_secretarios)\n\napplication.add_url_rule(\"/consulta/sql\", \"sql\", sql)\n\napplication.add_url_rule(\"/pt\", \"lang_pt\", lambda: lang(\"pt\"))\napplication.add_url_rule(\"/en\", \"lang_en\", lambda: lang(\"en\"))\n\n# SEO\napplication.add_url_rule(\"/robots.txt\", \"robots_txt\", static_from_root)\napplication.add_url_rule(\"/sitemap.xml\", \"sitemap_xml\", static_from_root)\n\n\nif __name__ == \"__main__\":\n application.debug = APP_DEBUG\n application.run()\n" }, { "alpha_fraction": 0.44277673959732056, "alphanum_fraction": 0.44277673959732056, "avg_line_length": 25, "blob_id": "91ea1743e8b8e8fb2c862b118b0041557dbe83c1", "content_id": "140e466af9be8abdcfc0315ae89f040202e7960c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 52, "num_lines": 41, "path": "/web/cepesp/columns/filiados.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "class PartyAffiliationsColumnsSelector:\n\n def columns(self):\n return [\n \"DATA_EXTRACAO\",\n \"HORA_EXTRACAO\",\n \"NUMERO_INSCRICAO\",\n \"NOME_FILIADO\",\n \"SIGLA_PARTIDO\",\n \"NOME_PARTIDO\",\n \"UF\",\n \"COD_MUN_TSE\",\n \"NOME_MUNICIPIO\",\n \"NUM_ZONA\",\n \"NUM_SECAO\",\n \"DATA_FILIACAO\",\n \"SITUACAO_REGISTRO\",\n \"TIPO_REGISTRO\",\n \"DATA_PROCESSAMENTO\",\n \"DATA_DESFILIACAO\",\n \"DATA_CANCELAMENTO\",\n \"DATA_REGULARIZACAO\",\n \"MOTIVO_CANCELAMENTO\"\n ]\n\n def visible_columns(self):\n return [\n \"NUMERO_INSCRICAO\",\n \"NOME_FILIADO\",\n \"SIGLA_PARTIDO\",\n \"NOME_PARTIDO\",\n \"UF\",\n \"COD_MUN_TSE\",\n \"NOME_MUNICIPIO\",\n \"NUM_ZONA\",\n \"NUM_SECAO\",\n \"SITUACAO_REGISTRO\"\n ]\n\n def order_by_columns(self):\n return ['NOME_FILIADO', 'UF', 'COD_MUN_TSE']\n" }, { "alpha_fraction": 0.7128912806510925, "alphanum_fraction": 0.7347694635391235, "avg_line_length": 40.845069885253906, "blob_id": "29c183ed8b661ec950e02dfa4f7c15ca95e0d6a8", "content_id": "f22f9ab4cba81ef9d3ce166e15b2c5d76696de88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 3026, "license_type": "no_license", "max_line_length": 360, 
"num_lines": 71, "path": "/bookdown/04-como_manipular_dados_no_R.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# Como manipular os dados no R\n\n```{r, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE,\n eval = FALSE)\n```\n\nTransformações de bancos de dados podem ser realizadas com funções presentes no pacote _dplyr_. Execute o código abaixo no seu R a fim de que possamos utilizá-lo nos exemplos desse tutorial.\n\n```{r}\ninstall.packages(\"dplyr\")\n```\n\nEste tutorial não tem como objetivo ensinar a linguagem de programação R, mas fornecer um __esqueleto__ para algumas análises simples, que podem ser feitas em alguns minutos.\n\n## Filtrar\n\nAo realizar uma requisição no CepespData, a função escolhida irá retornar todas as observações condizentes com os parâmetros informados. Contudo, nem sempre queremos todas as observações, mas apenas um conjunto específico. \n\nPor exemplo, talvez nós desejemos todos os candidatos negros que concorreram a Deputado Federal em 2018, com a função `filter`.\n\n```{r}\nlibrary(cepespR)\nlibrary(dplyr)\n\nel2018 <- get_elections(2018, 6, regional_aggregation = 0)\n```\n\nUma vez que você tenha feito a requisição do banco e salvado a tabela em um objeto (`el2018`), podemos aplicar a função. É importante notar que, ao comparar valores no R, utilizamos dois sinais de igual `==`. \n\n```{r}\ncand_negros_2018 <- el2018 %>% \n filter(CODIGO_COR_RACA == '02' | CODIGO_COR_RACA == '03') # Selecionando candidatos pretos e pardos (códigos 2 e 3, respectivamente).\n\n# Vejamos como ficou o banco de dados:\nView(cand_negros_2018)\n\n```\n\nRepare no uso do `%>%` (leia-se _pipe_). O _pipe_ nos permite concatenar a execução de diversas funções, de tal maneira que seja mais fácil realizar operações \"simultâneas\" em um banco de dados. Por exemplo, podemos filtrar os candidatos negros eleitos, e em seguida, tabelar o banco de dados para ver qual a frequência de homens e mulheres dentro deste grupo:\n\n```{r}\ntabela_sexo_negros_eleitos <- el2018 %>% \n filter(CODIGO_COR_RACA == '02' | CODIGO_COR_RACA == '03') %>%\n filter(DESC_SIT_TOT_TURNO == 'ELEITO POR QP' | DESC_SIT_TOT_TURNO == 'ELEITO POR MEDIA')\n \ntable(tabela_sexo_negros_eleitos$DESCRICAO_SEXO)\n\n```\n\n## Selecionar e Ordenar\n\nTambém é possível selecionar algumas variáveis por meio da função `select` e ordená-las com a função `arrange`. Digamos, por exemplo, que você queira saber quais foram os deputados federais mais votados no estado de São Paulo. Em primeiro lugar, podemos fazer uma requisição no `get_votes`, utilizando o estado como agregação regional.\n\n```{r}\nlibrary(cepespR)\nlibrary(dplyr)\n\ncand_2014 <- get_elections(2014, 6, regional_aggregation = \"Estado\")\n\ncand_2014_sp <- cand_2014 %>% \n filter(UF == \"SP\")\n```\n\nUma vez com o nossa tabela filtrada, basta apenas ordenar os resultados. A fim de facilitar a leitura, podemos selecionar apenas as colunas relevantes. 
No caso, iremos utilizar a sigla do partido, o nome do candidato e a quantidade de votos.\n\n```{r}\ncand_2014_sp %>% \n select(SIGLA_PARTIDO, NOME_CANDIDATO, QTDE_VOTOS) %>% \n arrange(desc(QTDE_VOTOS))\n```\n" }, { "alpha_fraction": 0.4735226333141327, "alphanum_fraction": 0.5886415839195251, "avg_line_length": 30.780487060546875, "blob_id": "cd900a34db7e72da4b4213258db02507a75e372f", "content_id": "f687bef4d1f10cfb9d653e7e4c88a59db0e36a50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1303, "license_type": "no_license", "max_line_length": 118, "num_lines": 41, "path": "/web/tests/responses/test_duplicated_votes.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom web.tests.utils import get_years, get_request_url\n\nAPTOS = {\n 1998: 106_049_062,\n 2000: 102_644_778,\n 2002: 115_166_810,\n 2004: 118_464_969,\n 2006: 125_826_156,\n 2008: 128_746_974,\n 2010: 135_721_843,\n 2012: 138_544_294,\n 2014: 142_821_358,\n 2016: 144_048_995\n}\n\n\ndef assert_duplicated_votes(uri, **options):\n url = get_request_url(uri, **options)\n df = pd.read_csv(url, sep=',', lineterminator='\\n', encoding='utf-8', dtype=str)\n s = pd.to_numeric(df[\"QTDE_VOTOS\"], errors='coerce').sum()\n\n if s > APTOS[options['ano']]:\n print(options, \" - SUM(QTDE_VOTOS) = %d [DUPLICATED]\" % s)\n else:\n print(options, \" - SUM(QTDE_VOTOS) = %d [OK]\" % s)\n\n\ndef test():\n for y in get_years(1):\n assert_duplicated_votes(\"votos\", ano=y, cargo=1, agregacao_regional=6, turno=1)\n assert_duplicated_votes(\"votos\", ano=y, cargo=1, agregacao_regional=6, turno=2)\n assert_duplicated_votes(\"tse\", ano=y, cargo=1, agregacao_regional=6, agregacao_politica=2, turno=1, brancos=1,\n nulos=1)\n assert_duplicated_votes(\"tse\", ano=y, cargo=1, agregacao_regional=6, agregacao_politica=2, turno=2, brancos=1,\n nulos=1)\n\n\nif __name__ == \"__main__\":\n test()\n" }, { "alpha_fraction": 0.4983425438404083, "alphanum_fraction": 0.5226519107818604, "avg_line_length": 24.13888931274414, "blob_id": "cd8b117df2f23999e13e693924f2e99d357c43ce", "content_id": "2d620e3cd1cf0373b32e328f86ce009bf078ad93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1810, "license_type": "no_license", "max_line_length": 119, "num_lines": 72, "path": "/web/tests/responses/test_response_ok.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import re\nimport time\n\nfrom web.tests.utils import get_years, run_request\n\n\ndef assert_request(uri, **options):\n print(\"Asserting \", {i: options[i] for i in options if i != 'selected_columns'}, \"->\", end='')\n start = time.time()\n result = run_request(uri, **options)\n elapsed = time.time() - start\n\n if result.status_code != 200:\n print(\" ERROR [%d]\" % result.status_code)\n else:\n print(\" OK [%.3f ms]\" % elapsed, end=' ')\n\n if re.match(r'([^\\n]*\\n|[^\\n]+$){3,}', result.text):\n print(\"\")\n else:\n print(\"[EMPTY]\")\n\n\ndef assert_votos(jobs):\n reg = [0, 2, 6, 7, 8, 1, 4, 5, 9]\n\n print(\"VOTOS ----\")\n for job in jobs:\n for year in get_years(job):\n for r in reg:\n assert_request(\"votos\", cargo=job, ano=year, agregacao_regional=r)\n\n\ndef assert_tse(jobs):\n reg = [0, 2, 6, 7, 8, 1, 4, 5, 9]\n pol = [2]\n\n print(\"TSE ----\")\n for job in jobs:\n for year in get_years(job):\n if year in [2014, 2010]:\n for r in reg:\n for p in pol:\n assert_request(\"tse\", cargo=job, ano=year, 
agregacao_regional=r, agregacao_politica=p, start=0,\n length=15)\n\n\ndef assert_legendas(jobs):\n print(\"LEGENDAS ----\")\n for job in jobs:\n for year in get_years(job):\n assert_request(\"legendas\", cargo=job, ano=year)\n\n\ndef assert_candidatos(jobs):\n print(\"CANDIDATOS ----\")\n for job in jobs:\n for year in get_years(job):\n assert_request(\"candidatos\", cargo=job, ano=year)\n\n\ndef test():\n jobs = [1, 3, 5, 6, 7, 11, 13]\n\n # assert_candidatos(jobs)\n # assert_legendas(jobs)\n # assert_votos(jobs)\n assert_tse(jobs)\n\n\nif __name__ == \"__main__\":\n test()\n" }, { "alpha_fraction": 0.5682326555252075, "alphanum_fraction": 0.6275168061256409, "avg_line_length": 33.38461685180664, "blob_id": "888714367d1f9f23c403b86a1ac3e4a43355d563", "content_id": "a13ee8fb1ca6a4be1635fb399e8c603a4f5ffa00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "no_license", "max_line_length": 101, "num_lines": 26, "path": "/etl/fixes/AppendExtraJobs2014.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass AppendExtraJobs2014:\n\n def __init__(self, candidatos_2014_semvotos):\n self.candidatos = pd.read_csv(candidatos_2014_semvotos, sep=';', dtype=str, low_memory=False)\n\n def check(self, item):\n return item['year'] == 2014 and item['database'] == 'candidatos'\n\n def apply(self, df):\n df = df.append(self.candidatos, ignore_index=True, verify_integrity=True)\n\n return df\n\n def test(self, client):\n df_2 = client.get_candidates(year=2014, job=2)\n df_4 = client.get_candidates(year=2014, job=4)\n df_9 = client.get_candidates(year=2014, job=9)\n df_10 = client.get_candidates(year=2014, job=10)\n\n assert len(df_2) > 0, \"empty vice-president\"\n assert len(df_4) > 0, \"empty vice-governor\"\n assert len(df_9) > 0, \"empty 1st substitute\"\n assert len(df_10) > 0, \"empty 2st substitute\"\n" }, { "alpha_fraction": 0.4884233772754669, "alphanum_fraction": 0.552921712398529, "avg_line_length": 24.18055534362793, "blob_id": "9f2c75a30c4f32f7fd5f4b7d9c2a8af153e42a0c", "content_id": "8584131a482523e08732a306d076c919d50c798f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1814, "license_type": "no_license", "max_line_length": 109, "num_lines": 72, "path": "/etl/process/DimensionProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\nfrom _csv import QUOTE_ALL\nfrom glob import glob\n\nimport pandas as pd\n\n\nclass DimensionsProcess:\n\n def __init__(self, output):\n self.output = output\n self.added_brancos = False\n self.last_id = {\n 2018: 10_000_000,\n 2016: 9_000_000,\n 2014: 8_000_000,\n 2012: 7_000_000,\n 2010: 6_000_000,\n 2008: 5_000_000,\n 2006: 4_000_000,\n 2004: 3_000_000,\n 2002: 2_000_000,\n 2000: 1_000_000,\n 1998: 1,\n }\n\n def get_columns(self):\n raise NotImplemented\n\n def get_id_column(self):\n raise NotImplemented\n\n def get_brancos_df(self, item, job):\n raise NotImplemented\n\n def check(self, item):\n raise NotImplemented\n\n def done(self, item):\n return os.path.exists(self._output(item))\n\n def handle(self, item):\n df = pd.read_csv(item['path'], sep=';', dtype=str)\n df = self._set_ids(df, item)\n\n self._save(df[self.get_columns()], item)\n\n # region Private Methods\n def output_files(self):\n return glob(os.path.join(self.output, '*.gz'))\n\n def _set_ids(self, df, item):\n last_id = self.last_id[item['year']]\n\n df.is_copy = False\n 
df[self.get_id_column()] = range(last_id, last_id + len(df))\n\n self.last_id[item['year']] += len(df)\n\n return df\n\n def _save(self, df, item):\n output_path = self._output(item)\n directory = os.path.dirname(output_path)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n df.to_csv(output_path, compression='gzip', sep=';', encoding='utf-8', index=False, quoting=QUOTE_ALL)\n\n def _output(self, item):\n return os.path.join(self.output, item['name'])\n # endregion\n\n" }, { "alpha_fraction": 0.6941261887550354, "alphanum_fraction": 0.7073834538459778, "avg_line_length": 52.046749114990234, "blob_id": "7df4b668bc53ad6a232200f4fdb6c377baf461ca", "content_id": "8dcdb6259ad9d8b69a3f23ab52fee89a26b868f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 26671, "license_type": "no_license", "max_line_length": 387, "num_lines": 492, "path": "/bookdown/03-como_acessar_a_API_R.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# Como usar a API R\n\n```{r, echo=FALSE}\nknitr::opts_chunk$set(echo = TRUE, warning = F, results = 'hide', eval = F)\n```\n\nO __cepespR__ é um pacote criado para auxiliar o acesso dos usuários a API do CepespData/FGV. Por meio dessa ferramenta, é possível realizar requisições de maneira mais rápida e estruturada aos dados eleitorais presentes no nosso repositório.\n\nNesta seção, vamos demonstrar algumas funcionalidades básicas do __cepespR__, assim como alguns exemplos de requisições e análises que podem ser feitas em poucos minutos no R. Também apresentaremos algumas operações simples com os dados com o pacote _dplyr_, em especial para mostrar como trabalhar com o dados do CepespData com outros bancos.\n\nSeguindo a rotina de códigos abaixo, o usuário, ao final deste tutorial, poderá ter as consultas salvas com sucesso em formato compatível com outros softwares de dados, como Excel e o SPSS.\n\n## Instalando o CepespR\n\nO pacote __cepespR__ está hospedado em [nosso github](https://github.com/Cepesp-Fgv/cepesp-r), então para instalá-lo, é preciso rodar o código abaixo apenas uma vez. Também vamos instalar o pacote _dplyr_ para nos auxiliar nas operações com os dados.\n\n```{r, eval = FALSE}\n# Instalando o cepespR:\nif (!require(\"devtools\")) install.packages(\"devtools\")\ndevtools::install_github(\"Cepesp-Fgv/cepesp-r\") \n\n# Instalando o dplyr:\ninstall.packages(\"dplyr\")\n```\n\nEm caso de troca de computador, é preciso instalar novamente o __cepespR__, mas recomendamos atualizá-lo sempre que possível, para garantir acesso às funções atualizadas -- lembrando que estamos ainda em fase de desenvolvimento do repositório do CepespData/FGV, de modo que novas funções são criadas no __cepespR__ sempre que um novo banco de dados é incluído em nosso repositório.\n\nUma vez instalado o __cepespR__, o pacote é ativado mediante a função `library`. Lembre-se que é preciso executar essa função toda vez que iniciar o R, senão as funções do __cepespR__ não irão funcionar. Isso também vale para o pacote _dplyr_ e qualquer outro que seja necessário em sua sessão.\n\n```{r, eval = FALSE}\nlibrary(cepespR)\nlibrary(dplyr)\n```\n\n## Explorando as funções do _cepespR_\n\nExistem 7 requisições disponíveis hoje no pacote __cepespR__. Todas têm como parâmetros obrigatórios o ano (parâmetro `year`) e o cargo (parâmetro `position`) disputado e recebem como padrão os dados por município. 
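\n\nApenas como referência rápida, o esboço abaixo reúne as sete funções com seus parâmetros obrigatórios (os valores usados são meramente ilustrativos):\n\n```{r, eval = FALSE}\nget_candidates(year = 2018, position = \"Presidente\") # perfil dos candidatos\nget_votes(year = 2018, position = \"Presidente\") # votos apurados\nget_coalitions(year = 2018, position = \"Presidente\") # coligações\nget_elections(year = 2018, position = \"Presidente\") # consulta integrada da eleição\nget_assets(year = 2018) # bens declarados dos candidatos\nget_filiates(state = \"SP\", party = \"PT\") # filiados (exige estado e partido)\nget_secretaries(state = \"SP\") # secretários estaduais\n```\n\nTodas elas são detalhadas nas subseções a seguir.\n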
\nCada função recebe um *data.frame* -- isto é, um banco de dados -- com as colunas já formatadas no tipo de variável correto, por exemplo, *numeric* ou *character*. (Veja [aqui](https://swcarpentry.github.io/r-novice-inflammation/13-supp-data-structures/) uma referência sobre os tipos de dado no R). \n\nNo momento da requisição, é preciso indicar um objeto para salvar este *data.frame* no seu ambiente no R. Por exemplo:\n\n```{r, eval = FALSE}\nminha_requisicao <- get_candidates(year = 2018, position = 'Presidente')\n```\n\nNo código acima, atribuímos ao novo objeto `minha_requisicao` o banco de dados obtido na requisição feita por meio da função `get_candidates` do pacote __cepespR__.\n\nOpcionalmente, as funções recebem parâmetros que auxiliam no filtro dos dados. \nOs parâmetros podem ser indicados tanto em português quanto em inglês. Para mais detalhes, consulte [a nossa documentação no GitHub](https://github.com/Cepesp-Fgv/cepesp-r).\n\n### Perfil dos candidatos `get_candidates`\n\nA função `get_candidates` retorna uma tabela com informações sobre características individuais dos candidatos. Com ela, é possível obter, por exemplo, informações sobre partido, cor/raça, idade, gênero, ou outra informação que diga respeito ao candidato. \n\nPara utilizá-la, você deve informar um __ano__ e uma __posição__. De modo geral, ela terá a seguinte estrutura:\n\n```{r, eval = FALSE}\nbase_de_dados <- get_candidates(year = <Ano escolhido>, position = <cargo escolhido>).\n```\n\nNo exemplo abaixo, faremos uma consulta para os candidatos à __presidência__ durante as eleições de __2014__. Repare que a função `get_candidates` salva a tabela no objeto `candpres_14`. Esse passo é importante uma vez que, caso seja de interesse utilizar essa tabela depois, faremos referência ao nome `candpres_14`.\n\n```{r, eval = FALSE}\ncandpres_14 <- get_candidates(year = 2014, position = \"Presidente\")\n```\n\nPara visualizar os dados da tabela criada, usamos a função `View` e dentro dos parênteses colocamos o nome da nossa tabela. No caso, utilizamos `candpres_14`.\n\n```{r, eval = FALSE}\nView(candpres_14)\n```\n\nOutro atributo da função `get_candidates` que podemos usar para filtrar os dados é o `only_elected`. Quando `only_elected = TRUE` (ou `only_elected = T`), a função nos retorna apenas os candidatos que foram eleitos naquele ano, para o cargo indicado.\n\nPor exemplo, podemos obter os deputados federais do PSL eleitos em 2018:\n\n```{r,warning=F,message=F,eval=FALSE}\ndeputadosPSL2018 <- get_candidates(year=2018, # Obrigatório: ano da eleição\n position=\"Deputado Federal\", #Obrigatório: cargo disputado\n only_elected = T, # Opcional: receber apenas os eleitos\n party = 17) # Opcional: receber apenas os candidatos do PSL\n``` \n\n### Votos por eleição `get_votes`\n\nRecupera quantos votos cada candidato recebeu em determinada eleição. É obrigatório informar o ano (`year`) e o cargo (`position`), recebendo como padrão a votação por município de todos os candidatos que receberam votos naquele ano e cargo. \n\nNo exemplo abaixo, podemos ver uma requisição para os votos para __presidente__, em __2018__, com agregação regional configurada para __municípios__.\n\n```{r, eval = FALSE}\nvtpres_18_mun <- get_votes(year = 2018, position = \"Presidente\", regional_aggregation = \"Municipio\")\n\nView(vtpres_18_mun)\n```\n\nE se estivéssemos interessados em ver os votos de um candidato específico por estado (unidade da federação)? 
Opcionalmente, podemos pedir os dados agregados por estado:\n\n```{r,warning=F,message=F,eval=FALSE}\nvotos_pres18_PT <- get_votes(year=2018, # Obrigatório: ano da eleição.\n position=\"Presidente\", # Obrigatório: cargo disputado.\n candidate_number = 13, # Opcional: filtra o candidato de nº13, ou seja, do PT.\n regional_aggregation=\"Estado\") # Opcional: votos agregados por estado.\n```\n\n### Coligações `get_coalitions`\n\nCaso o interesse seja pelas coligações realizadas por diferentes partidos, podemos utilizar a função `get_coalitions`.\n\nÉ importante ter em mente que o banco de dados sobre coligações fornecido pelo TSE apresenta diversas inconsistências. Por exemplo, estão presentes no banco tanto coligações que concorreram quanto aquelas que, por algum motivo, não puderam concorrer. Sabendo disso, indicamos que essas informações sejam utilizadas com cuidado. \n\nDe modo geral, caso deseje obter informações sobre coligações, é mais seguro utilizar a função `get_elections`, como iremos demonstrar mais para frente, pois o tratamento realizado na montagem deste último banco corrige o problema descrito acima.\n\nO funcionamento de `get_coalitions` é similar ao de `get_candidates`. Basta fornecer uma __posição__ e um __ano__ para acessar as informações desejadas.\n\n```{r, eval = FALSE}\nbase_de_dados <- get_coalitions(year = <Ano escolhido>, position = <cargo escolhido>).\n```\n\nPor exemplo, caso sejam de interesse as coligações realizadas para __prefeitura__ durante as eleições de __2016__, basta informar esses parâmetros e executar a função.\n\n```{r, eval = FALSE}\ncolpref_16 <- get_coalitions(year = 2016, position = \"Prefeito\")\n```\n\nNovamente, para ter uma visão geral da tabela devolvida pela função, podemos utilizar a função `View`.\n\n```{r, eval = FALSE}\nView(colpref_16)\n```\n\nSuponhamos que agora estamos interessados nas coligações da eleição para __presidente__ em __2002__. Neste caso, a função será escrita assim:\n\n```{r, eval = FALSE}\ncolpres_02 <- get_coalitions(year = 2002, position = \"Presidente\")\nView(colpres_02)\n```\n\n### Resultado de eleições por cargo `get_elections`\n\nAlém das consultas disponíveis no TSE, é possível fazer uma consulta integrada das eleições. Esta consulta agrega informações de _candidatos_, _coligações_ e _votos_. Trata-se de um dos diferenciais do CepespData/FGV frente a outras fontes.\n\nPara obter detalhes a respeito de uma eleição usando a função `get_elections` é preciso especificar obrigatoriamente __ano__ e __cargo__:\n\n```{r, eval = FALSE}\nelpres_14 <- get_elections(year = 2014, # Obrigatório especificar o ano.\n position = \"Presidente\") # Obrigatório especificar o cargo.\n\nView(elpres_14)\n```\n\nEsta função permite também consultar o resultado por diferentes agregações políticas: `Candidato`, `Partido`, `Coligação` e `Consolidado da Eleição`. Cada uma delas agrega os votos e outras informações. Por exemplo, caso seja feita uma requisição para agregação política `Partido`, serão retornados os votos obtidos e a coligação da qual o partido fez parte, além de outras informações.\n\nA agregação política `Consolidado da Eleição` possui informações um tanto quanto diferentes das outras. Ela não retorna os votos de um candidato, partido ou coligação. Nela, você pode encontrar dados de __comparecimento__, __votos válidos__, __votos brancos e nulos__. 
Veja:\n\n```{r message=FALSE, warning=FALSE, paged.print=TRUE}\nelpres_14_2 <- get_elections(year = 2014, # Obrigatório.\n position = \"Presidente\", # Obrigatório.\n regional_aggregation = \"Estado\", # Opcional: dados agregados por estado. Quando este parâmetro não é informado, retorna dados agregados por município.\n political_aggregation = \"Consolidado\") # Opcional: agregação política Consolidado da Eleição. Quando não informado este parâmetro, a requisição retorna dados agregados por candidato.\n\nView(elpres_14_2)\n```\n\n### Filiados `get_filiates`\n\nRetorna os dados dos filiados conforme declarado pelos partidos. É preciso informar o __estado__ (parâmetro `state`) e o __partido__ (parâmetro `party`) do qual se deseja consultar a lista de filiados. \n\nExemplo: filiados ao partido NOVO no estado da Bahia:\n\n```{r,warning=F,message=F,eval=FALSE}\nnovoBA <- get_filiates(state=\"BA\", # Obrigatório. Sigla do Estado.\n party = \"NOVO\") # Obrigatório. Sigla do partido.\n```\n\nEstes dados foram atualizados pela última vez em nosso repositório em novembro de 2018 e serão atualizados anualmente.\n\n### Bens de candidatos `get_assets`\n\nRecupera os bens declarados ao TSE pelos candidatos em cada eleição. Neste caso, o único parâmetro obrigatório é o __ano__ (`year`). \n\nExemplo: Bens declarados pelos candidatos do Piauí em 2018:\n\n```{r,warning=F,message=F,eval=FALSE}\n\nbensPiaui2018 <- get_assets(year = 2018, # Obrigatório: ano da eleição\n state = \"PI\") # Opcional: receber apenas dados do estado do Piauí. Quando não informado este parâmetro, retorna os bens declarados por todos os candidatos no país em 2018.\n```\n\n\n### Secretários `get_secretaries`\n\nA função `get_secretaries` recupera informações sobre ocupantes de cargos do primeiro escalão dos governos estaduais e do Distrito Federal. (Para mais informações sobre esses dados, inéditos do CepespData/FGV, [clique aqui](http://cepespdata.io/about-state-secretaries)).\n\nNessa função, o único parâmetro obrigatório é o __estado__ (`state`). É possível ainda filtrar o banco por nome do(a) secretário(a) e/ou período de governo no momento da requisição, preenchendo os parâmetros `name` e `period`, respectivamente.\n\nExemplo: Todas as secretárias e secretários estaduais de São Paulo entre 1998 e 2002:\n\n```{r,warning=F,message=F,eval=FALSE}\nsecSP <- get_secretaries(state = \"SP\", # Obrigatório: Estado. \n name = NULL, # Opcional: NULL para receber todos ou parte do nome para filtrar.\n period = \"1998-2002\") # Opcional: indicar o quadriênio de interesse.\n```\n\nCaso não seja informado o parâmetro `period`, a consulta retornará todos os períodos disponíveis.\n\n## Utilizando códigos ao invés de nomes\n\nAo invés de escrever os nomes das posições desejadas, uma alternativa é fornecer o código do cargo. Essa solução pode se demonstrar mais rápida com o tempo, uma vez que escrever os nomes é relativamente mais demorado e, caso digitado errado, levará a um erro durante a execução da função.\n\nOs códigos estão disponíveis na nossa página do [GitHub](https://github.com/Cepesp-Fgv/cepesp-rest/blob/master/EleicoesPorCargo_BETA.md).\n\nVamos ver um exemplo. Suponhamos que estamos interessados nas eleições para prefeito ocorridas em __2012__. O código do cargo para prefeito é __11__. 
Sabendo disso, basta fornecer os valores desejados para a função e executá-la.\n\n```{r, eval = FALSE}\ncandpref_12 <- get_candidates(year = 2012, position = 11)\nView(candpref_12)\n```\n\n## Filtrando resultados\n\nPor padrão, as funções do __cepespR__ retornam todas colunas disponíveis como também todos os partidos, candidatos e estados. A fim de reduzir o tamanho da tabela, é possível selecionar valores específicos para essas consultas e, assim, obter resultados menores e mais fáceis de se trabalhar.\n\n### Selecionando partidos, candidatos e Estados\n\nPara limitar os resultados a valores específicos (um estado, um partido ou um candidato, por exemplo), basta acrescentar os parâmetros `state`, `party` ou `candidate_number` e alterá-los de acordo com o interesse.\n\nVariável | Parâmetro |\n-------------------|-----------------|\nEstado | state |\nPartido | party |\nNúmero do Candidato| candidate_number|\n\nPara mostrar apenas os resultados do Rio Grande do Sul (RS), por exemplo, acrescente o parâmetro `state`.\n\n```{r, eval = FALSE}\nelpres_14_RS <- get_elections(year=2014,\n position=\"Presidente\", \n regional_aggregation=\"Estado\",\n political_aggregation=\"Partido\", \n state=\"RS\")\n\nView(elpres_14_RS)\n```\n\nPara mostrar apenas os resultados referentes ao PT (13), por exemplo, acrescente o parâmetro `party`.\n\n```{r, eval = FALSE}\nelpres_14_PT <- get_elections(year=2014, # Obrigatório: ano da eleição.\n position=\"Presidente\", # Obrigatório: cargo disputado.\n regional_aggregation=\"Estado\", # Opcional: votos agregados por estado.\n political_aggregation=\"Partido\", # Opcional: votos agregados por partido.\n party=\"13\") # Opcional: filtro para partido.\n\nView(elpres_14_PT)\n```\n\nPara mostrar apenas os resultados referentes ao candidato 2511, por exemplo, acrescente o parâmetro `candidate_number`. Vamos escrever os parâmetros usando os códigos. Para o cargo de deputado federal, o código é 6; para a agregação regional por UF, o código é 2; e para a agregação política por candidato, o código é 2:\n\n```{r, eval = FALSE}\neldepfed_2511 <- get_elections(year=2014, # Obrigatório: ano da eleição.\n position=6, # Obrigatório: cargo disputado.\n regional_aggregation=2, # Opcional: votos agregados por estado.\n political_aggregation=2, # Opcional: votos agregados por candidato.\n candidate_number=2511) # Opcional: filtro para candidato.\n\nView(eldepfed_2511)\n```\n\nOutro exemplo: obter o total de votos que os candidatos a prefeito eleitos pelo MDB no Rio de Janeiro.\n\n```{r,warning=F,message=F,eval=FALSE}\nprefeitosMDBrio <- get_elections(year=2012, # Obrigatório: ano da eleição.\n position=\"Prefeito\", # Obrigatório: cargo disputado.\n regional_aggregation=\"Estado\", # Opcional: votos agregados por estado.\n political_aggregation=\"Partido\", # Opcional: votos agregados por partido.\n state = \"RJ\", # Opcional: receber apenas dados do estado do RJ.\n party = 15, # Opcional: receber apenas dados do MDB.\n only_elected = T) # Opcional: receber apenas os eleitos.\n``` \n\n\n### Selecionando colunas\n\nPor padrão, as funções do __cepespR__ retornam todas as colunas disponíveis, mas é possível limitar o tamanho das tabelas para apenas a quantidade de variáveis desejadas.\n\n**Passo 1: Visualizar quais são as colunas-padrão**\n\nExistem duas maneiras de realizar esse procedimento. Em primeiro lugar, você pode acessar a nossa página do [GitHub](https://github.com/Cepesp-Fgv/cepesp-rest) e selecionar a consulta desejada. 
Lá você poderá ver quais colunas são retornadas para cada requisição e escolher as pretendidas. Em segundo lugar, é possível realizar esse procedimento dentro do `R`, mediante a função `names`.\n\nNa função `get_candidates`, por exemplo, as colunas-padrão são:\n\n```{r, eval = FALSE}\nnames(get_candidates(year = 2014, position = \"Presidente\"))\n```\n\nNote que uma lista de 46 colunas apareceu no seu console. E para as outras funções?\n\n```{r, eval = FALSE}\n#Lista as colunas da função get_coalitions\nnames(get_coalitions(year = 2014, position = \"Presidente\"))\n\n#Lista as colunas da função get_votes\nnames(get_votes(year = 2014, position = \"Presidente\"))\n\n#Lista as colunas da função get_elections\nnames(get_elections(year = 2014, position = \"Presidente\"))\n```\n\nE assim sucessivamente. Você também pode ver todas as colunas disponíveis para cada banco de dados do CepespData no nosso [Dicionário de Variáveis](http://cepespdata.io/static/docs/cepespdata_dicionario_publico.xlsx).\n\n**Passo 2: Criar uma lista com o nome das colunas que desejamos**\n\nSe queremos analisar os dados referentes aos votos, por exemplo, poderíamos reduzir nosso banco de dados às seguintes colunas:\n\n```{r, eval = FALSE}\nminhas_colunas <- list(\"NUMERO_CANDIDATO\", \"UF\", \"QTDE_VOTOS\", \"COD_MUN_IBGE\")\n```\n\n**Passo 3: Acrescentar o parâmetro `columns_list` a nossa função**\n\nIndicamos a lista criada com o nome das colunas:\n\n```{r, eval = FALSE}\nvtpres_14_new <- get_votes(year = \"2014\", # Obrigatório: indicar o ano da eleição.\n position = \"Presidente\", # Obrigatório: indicar o cargo de interesse.\n regional_aggregation = \"Municipio\", # Opcional: dados agregados por município.\n columns_list = minhas_colunas) # Opcional: restringir a requisição às colunas de interesse.\n\nView(vtpres_14_new)\n```\n\nRepare que, em primeiro lugar, criamos um objeto chamado `minhas_colunas`, contendo os nomes das variáveis de nosso interesse. Em seguida, indicamos este novo objeto `minhas_colunas` no parâmetro `columns_list` dentro da função `get_votes`.\n\nOutra maneira de selecionar as colunas de interesse é através da função `subset`. Para isto, basta:\n\n```{r, eval = FALSE}\nelpres_14_ <- subset(elpres_14_2, select = c(\"ANO_ELEICAO\", \"QT_VOTOS_BRANCOS\", \"QT_VOTOS_NULOS\"))\n\nView(elpres_14_)\n```\n\nNote que a função foi escrita na seguinte ordem: \n\n <nome do novo banco de dados> <- subset(<nome do antigo data frame>, select = c(<nomes das colunas de interesse>))\n\n### Informações para mais de um ano\n\nTodas as requisições aceitam que se consulte mais de um ano de uma vez. Para isso, basta informar entre aspas os anos a serem consultados e separá-los por vírgulas -- tomando o cuidado de informar anos eleitorais válidos. \n\nExemplo: Todos os prefeitos eleitos pelo PMDB no Rio de Janeiro entre 2008 e 2016:\n\n```{r,warning=F,message=F,eval=FALSE}\nprefsPMDBrio <- get_elections(year=\"2008,2012,2016\", # Indica os três anos que queremos\n position=\"Prefeito\",\n regional_aggregation=\"Municipality\",\n political_aggregation=\"Candidate\",\n state = \"RJ\",\n party = \"15\",\n only_elected = T)\n```\n\n### Informações para mais de um cargo\n\nPara conseguir os resultados para mais de um cargo, é preciso fazer um _for loop_ para cada um dos cargos e empilhar os resultados no formato de um banco de dados (`data.frame`). Essa mesma lógica se aplica também para recuperar os dados de mais de um partido ou mais de um estado (uma UF) na função `get_filiates`. 
\n\nExemplo: Todos os prefeitos e vereadores eleitos pelo PMDB no Rio de Janeiro entre 2008 e 2016:\n\n```{r,warning=F,message=F,eval=FALSE}\n# Criando um vetor com cada cargo requisitado separado por vírgula e entre aspas:\nlista.cargos <- c(\"Vereador\",\"Prefeito\") \n\n# Criando um dataframe vazio para receber os dados:\nbancocompleto <- data.frame() \n\n# Pedindo para que a requisição seja feita para cada cargo da lista, um por vez, até o final da lista:\nfor(cargo in lista.cargos){ \n # Salvando a requisição num banco temporário:\n bancotemporario <- get_elections(year=\"2008,2012,2016\", # Requisição de dados para os três anos de interesse.\n position=cargo, # Será preenchido com um cargo da lista de cargos por vez.\n regional_aggregation=\"Municipality\", # Dados agregados por município.\n political_aggregation=\"Candidate\", # Dados agregados por candidato.\n state = \"RJ\", # Requisição apenas dos candidatos do estado do Rio de Janeiro.\n party = \"15\", # Requisição apenas dos candidatos do PMDB.\n only_elected = T) # Filtrar: apenas candidatos eleitos\n \n # Empilhando os dados temporários no banco de dados completo: \n bancocompleto <- rbind(bancocompleto,bancotemporario) \n\n # Removendo o banco temporário com os dados parciais:\n rm(bancotemporario) \n}\n```\n\n### Cache das consultas\n\nA cada consulta feita na API, o banco de dados pedido será construído e baixado em sua máquina. Para limitar a banda consumida e agilizar as requisições mais comuns, é possível salvar uma cópia dos dados em sua máquina. (Você poderá deletá-la manualmente depois, caso queira atualizar a requisição).\n\nPara isso, basta incluir o parâmetro `cached = T` ao final de qualquer uma das funções disponíveis. Assim, uma cópia dos dados será salva em \"/static/cache\" no seu diretório de trabalho e estará disponível automaticamente quando repetir a consulta. Por exemplo:\n\n```{r,warning=F,message=F,eval=FALSE}\n\npslAC<- get_filiates(state=\"AC\",\n party = \"PSL\",\n cached = T) # Parâmetro cached marcado como \"TRUE\" -- salva uma cópia no seu computador.\n```\n\n## Sobrevoando os dados\n\nUma das vantagens de se utilizar o R é que podemos criar tabelas de frequência com uma certa facilidade, útil para sobrevoar rapidamente variáveis qualitativas (categóricas). \n\nCaso você queira, por exemplo, ver a distribuição de __gênero__ (DESCRICAO_SEXO) ou de __partidos__ (SIGLA_PARTIDO) entre todos os candidatos, podemos utilizar a função `table` da seguinte maneira:\n\n```{r, eval = FALSE}\ntable(candpres_14$DESCRICAO_SEXO)\ntable(candpres_14$SIGLA_PARTIDO)\n```\n\nSuponha agora que você tem interesse na quantidade de candidatas do sexo feminino para as eleições a prefeito de 2016. A função abaixo retorna a frequência absoluta de homens e mulheres:\n\n```{r, eval = FALSE}\nelpref_16 <- get_elections(year=2016, position=\"Prefeito\", regional_aggregation=\"Municipio\", political_aggregation=\"Candidato\")\n\ntable(elpref_16$DESCRICAO_SEXO)\n```\n\nE para as eleições a deputado federal em 2014? Quantas mulheres se candidataram neste ano? Veja:\n\n```{r, eval = FALSE}\neldepfed_14 <- get_elections(year=2014, \n position=\"Deputado Federal\", \n regional_aggregation=\"Estado\",\n political_aggregation=\"Candidato\")\n\ntable(eldepfed_14$DESCRICAO_SEXO)\n```\n\nOutra função que pode ser executada para análises rápidas é a `summary`. Com ela, podemos obter a média, a mediana, os valores mínimo e máximo e outras estatísticas descritivas de uma variável quantitativa. 
No exemplo abaixo, temos a __média de idade__ (IDADE_DATA_ELEICAO) dos candidatos.\n\n```{r, eval = FALSE}\nsummary(candpres_14$IDADE_DATA_ELEICAO)\n```\n\nNote que a função foi escrita assim: \n\n table(<nome do data frame>$<variável do data frame em que estou interessado>)\n\nPara variáveis quantitativas (contínuas), podemos usar a função `summary`. Esta função retorna média, mediana, mínimo e máximo das variáveis. Veja: \n\nSuponhamos que estamos interessados na média das idades dos candidatos nas eleições a presidente de 2014. Veja:\n\n```{r, eval = FALSE}\nsummary(elpres_14$IDADE_DATA_ELEICAO)\n```\n\nNote que a função `summary` funciona de maneira similar a `table`: \n\n summary( <nome do data frame>$<nome da variável do data frame em que estou interessado>)\n\n## Salvando os resultados\n\nPara salvar os _data frames_ gerados neste script em formato `.csv`, basta usar a função `write.csv2`, como abaixo. \n\nA função está organizada da seguinte maneira: \n\n write.csv2( <nome do data frame que quero exportar>, <nome que quero dar ao meu arquivo>.csv)\n\nPor exemplo:\n\n```{r, eval = FALSE}\nwrite.csv2(elpres_14, \"eleicoes_presidente_2014.csv\")\n```\n\nVocê também pode salvar os arquivos em outros formatos, como arquivos próprios de **SPSS** e **Stata**. Para isso, vamos usar o pacote `haven`.\n\nInstalando o pacote:\n\n```{r, eval = FALSE}\ninstall.packages(\"haven\")\n```\n\nSalvando o arquivo:\n\n```{r, eval = FALSE}\n# Salvando o arquivo em SAV (para SPSS):\nhaven::write_sav(data = elpres_14, # Indicando qual objeto do meu ambiente R quero salvar\n path = \"eleicoes_presidente_2014.sav\") # Indicando em que pasta e com que nome salvar o arquivo.\n\n# Salvando o arquivo em DTA (para Stata):\nhaven::write_dta(data = elpres_14, # Indicando qual objeto do meu ambiente R quero salvar\n path = \"eleicoes_presidente_2014.dta\", # Indicando em que pasta e com que nome salvar o arquivo.\n version = 14) # Indicando a versão do Stata com a qual meu arquivo deve ser compatível\n```\n" }, { "alpha_fraction": 0.6219512224197388, "alphanum_fraction": 0.6219512224197388, "avg_line_length": 19.5, "blob_id": "98843e32125ae69ddc66eb6bfe9a49498a368963", "content_id": "f53ed2d844001c050e9a7b8ccc1e4da301b49687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/web/migrate.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.database import migrate\n\nif __name__ == \"__main__\":\n    migrate()\n" }, { "alpha_fraction": 0.6095534563064575, "alphanum_fraction": 0.614745557308197, "avg_line_length": 33.2068977355957, "blob_id": "8cd20df716108b00f39c72c0bcfe040373510aa2", "content_id": "ae22d6549fbf208d72e18448a80ac7abfc16d06f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/web/cepesp/utils/analytics.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import requests\n\nfrom web.cepesp.config import GA_TRACKING_ID\n\n\ndef track_event(category, action, label=None, value=0):\n    if not GA_TRACKING_ID:\n        return\n\n    data = {\n        'v': '1', # API Version.\n        'tid': GA_TRACKING_ID, # Tracking ID / Property ID.\n        # Anonymous Client Identifier. 
Ideally, this should be a UUID that\n # is associated with particular user, device, or browser instance.\n 'cid': '555',\n 't': 'event', # Event hit type.\n 'ec': category, # Event category.\n 'ea': action, # Event action.\n 'el': label, # Event label.\n 'ev': value, # Event value, must be an integer\n }\n\n response = requests.post(\n 'http://www.google-analytics.com/collect', data=data)\n\n # If the request fails, this will raise a RequestException. Depending\n # on your application's needs, this may be a non-error and can be caught\n # by the caller.\n response.raise_for_status()\n" }, { "alpha_fraction": 0.7850000262260437, "alphanum_fraction": 0.7850000262260437, "avg_line_length": 90, "blob_id": "a8d2d57adff08b07531099820416ec7c002c483b", "content_id": "ee1d2feaea8b17d287fae156b4e55ac9164cdd4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1021, "license_type": "no_license", "max_line_length": 326, "num_lines": 11, "path": "/bookdown/08-documentos_auxiliares.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# Documentos auxiliares\n\nNão se esqueça que nossos [documentos auxiliares](http://www.cepespdata.io/documentacao) podem ajudar a selecionar e manipular seus dados de interesse no CepespData. São eles:\n\n- [Dicionário de variáveis](http://www.cepespdata.io/static/docs/cepespdata_dicionario_publico.xlsx) do CepespData, que indica o nome e descrição das variáveis que constam em cada um dos bancos de dados, em Português e Inglês.\n\n- Tabela que relaciona os [códigos de municípios do TSE e do IBGE](http://www.cepespdata.io/static/docs/cod_municipios.csv)\n\n- Tabelas com o número de vagas concorridas por eleição a [Senador](http://www.cepespdata.io/static/docs/vagas_senadores.csv), [Deputado Federal e Estadual](http://www.cepespdata.io/static/docs/vagas_deputados_fed_est.csv) e a [Vereador](http://www.cepespdata.io/static/docs/vagas_vereadores.csv) -- esta última por município.\n\n- [Codebook](http://www.cepesp.io/dados/) com os códigos das categorias de cada variável e as respectivas descrições." 
}, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.6101190447807312, "avg_line_length": 33.46154022216797, "blob_id": "b3d96143f0ca5830954a3f3da56638e92182c0f8", "content_id": "fc6e2e3bdd6db16295b2c0ae8636357cf5efb509", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1344, "license_type": "no_license", "max_line_length": 102, "num_lines": 39, "path": "/etl/fixes/FixComposicaoLegendaCandidato.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom web.cepesp.utils.data import resolve_conflicts\n\n\nclass FixComposicaoLegendaCandidato2006:\n\n def __init__(self, leg_path, leg_presidente_path):\n self.leg_path = leg_path\n self.leg_presidente_path = leg_presidente_path\n\n def check(self, item):\n return item['database'] == 'candidatos' and item['year'] == 2006\n\n def apply(self, df):\n leg = pd.read_csv(self.leg_path, sep=';', dtype=str, low_memory=False)\\\n .append(pd.read_csv(self.leg_presidente_path, sep=';', dtype=str, low_memory=False))\n\n idx = [\"ANO_ELEICAO\", \"CODIGO_CARGO\", \"NUMERO_PARTIDO\", \"SIGLA_UE\"]\n columns = df.columns.tolist()\n\n leg = leg.drop_duplicates(idx)\n\n before = len(df)\n df = df.set_index(idx)\n df = df.merge(leg.set_index(idx), how='left', left_index=True, right_index=True).reset_index()\n\n df['COMPOSICAO_LEGENDA'] = df['COMPOSICAO_COLIGACAO']\n df['CODIGO_LEGENDA'] = df['SEQUENCIA_COLIGACAO']\n df['SIGLA_LEGENDA'] = df['SIGLA_PARTIDO_y']\n df['SIGLA_PARTIDO_x'] = df['SIGLA_PARTIDO_y']\n df['NOME_COLIGACAO_x'] = df['NOME_COLIGACAO_y']\n df = resolve_conflicts(df)\n\n after = len(df)\n if after > before:\n raise Exception(f'Duplicating Values {after - before}')\n\n return df[columns]\n" }, { "alpha_fraction": 0.6940298676490784, "alphanum_fraction": 0.6940298676490784, "avg_line_length": 21.41666603088379, "blob_id": "10fec8cbd16bb5bd22c0ab8beab171829c1ec7d9", "content_id": "edd7f0567b82ed2a7a8295d8f1e3bfdced830176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/web/cepesp/routes/lang.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from flask import session\nfrom flask_babel import refresh\nfrom werkzeug.utils import redirect\n\nfrom web.cepesp.utils.session import back\n\n\ndef lang(locale):\n if locale in ['pt', 'en']:\n session['locale'] = locale\n refresh()\n return redirect(back())" }, { "alpha_fraction": 0.5873287916183472, "alphanum_fraction": 0.6044520735740662, "avg_line_length": 25.545454025268555, "blob_id": "696d1726b49e06230d7a8e3b29f9d78e559df424", "content_id": "4de4196851055b5406f9db6232b545548af56c0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 104, "num_lines": 22, "path": "/web/tests/responses/test_repeated_macro.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom web.tests.utils import get_years, get_request_url\n\n\ndef assert_repeated_macro(ano):\n url = get_request_url(\"votos\", ano=ano, cargo=1, agregacao_regional=1, numero_candidato=13, turno=1)\n df = pd.read_csv(url, sep=',', lineterminator='\\n', encoding='utf-8', dtype=str)\n size = len(df)\n if size == 5 or size == 6: # 5 Macro Regiões + Exterior\n print(\"%d - MACRO OK\" % ano)\n else:\n print(\"%d - MACRO 
REPEATED\" % ano)\n\n\ndef test():\n for y in get_years(1):\n assert_repeated_macro(y)\n\n\nif __name__ == \"__main__\":\n test()\n" }, { "alpha_fraction": 0.535736083984375, "alphanum_fraction": 0.5373650789260864, "avg_line_length": 30.082279205322266, "blob_id": "14da05f5a68e5c322edffb00dc124d5ebc8a74fb", "content_id": "4d1f7d1c9c6b364b15c250f5357e7c447710befc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4911, "license_type": "no_license", "max_line_length": 88, "num_lines": 158, "path": "/etl/process/TestProcess.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import time\nimport traceback\n\nimport pandas as pd\nimport requests\nfrom requests import HTTPError\n\n\nclass QueryFailedException(Exception):\n pass\n\n\nclass CepespClient:\n\n def __init__(self, base):\n self.base = base\n self.headers = {\n 'Accept': 'application/json'\n }\n\n def _get_query_id(self, table, args):\n args['table'] = table\n url = self.base + \"/api/consulta/athena/query\"\n response = requests.get(url, self._translate(args), headers=self.headers).json()\n if 'error' in response:\n raise QueryFailedException(response['error'])\n\n return response['id']\n\n def _get_query_status(self, query_id):\n url = self.base + \"/api/consulta/athena/status\"\n response = requests.get(url, {'id': query_id}, headers=self.headers).json()\n if 'error' in response:\n raise QueryFailedException(response['error'])\n\n return response['status'], response['message']\n\n def _get_query_result(self, query_id):\n url = self.base + \"/api/consulta/athena/result?id=\" + query_id\n\n try:\n df = pd.read_csv(url, sep=',', dtype=str)\n df.columns = map(str.upper, df.columns)\n except HTTPError as e:\n raise QueryFailedException(str(e))\n\n return df\n\n def _request(self, table, args):\n query_id = self._get_query_id(table, args)\n status, message = (\"RUNNING\", None)\n sleep = 1\n\n while status in [\"RUNNING\", \"QUEUED\"]:\n status, message = self._get_query_status(query_id)\n time.sleep(sleep)\n sleep *= 2\n\n if status == \"FAILED\":\n raise QueryFailedException(message)\n\n return self._get_query_result(query_id)\n\n def _translate(self, args):\n options = {'table': args['table'], 'ano': args['year'], 'filters': []}\n\n if 'position' in args:\n options['cargo'] = args['position']\n elif 'job' in args:\n options['cargo'] = args['job']\n else:\n raise Exception('Position argument is mandatory')\n\n if 'regional_aggregation' in args:\n options['agregacao_regional'] = args['regional_aggregation']\n elif 'reg' in args:\n options['agregacao_regional'] = args['reg']\n\n if 'political_aggregation' in args:\n options['agregacao_politica'] = args['political_aggregation']\n elif 'pol' in args:\n options['agregacao_politica'] = args['pol']\n\n if 'columns' in args:\n if isinstance(args['columns'], list):\n options['c'] = \",\".join(args['columns'])\n else:\n options['c'] = args['columns']\n\n if 'filters' in args and isinstance(args['filters'], dict):\n for column in args['filters']:\n value = args['filters'][column]\n options['filters[' + column + ']'] = value\n\n if 'uf' in args:\n options['uf_filter'] = args['uf']\n\n if 'mun' in args:\n options['mun_filter'] = args['mun']\n\n if 'candidate_number' in args:\n options['filters[NUMERO_CANDIDATO]'] = args['candidate_number']\n\n if 'party' in args:\n options['filters[NUMERO_PARTIDO]'] = args['party']\n\n if 'only_elected' in args:\n options['only_elected'] = args['only_elected']\n\n options['sep'] = 
','\n options['brancos'] = 1\n options['nulos'] = 1\n\n return options\n\n def get_votes(self, **args):\n return self._request(\"votos\", args)\n\n def get_candidates(self, **args):\n return self._request(\"candidatos\", args)\n\n def get_coalitions(self, **args):\n return self._request(\"legendas\", args)\n\n def get_elections(self, **args):\n return self._request(\"tse\", args)\n\n\nclass TestProcess:\n\n def __init__(self, fixes):\n self.client = CepespClient(\"http://localhost:5000\")\n self.fixes = fixes\n\n def handle(self):\n for fix in self.fixes:\n name = fix.__class__.__name__\n\n if self._has_test_method(fix):\n print(\"TESTING: %s\" % name, end='')\n try:\n fix.test(self.client)\n print(\" [OK]\")\n except AssertionError as ex:\n print(\" [ASSERT-ERROR] %s: %s\" % (name, ex))\n except HTTPError as ex:\n print(\" [HTTP-ERROR][%d] %s: %s\" % (ex.errno, name, ex))\n except QueryFailedException as ex:\n print(\" [QUERY-ERROR] %s: %s\" % (name, ex))\n except Exception as ex:\n print(\" [ERROR] %s: %s\" % (name, ex))\n traceback.print_exc()\n else:\n print(\"[WARN] %s has no test method\" % name)\n\n def _has_test_method(self, fix):\n method = getattr(fix, \"test\", None)\n return callable(method)\n" }, { "alpha_fraction": 0.4341132938861847, "alphanum_fraction": 0.4341132938861847, "avg_line_length": 26.066667556762695, "blob_id": "73f37221987f67a5401bc9f544a81920af73c5a2", "content_id": "45797899f736bf5c6022dfe32dcfe20dcb09f586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1624, "license_type": "no_license", "max_line_length": 62, "num_lines": 60, "path": "/web/cepesp/columns/secretarios.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "class SecretariesColumnsSelector:\n\n def columns(self):\n return [\n \"STATUS\",\n \"NOME_SECRETARIO\",\n \"RG\",\n \"SEXO\",\n \"NOME_MUNICIPIO_NASCIMENTO\",\n \"UF_NASCIMENTO\",\n \"CARGO\",\n \"ORGAO_OCUPADO\",\n \"UF_ORGAO_OCUPADO\",\n \"COD_GRAU_INSTRUCAO\",\n \"DESCRICAO_GRAU_INSTRUCAO\",\n \"CURSO_MESTRADO\",\n \"CURSO_DOUTORADO\",\n \"JA_ERA_FUNCIONARIO_PUBLICO\",\n \"NIVEL_DE_GOVERNO\",\n \"TRABALHAVA_NA_SECRETARIA_NO_MOMENTO_DA_NOMEACAO\",\n \"ORGAO_EM_QUE_TRABALHAVA\",\n \"ANO_INGRESSO_ORGAO\",\n \"MES_INGRESSO_ORGAO\",\n \"PROFISSAO_ANTES_DE_NOMEADO\",\n \"UF\",\n \"ID_SECRETARIO\",\n \"CPF\",\n \"TITULO_DE_ELEITOR\",\n \"ORGAO_NOME\",\n \"ID_CARGO\",\n \"ID_ORGAO\",\n \"DATA_ASSUMIU\",\n \"DATA_DEIXOU\",\n \"MOTIVO_SAIDA\",\n \"ORIGEM_FILIACAO\",\n \"SIGLA_PARTIDO\",\n \"NOME_PARTIDO\",\n \"CODIGO_MUNICIPIO\",\n \"NOME_MUNICIPIO\",\n \"RACA_RAIS\",\n \"DATA_NASCIMENTO\"\n ]\n\n def visible_columns(self):\n return [\n \"NOME_SECRETARIO\",\n \"RG\",\n \"CARGO\",\n \"ORGAO_OCUPADO\",\n \"UF_ORGAO_OCUPADO\",\n \"UF\",\n \"CPF\",\n \"TITULO_DE_ELEITOR\",\n \"DATA_ASSUMIU\",\n \"DATA_DEIXOU\",\n \"DATA_NASCIMENTO\",\n ]\n\n def order_by_columns(self):\n return ['NOME_SECRETARIO']\n" }, { "alpha_fraction": 0.6339285969734192, "alphanum_fraction": 0.6339285969734192, "avg_line_length": 25.352941513061523, "blob_id": "d981c6a043ce863682621228a5b1eafa4d939a2e", "content_id": "90cd992e8a9a98d0aab277fd4957905f48022d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": "no_license", "max_line_length": 67, "num_lines": 17, "path": "/web/cepesp/routes/filters.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import os\n\nfrom flask import url_for\n\n\ndef 
dated_url_for(endpoint, root_path, **values):\n    if endpoint == 'static':\n        filename = values.get('filename', None)\n        if filename:\n            file_path = os.path.join(root_path, endpoint, filename)\n            values['q'] = int(os.stat(file_path).st_mtime)\n\n    return url_for(endpoint, **values)\n\n\ndef asset_filter(fl, root_path):\n    return dated_url_for('static', root_path, filename=fl)\n" }, { "alpha_fraction": 0.7502714395523071, "alphanum_fraction": 0.7524430155754089, "avg_line_length": 47.47368240356445, "blob_id": "0d9975b945530046f1ade9cdfd39a6802b4516", "content_id": "f36072745431ba1c05041160c7149e843785f09e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 936, "license_type": "no_license", "max_line_length": 315, "num_lines": 19, "path": "/bookdown/02-o_que_e_o_R.Rmd", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# O que é o R?\n\nR é uma linguagem de programação muito utilizada para análise de dados. Com ele, podemos fazer gráficos, mapas, análises estatísticas das mais simples às mais sofisticadas. Uma das grandes vantagens do R é a comunidade bastante ativa que atualiza os pacotes e traz constantemente novas funcionalidades para nós usuários.\n\nComo o CepespData/FGV pode ser acessado por meio de uma [API para R](https://github.com/Cepesp-Fgv/cepesp-r) -- hoje a mais usada do CepespData/FGV -- disponibilizamos a seguir materiais de introdução ao R.\n\n## Instalando o R e o RStudio\n\nPara instalar o R, clique [aqui](https://cran.r-project.org/)\n\nPara instalar o RStudio, interface de desenvolvimento (IDE) na qual realizamos nossas análises, clique [aqui](https://www.rstudio.com/)\n\n## Onde estudar R?\n\n- [Datacamp](https://www.datacamp.com/)\n\n- [Curso R](http://material.curso-r.com/)\n\n- [R 4 Data Science](http://r4ds.had.co.nz/)\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5017064809799194, "avg_line_length": 28.299999237060547, "blob_id": "6e389485f269f19fd6b93d77af6006ad84af8f39", "content_id": "3997e30269ba7f6506d4e2d36ad9dcfa67c96f50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 77, "num_lines": 20, "path": "/etl/fixes/FixBemValor.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass FixBemValor:\n\n    def check(self, item):\n        return item['database'] == 'bem_candidato'\n\n    def apply(self, df: pd.DataFrame):\n        df['VALOR_BEM'] = df['VALOR_BEM'].str.replace(',', '.').astype(float)\n        df['VALOR_BEM'] = df['VALOR_BEM'].map('{:,.2f}'.format)\n\n        df['VALOR_BEM'] = df['VALOR_BEM'].str.replace(',', '_') # , -> _\n        df['VALOR_BEM'] = df['VALOR_BEM'].str.replace('.', ',') # . 
-> ,\n df['VALOR_BEM'] = df['VALOR_BEM'].str.replace('_', '.') # _ -> .\n\n return df\n\n def test(self, client):\n pass\n" }, { "alpha_fraction": 0.6313531994819641, "alphanum_fraction": 0.6332833170890808, "avg_line_length": 43.409523010253906, "blob_id": "88f230e4384cd6bc7260a218ea49ad54565c7567", "content_id": "0673282e28961286e7f8c317e4cd5b1c640f4c54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4663, "license_type": "no_license", "max_line_length": 117, "num_lines": 105, "path": "/web/cepesp/athena/options.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import re\n\nfrom flask_babel import gettext\n\nfrom web.cepesp.columns.filiados import PartyAffiliationsColumnsSelector\nfrom web.cepesp.columns.bem_candidato import CandidateAssetsColumnsSelector\nfrom web.cepesp.columns.candidatos import CandidatesColumnsSelector\nfrom web.cepesp.columns.legendas import CoalitionsColumnsSelector\nfrom web.cepesp.columns.tse import ElectionsColumnsSelector\nfrom web.cepesp.columns.votos import VotesColumnsSelector\nfrom web.cepesp.columns.secretarios import SecretariesColumnsSelector\nfrom werkzeug.exceptions import BadRequest\n\nfrom web.cepesp.utils.data import JOBS, REG, POL\nfrom web.cepesp.utils.request import get_request_years, get_selected_columns, get_request_filters, request_get, \\\n request_get_list\n\n\nclass AthenaQueryOptions:\n\n def __init__(self, table=None):\n self.table = request_get('table') if table is None else table\n self.reg = request_get('agregacao_regional', 0, int)\n self.pol = request_get('agregacao_politica', 2, int)\n self.job = request_get('cargo', 1, int)\n self.jobs = request_get_list('cargo', int)\n self.years = get_request_years(self.job) if self.table != 'filiados' else None\n self.uf_filter = request_get('uf_filter', request_get('uf'))\n self.mun_filter = request_get('mun_filter', request_get('mun'))\n self.turno = request_get('turno')\n self.brancos = not (not (request_get('brancos')))\n self.nulos = not (not (request_get('nulos')))\n self.turno = request_get('turno')\n self.only_elected = not (not (request_get('only_elected')))\n self.start = request_get(\"start\", 0, int)\n self.length = request_get(\"length\", -1, int)\n self.separator = request_get('sep', ',')\n self.format = request_get('format', 'csv')\n self.party = request_get('party')\n self.name_filter = request_get('name_filter', '')\n self.government_period = request_get('government_period', '')\n\n if self.table == 'filiados' and not (self.party and self.uf_filter):\n self.party = 'avante'\n self.uf_filter = 'ac'\n\n (self.name, selector) = self.get_selector()\n\n if self.table == 'filiados' and not (self.party or self.uf_filter):\n raise BadRequest(f'Filiados require PARTY and UF')\n\n self.all_columns = selector.columns()\n self.default_columns = selector.visible_columns()\n self.selected_columns = get_selected_columns(self.default_columns, self.all_columns)\n self.order_by_columns = selector.order_by_columns()\n self.translated_columns = [gettext('columns.' 
+ c) for c in self.selected_columns]\n\n self.filters = get_request_filters(self.selected_columns)\n\n def get_selector(self):\n if self.table == 'tse':\n name = 'TSE_%s_%s_%s_%s' % (JOBS[self.job], REG[self.reg], POL[self.pol], \"_\".join(map(str, self.years)))\n selector = ElectionsColumnsSelector(self.pol, self.reg)\n elif self.table == 'votos':\n name = 'VOTOS_%s_%s_%s' % (JOBS[self.job], REG[self.reg], \"_\".join(map(str, self.years)))\n selector = VotesColumnsSelector(self.reg)\n elif self.table == 'candidatos':\n name = 'CANDIDATOS_%s_%s' % (JOBS[self.job], \"_\".join(map(str, self.years)))\n selector = CandidatesColumnsSelector()\n elif self.table == 'legendas':\n name = 'LEGENDAS_%s_%s' % (JOBS[self.job], \"_\".join(map(str, self.years)))\n selector = CoalitionsColumnsSelector()\n elif self.table == 'bem_candidato':\n name = 'BEM_CANDIDATO_%s' % (\"_\".join(map(str, self.years)))\n selector = CandidateAssetsColumnsSelector()\n elif self.table == 'filiados':\n name = 'FILIADOS_%s' % (\"_\".join([str(self.party), str(self.uf_filter)]))\n selector = PartyAffiliationsColumnsSelector()\n elif self.table == 'secretarios':\n name = 'SECRETARIOS'\n selector = SecretariesColumnsSelector()\n else:\n raise BadRequest(f'Invalid table {self.table} supplied')\n\n return name, selector\n\n def to_dict(self):\n return self.__dict__\n\n\nclass AthenaResultOptions:\n\n def __init__(self):\n self.query_id = str(request_get('id')).lower()\n self.start = request_get(\"start\", 0, int)\n self.length = request_get(\"length\", -1, int)\n self.separator = request_get('sep', ',')\n self.format = request_get('format', 'csv')\n\n def validate(self):\n if not re.match('[\\-0-9a-f]+', self.query_id):\n raise BadRequest('Invalid ID Provided')\n\n def to_dict(self):\n return self.__dict__\n" }, { "alpha_fraction": 0.5878787636756897, "alphanum_fraction": 0.6010100841522217, "avg_line_length": 29.9375, "blob_id": "342ed11354fcbc2ecbae032cb3f69f875e4dc121", "content_id": "0d3929843b6f8ecc34c0c471d058ce5b1512b6be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 990, "license_type": "no_license", "max_line_length": 142, "num_lines": 32, "path": "/web/resources/js/app.js", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "window.$ = window.jQuery = require('jquery');\nwindow.Popper = require('popper.js/dist/umd/popper.js').default;\n\nrequire('bootstrap');\nrequire('datatables.net-bs4');\nrequire('jquery-file-download');\nrequire('autocomplete.js/index_jquery');\n\nwindow.toastr = require(\"toastr\");\n\n// Utils\nwindow.wait = (time) => {\n return new Promise( (resolve) => setTimeout((() => resolve()), time))\n};\n\ntoastr.options = {closeButton: true, positionClass: 'toast-top-right', onclick: null};\n\n/* Set the defaults for DataTables initialisation */\n$.extend(true, $.fn.dataTable.defaults, {\n \"dom\": \"<'row'<'col-md-6 col-sm-12'l><'col-md-6 col-sm-12'f>r><'table-responsive't><'row'<'col-md-5 col-sm-12'i><'col-md-7 col-sm-12'p>>\",\n \"language\": {\n \"lengthMenu\": \" _MENU_ records \",\n \"paginate\": {\n \"previous\": '<i class=\"fa fa-angle-left\"></i>',\n \"next\": '<i class=\"fa fa-angle-right\"></i>'\n }\n }\n});\n\n$(() => {\n $('[data-toggle=\"tooltip\"]').tooltip();\n});\n" }, { "alpha_fraction": 0.6452442407608032, "alphanum_fraction": 0.647814929485321, "avg_line_length": 31.41666603088379, "blob_id": "34c2634ae1704c9ab02b56415637e471b39720ad", "content_id": "12ae677eeab8777d91b9c4ec691e39589321f499", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/etl/process/utillities.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\ndef resolve_conflicts(df, prefer='_x', drop='_y') -> pd.DataFrame:\n columns = df.columns.values.tolist()\n conflicts = [c for c in columns if c.endswith(prefer)]\n drops = [c for c in columns if c.endswith(drop)]\n renames = dict()\n for c in conflicts:\n renames[c] = c.replace(prefer, '')\n\n return df.rename(columns=renames).drop(drops, axis=1)\n" }, { "alpha_fraction": 0.5729166865348816, "alphanum_fraction": 0.5868055820465088, "avg_line_length": 18.200000762939453, "blob_id": "be62383e00ffbc2ccdab9b7f075fbc440c0417c4", "content_id": "2ff347a2c31cc492a4ea5292dce1d49d5d9ec0cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 72, "num_lines": 15, "path": "/etl/fixes/FixEmailCandidato.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass FixEmailCandidato:\n\n def check(self, item):\n return item['database'] == 'candidatos' and item['year'] <= 2010\n\n def apply(self, df: pd.DataFrame):\n df[\"EMAIL_CANDIDATO\"] = \"#NE#\"\n\n return df\n\n def test(self, client):\n pass\n" }, { "alpha_fraction": 0.5381984114646912, "alphanum_fraction": 0.5444697737693787, "avg_line_length": 34.79591751098633, "blob_id": "beed9cbd3cbae6ec81f34ad1dfc4ea8ec11acbae", "content_id": "6f548076bbbc255de4eb9e08e1452590f8610211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1754, "license_type": "no_license", "max_line_length": 90, "num_lines": 49, "path": "/web/cepesp/athena/builders/secretaries.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from web.cepesp.athena.builders.base import AthenaBuilder\nfrom web.cepesp.columns.secretarios import SecretariesColumnsSelector\n\n\nclass SecretariesQueryBuilder(AthenaBuilder):\n\n def __init__(self, **options):\n super().__init__(**options)\n self.selector = SecretariesColumnsSelector()\n\n def build(self):\n columns_renamed = \", \".join([f\"{c} AS {c}\" for c in self.selected_columns()])\n\n return f'''\n SELECT {columns_renamed}\n FROM secretarios\n {self._build_filters()}\n {self._build_order_by()}\n '''\n\n # region def _build_filters(self, start): [...]\n def _build_filters(self):\n where = self._build_base_filters()\n\n if self.opt('uf_filter'):\n where.append(f\"UF = '{self.options['uf_filter']}'\")\n\n if self.opt('name_filter'):\n words = self.opt('name_filter').lower().split(' ')\n\n for w in words:\n where.append(f\"REGEXP_LIKE(LOWER(NOME_SECRETARIO), '{w}')\")\n\n if self.opt('government_period'):\n period = self.opt('government_period').split('-')\n where.append(f\"DATA_ASSUMIU <> '#NE#'\")\n where.append(f\"DATA_DEIXOU <> '#NE#'\")\n where.append(f\"DATA_ASSUMIU <> ''\")\n where.append(f\"DATA_DEIXOU <> ''\")\n if len(period) > 1:\n where.append(f\"CAST(SUBSTR(DATA_ASSUMIU, 1, 4) as integer) > {period[0]}\")\n where.append(f\"CAST(SUBSTR(DATA_DEIXOU, 1, 4) as integer) <= {period[1]}\")\n else:\n where.append(f\"CAST(SUBSTR(DATA_ASSUMIU, 1, 4) as integer) = {period[0]}\")\n\n if len(where) > 0:\n return f\"WHERE \" + \"\\n AND \".join(where)\n else:\n return \"\"\n" }, { "alpha_fraction": 
0.6005057096481323, "alphanum_fraction": 0.6194690465927124, "avg_line_length": 38.5, "blob_id": "7025845bd0ddf92b674902496205ae8176b93391", "content_id": "3d1f481b86087998647df295ee2d7aad8222d027", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 99, "num_lines": 20, "path": "/etl/fixes/FixSequencial2014Legendas.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass FixSequencial2014Legendas:\n\n def check(self, item):\n return item['year'] == 2014 and item['database'] == 'legendas' and not item['president']\n\n def apply(self, df: pd.DataFrame):\n df.loc[df['SEQUENCIA_COLIGACAO'] == '#NE#', 'SEQUENCIA_COLIGACAO'] = df['CODIGO_COLIGACAO']\n df.loc[df['TIPO_LEGENDA'] == 'PARTIDO ISOLADO', 'NOME_COLIGACAO'] = df['SIGLA_COLIGACAO']\n\n return df\n\n def test(self, client):\n df = client.get_coalitions(year=2014, job=3, columns=['SEQUENCIAL_COLIGACAO'])\n df = df[df['TIPO_LEGENDA'] == 'PARTIDO ISOLADO']\n\n assert len(df[df['NOME_COLIGACAO'] == '#NE#']) == 0, \"wrong NOME_COLIGACAO\"\n assert len(df[df['SEQUENCIA_COLIGACAO'] == '#NE#']) == 0, \"wrong SEQUENCIAL_COLIGACAO\"\n\n" }, { "alpha_fraction": 0.4494229853153229, "alphanum_fraction": 0.5156617760658264, "avg_line_length": 53.7870979309082, "blob_id": "34a8953423d8ec57a16df100d25ea5841b0ba336", "content_id": "ab707a903127b7cfae30d3867f05302e3d4ba769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17006, "license_type": "no_license", "max_line_length": 114, "num_lines": 310, "path": "/etl/fixes/SitTotTurnoFix.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\ndef apply_description(df: pd.DataFrame):\n df.loc[df['COD_SIT_TOT_TURNO'].isnull(), 'COD_SIT_TOT_TURNO'] = '-1'\n df.loc[df['COD_SIT_TOT_TURNO'] == '-1', 'DESC_SIT_TOT_TURNO'] = '#NULO#'\n df.loc[df['COD_SIT_TOT_TURNO'] == '1', 'DESC_SIT_TOT_TURNO'] = 'ELEITO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'DESC_SIT_TOT_TURNO'] = 'ELEITO POR MEDIA'\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'DESC_SIT_TOT_TURNO'] = 'ELEITO POR QP'\n df.loc[df['COD_SIT_TOT_TURNO'] == '4', 'DESC_SIT_TOT_TURNO'] = 'NÃO ELEITO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'DESC_SIT_TOT_TURNO'] = '2º TURNO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'DESC_SIT_TOT_TURNO'] = 'SUPLENTE'\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'DESC_SIT_TOT_TURNO'] = 'RENÚNCIA/FALECIMENTO/CASSAÇÃO ANTES A ELEIÇÃO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '10', 'DESC_SIT_TOT_TURNO'] = 'RENÚNCIA/FALECIMENTO/CASSAÇÃO APÓS A ELEIÇÃO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '11', 'DESC_SIT_TOT_TURNO'] = 'REGISTRO NEGADO ANTES DA ELEIÇÃO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '12', 'DESC_SIT_TOT_TURNO'] = 'REGISTRO NEGADO APÓS A ELEIÇÃO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '13', 'DESC_SIT_TOT_TURNO'] = 'INDEFERIDO COM RECURSO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '14', 'DESC_SIT_TOT_TURNO'] = 'CASSADO COM RECURSO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '15', 'DESC_SIT_TOT_TURNO'] = 'SUBSTITUÍDO'\n df.loc[df['COD_SIT_TOT_TURNO'] == '16', 'DESC_SIT_TOT_TURNO'] = 'RENÚNCIA/FALECIMENTO COM SUBSTITUIÇÃO'\n\n return df\n\n\nclass SitTotTurnoFix1998:\n\n def check(self, item):\n return item['year'] == 1998 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n 
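# aux codes avoid collisions while remapping in place (e.g. 2 -> 6 would clash with the original 6, which becomes 5)\n 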
df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2000:\n\n def check(self, item):\n return item['year'] == 2000 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '10', 'COD_SIT_TOT_TURNO'] = '1016' # 10 -> 16\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '1016', 'COD_SIT_TOT_TURNO'] = '16' # 10 -> 16\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2002:\n\n def check(self, item):\n return item['year'] == 2002 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n df.loc[df['COD_SIT_TOT_TURNO'] == '-3', 'COD_SIT_TOT_TURNO'] = '-1' # -3 -> -1\n\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 
2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2004:\n\n def check(self, item):\n return item['year'] == 2004 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n df.loc[df['COD_SIT_TOT_TURNO'] == '-3', 'COD_SIT_TOT_TURNO'] = '-1' # -3 -> -1\n\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '10', 'COD_SIT_TOT_TURNO'] = '1016' # 10 -> 16\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '1016', 'COD_SIT_TOT_TURNO'] = '16' # 10 -> 16\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2006:\n\n def check(self, item):\n return item['year'] == 2006 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2008:\n\n def check(self, 
item):\n return item['year'] == 2008 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '10', 'COD_SIT_TOT_TURNO'] = '1015' # 10 -> 15\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '1015', 'COD_SIT_TOT_TURNO'] = '15' # 10 -> 15\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2010:\n\n def check(self, item):\n return item['year'] == 2010 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '206' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '309' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '502' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7', 'COD_SIT_TOT_TURNO'] = '7010' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8', 'COD_SIT_TOT_TURNO'] = '8011' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9', 'COD_SIT_TOT_TURNO'] = '9012' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '10', 'COD_SIT_TOT_TURNO'] = '1015' # 10 -> 15\n df.loc[df['COD_SIT_TOT_TURNO'] == '11', 'COD_SIT_TOT_TURNO'] = '1113' # 11 -> 13\n df.loc[df['COD_SIT_TOT_TURNO'] == '12', 'COD_SIT_TOT_TURNO'] = '1214' # 12 -> 14\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '206', 'COD_SIT_TOT_TURNO'] = '6' # 2 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '309', 'COD_SIT_TOT_TURNO'] = '9' # 3 -> 9\n df.loc[df['COD_SIT_TOT_TURNO'] == '502', 'COD_SIT_TOT_TURNO'] = '2' # 5 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n df.loc[df['COD_SIT_TOT_TURNO'] == '7010', 'COD_SIT_TOT_TURNO'] = '10' # 7 -> 10\n df.loc[df['COD_SIT_TOT_TURNO'] == '8011', 'COD_SIT_TOT_TURNO'] = '11' # 8 -> 11\n df.loc[df['COD_SIT_TOT_TURNO'] == '9012', 'COD_SIT_TOT_TURNO'] = '12' # 9 -> 12\n df.loc[df['COD_SIT_TOT_TURNO'] == '1015', 'COD_SIT_TOT_TURNO'] = '15' # 10 -> 15\n df.loc[df['COD_SIT_TOT_TURNO'] == '1113', 'COD_SIT_TOT_TURNO'] = '13' # 11 -> 13\n df.loc[df['COD_SIT_TOT_TURNO'] == '1214', 'COD_SIT_TOT_TURNO'] = '14' # 12 -> 14\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2012:\n\n def check(self, item):\n return item['year'] == 2012 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n 
df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '203' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '302' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '506' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '203', 'COD_SIT_TOT_TURNO'] = '3' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '302', 'COD_SIT_TOT_TURNO'] = '2' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '506', 'COD_SIT_TOT_TURNO'] = '6' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n\n return apply_description(df)\n\nclass SitTotTurnoFix2014:\n\n def check(self, item):\n return item['year'] == 2014 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '203' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '302' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '506' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '203', 'COD_SIT_TOT_TURNO'] = '3' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '302', 'COD_SIT_TOT_TURNO'] = '2' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '506', 'COD_SIT_TOT_TURNO'] = '6' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2016:\n\n def check(self, item):\n return item['year'] == 2016 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '203' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '302' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '506' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '203', 'COD_SIT_TOT_TURNO'] = '3' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '302', 'COD_SIT_TOT_TURNO'] = '2' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '506', 'COD_SIT_TOT_TURNO'] = '6' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n\n return apply_description(df)\n\n\nclass SitTotTurnoFix2018:\n\n def check(self, item):\n return item['year'] == 2018 and item['database'] == 'candidatos'\n\n def apply(self, df: pd.DataFrame):\n # Move all COD to AUX\n df.loc[df['COD_SIT_TOT_TURNO'] == '2', 'COD_SIT_TOT_TURNO'] = '203' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '3', 'COD_SIT_TOT_TURNO'] = '302' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '5', 'COD_SIT_TOT_TURNO'] = '506' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '6', 'COD_SIT_TOT_TURNO'] = '605' # 6 -> 5\n\n # Apply COD changes\n df.loc[df['COD_SIT_TOT_TURNO'] == '203', 'COD_SIT_TOT_TURNO'] = '3' # 2 -> 3\n df.loc[df['COD_SIT_TOT_TURNO'] == '302', 'COD_SIT_TOT_TURNO'] = '2' # 3 -> 2\n df.loc[df['COD_SIT_TOT_TURNO'] == '506', 'COD_SIT_TOT_TURNO'] = '6' # 5 -> 6\n df.loc[df['COD_SIT_TOT_TURNO'] == '605', 'COD_SIT_TOT_TURNO'] = '5' # 6 -> 5\n\n return apply_description(df)\n" }, { "alpha_fraction": 0.5503597259521484, "alphanum_fraction": 0.6474820375442505, "avg_line_length": 25.380952835083008, "blob_id": "474938e1f30da50db9a2bb537327e88f8e459ec4", "content_id": "3422ee08a75ec21edfd4ca4bc90ce9001687e2af", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "no_license", "max_line_length": 76, "num_lines": 21, "path": "/web/tests/utils.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "from urllib.parse import urlencode\n\nimport requests\nfrom requests import Response\n\n\ndef get_years(job):\n if job is 1 or job is 3 or job is 5 or job is 6 or job is 7 or job is 8:\n return [2018, 2014, 2010, 2006, 2002, 1998]\n elif job is 11 or 13:\n return [2016, 2012, 2008, 2004, 2000]\n\n\ndef get_request_url(uri, **options):\n url = \"http://test.cepesp.io/api/consulta/\" + uri\n return url + \"?\" + urlencode(options)\n\n\ndef run_request(uri, **options) -> Response:\n r = requests.get(get_request_url(uri, **options))\n return r\n\n\n" }, { "alpha_fraction": 0.4875183701515198, "alphanum_fraction": 0.4958394467830658, "avg_line_length": 38.28845977783203, "blob_id": "088dbc2e04e5dcdec12eb48d31f1c79d22eb3fbf", "content_id": "1fbf78c70b101bf9e12ff149b03d003b354a9654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2043, "license_type": "no_license", "max_line_length": 120, "num_lines": 52, "path": "/etl/crawler/spiders.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom etl.crawler.items import TSEFileItem\n\n\nclass TSESpider(scrapy.Spider):\n name = \"tse\"\n start_urls = ['http://www.tse.jus.br/hotsites/pesquisas-eleitorais/']\n allowed_domains = ['agencia.tse.jus.br', 'www.tse.jus.br']\n loaded_parties = False\n parties = [\"avante\", \"dc\", \"dem\", \"mdb\", \"novo\", \"patri\", \"pc_do_b\", \"pcb\", \"pco\", \"pdt\", \"phs\", \"pmb\", \"pmn\",\n \"pode\", \"pp\", \"ppl\", \"pps\", \"pr\", \"prb\", \"pros\", \"prp\", \"prtb\", \"psb\", \"psc\", \"psd\", \"psdb\", \"psl\",\n \"psol\", \"pstu\", \"pt\", \"ptb\", \"ptc\", \"pv\", \"rede\", \"solidariedade\"]\n ufs = [\"ac\", \"al\", \"am\", \"ap\", \"ba\", \"ce\", \"df\", \"es\", \"go\", \"ma\", \"mg\", \"ms\", \"mt\", \"pa\", \"pb\", \"pe\", \"pi\", \"pr\",\n \"rj\", \"rn\", \"ro\", \"rr\", \"rs\", \"sc\", \"se\", \"sp\", \"to\"]\n\n def parse(self, response):\n if not self.loaded_parties:\n for party in self.parties:\n for uf in self.ufs:\n url = f\"http://agencia.tse.jus.br/estatistica/sead/eleitorado/filiados/uf/filiados_{party}_{uf}.zip\"\n item = TSEFileItem.create(url)\n if self.is_valid(item):\n yield item\n\n self.loaded_parties = True\n\n for link in response.css('a::attr(href)'):\n href = link.extract()\n if href.find('.zip') != -1 and href.find('.sha1') == -1:\n item = TSEFileItem.create(href)\n if self.is_valid(item):\n yield item\n elif href.find('.html') != -1:\n yield response.follow(link, self.parse)\n\n def is_valid(self, item):\n found_db = False\n for db in self.settings.get('DATABASES'):\n if db in item['path']:\n found_db = True\n\n if not found_db:\n return False\n\n if item['year'] and item['year'] not in self.settings.get('YEARS'):\n return False\n\n if item['year'] in [1998, 2006, 2010] and 'BR' in item['name']:\n return False\n\n return True\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5750916004180908, "avg_line_length": 17.200000762939453, "blob_id": "e87f8c503a8655611898f8afdabf4dad936ade4e", "content_id": "ceb38fea9cdfcef225ec2df71d06f593cad30c6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, 
"license_type": "no_license", "max_line_length": 58, "num_lines": 15, "path": "/etl/fixes/FixCodMunTSE.py", "repo_name": "Cepesp-Fgv/tse-dados", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass FixCodMunTSE:\n\n def check(self, item):\n return item['database'] == 'votos'\n\n def apply(self, df: pd.DataFrame):\n df['COD_MUN_TSE'] = df['COD_MUN_TSE'].str.zfill(5)\n\n return df\n\n def test(self, client):\n pass\n" } ]
69
shuntrho/randomsite
https://github.com/shuntrho/randomsite
f2b73fa28f5bc0e3693d3499397bb4348a635de0
41b74894d6027c0342d239dd8ed31c1abfb39be8
fbc7a60eeb3b5329c21db59442815133e731ca79
refs/heads/master
2023-03-29T13:28:37.213115
2021-03-31T11:52:55
2021-03-31T11:52:55
329,713,680
0
1
null
2021-01-14T19:26:16
2021-01-21T17:53:30
2021-01-21T18:34:52
Python
[ { "alpha_fraction": 0.6435643434524536, "alphanum_fraction": 0.6534653306007385, "avg_line_length": 19.200000762939453, "blob_id": "61b35192a5722ad50811fb2ac253599e7efd9882", "content_id": "eb0eef4df20ff528944ee34acbe96e5c5db0b935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 55, "num_lines": 10, "path": "/rand/forms.py", "repo_name": "shuntrho/randomsite", "src_encoding": "UTF-8", "text": "from django import forms\n\n\nclass PersonForm(forms.Form):\n name = forms.CharField(label='Name', max_length=30)\n\n def clean_name(self):\n data = self.cleaned_data['name']\n\n return data\n" }, { "alpha_fraction": 0.6555944085121155, "alphanum_fraction": 0.6555944085121155, "avg_line_length": 37.13333511352539, "blob_id": "16e6381adff3e0afefc1bef9cc7e8f6d0ad49cf8", "content_id": "320d5be0b538aaea67b8433b85c960530f7fb562", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 79, "num_lines": 15, "path": "/rand/urls.py", "repo_name": "shuntrho/randomsite", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('randint/', views.randint, name='randint'),\n path('lottery/', views.lottery, name='lottery'),\n path('group/', views.group, name='group'),\n path('dice_throw/', views.dice_throw, name='dice_throw'),\n path('group_randomizer/', views.group_randomizer, name='group_randomizer'),\n path('elements_draw/', views.elements_draw, name='elements_draw'),\n path('coin/', views.coin, name='coin'),\n path('guesser/', views.guesser, name='guesser')\n]\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7555555701255798, "avg_line_length": 24.714284896850586, "blob_id": "2f3040b11a6ae08fa80a278171408f1018e88f16", "content_id": "8f0de9f3e0205d1d0e7cc98c2b4790cfbe90eb30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/rand/models.py", "repo_name": "shuntrho/randomsite", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.forms import ModelForm\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=50)\n nick = models.CharField(max_length=30)\n" }, { "alpha_fraction": 0.7349397540092468, "alphanum_fraction": 0.7349397540092468, "avg_line_length": 15.600000381469727, "blob_id": "24e7f7b02225023ed2e78d778a8b328d33e918c5", "content_id": "dda46a7c6bf2e963cf432a6ed015896bd344d440", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/rand/apps.py", "repo_name": "shuntrho/randomsite", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass RandConfig(AppConfig):\n name = 'rand'\n" }, { "alpha_fraction": 0.5537905097007751, "alphanum_fraction": 0.5575652718544006, "avg_line_length": 30.949748992919922, "blob_id": "f9f6ba3d1cc2229f23c90e16b20fe9d1eeffa6e1", "content_id": "3ef364abe28f3a4b6fc960d479158fc77c7f107e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6360, "license_type": "no_license", "max_line_length": 110, "num_lines": 199, "path": "/rand/views.py", "repo_name": "shuntrho/randomsite", 
"src_encoding": "UTF-8", "text": "import random\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\n\nfrom rand.models import Person\n\n\ndef index(request):\n \"\"\"View function for home page of site.\"\"\"\n\n # # Generate counts of some of the main objects\n # num_books = Book.objects.all().count()\n # num_instances = BookInstance.objects.all().count()\n #\n # # Available books (status = 'a')\n # num_instances_available = BookInstance.objects.filter(status__exact='a').count()\n #\n # # The 'all()' is implied by default.\n # num_authors = Author.objects.count()\n #\n # context = {\n # 'num_books': num_books,\n # 'num_instances': num_instances,\n # 'num_instances_available': num_instances_available,\n # 'num_authors': num_authors,\n # }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html')\n\n\ndef randint(request):\n req = request.GET\n print(req)\n if req:\n lower_bound = int(req['lower_bound_field'])\n upper_bound = int(req['upper_bound_field'])\n rand_int = random.randint(lower_bound, upper_bound)\n else:\n lower_bound = 0\n upper_bound = 10\n rand_int = ''\n return render(request, 'generators/randint.html', {'lower_bound': lower_bound, 'upper_bound': upper_bound,\n 'rand_int': rand_int})\n\n\ndef lottery(request):\n req = request.GET\n print(req)\n if req:\n total_balls = int(req['total_balls'])\n drawn_balls = int(req['drawn_balls'])\n rand_balls = sorted(random.sample(list(range(1, total_balls + 1)), drawn_balls))\n rand_balls = ', '.join(str(n) for n in rand_balls)\n else:\n total_balls = 49\n drawn_balls = 6\n rand_balls = ''\n return render(request, 'generators/lottery.html', {'total_balls': total_balls, 'drawn_balls': drawn_balls,\n 'rand_balls': rand_balls})\n\n\ndef dice_throw(request):\n rolls = []\n req = request.GET\n print(req)\n\n def roll_many(sides, times):\n for _ in range(times):\n roll = random.randint(1, sides)\n rolls.append(roll)\n print(roll)\n\n if req:\n sides = int(req['sides'])\n times = int(req['times'])\n roll_many(sides, times)\n else:\n sides = 6\n times = 2\n roll_many(sides, times)\n return render(request, 'generators/dice_throw.html', {'sides': sides, 'times': times, 'result': rolls})\n\n\ndef group_randomizer(request):\n people = []\n to_add = ''\n req = request.GET\n print(req)\n if req:\n while to_add != 'NO':\n to_add = str(req['Add person. To stop write NO'])\n people.append(to_add)\n print(people)\n people = people[:-1]\n number_of_teams = int(req['How many teams?'])\n number_people = len(people)\n while number_people > 0 and number_of_teams > 0:\n team = random.sample(people, int(number_people / number_of_teams))\n for x in team:\n people.remove(x)\n number_people -= int(number_people / number_of_teams)\n number_of_teams -= 1\n print(team)\n\n else:\n people = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\"]\n number_of_teams = 3\n number_people = len(people)\n team = random.sample(people, int(number_people / number_of_teams))\n for x in team:\n people.remove(x)\n number_people -= int(number_people / number_of_teams)\n number_of_teams -= 1\n print(team)\n return render(request, 'generators/group_randomizer.html',\n {'Add person. 
To stop write NO': to_add, 'How many teams?': number_of_teams})\n\n\ndef elements_draw(request):\n req = request.GET\n print(req)\n items = []\n add_item = ''\n if req:\n while add_item != 'NO':\n add_item = str(req['Add item. To stop write NO'])\n items.append(add_item)\n number_of_items = int(req[\"How many items?\"])\n random_items = random.sample(items, number_of_items)\n print(random_items)\n else:\n items = [\"koc\", \"termos\", \"kawa\", \"sitko\", \"zabawka\", \"nóż\"]\n number_of_items = 2\n random_items = random.sample(items, number_of_items)\n print(random_items)\n return render(request, 'generators/elements_draw.html',\n {'Add item. To stop write NO': add_item, \"How many items?\": number_of_items})\n\n\n@csrf_exempt\ndef group(request):\n people = None\n person = None\n if request.method == 'POST':\n p = request.POST\n print(p)\n if 'name' in p and 'nick' in p:\n if p['name'] and p['nick']:\n print(p['name'])\n person = Person(name=p['name'], nick=p['nick'])\n person.save()\n else:\n Person.objects.all().delete()\n return HttpResponseRedirect(request.path_info)\n\n elif request.method == 'GET':\n print(request.GET)\n\n people = Person.objects.all()\n if people:\n person = random.choice(people)\n\n return render(request, 'generators/person.html', {'people': people, 'person': person})\n\n\ndef coin(request):\n req = request.GET\n print(req)\n if req:\n coin_q = int(req['coin_q'])\n coins = random.choices([0, 1], k=coin_q)\n else:\n coin_q = 1\n coins = ''\n return render(request, 'generators/coin.html', {'coin_q': coin_q, 'coins': coins})\n\n\ndef guesser(request):\n req = request.GET\n print(req)\n if req:\n lower_bound = int(req['lower_bound_field'])\n upper_bound = int(req['upper_bound_field'])\n user_num = int(req['user_num'])\n rand_int = random.randint(lower_bound, upper_bound)\n else:\n lower_bound = 0\n upper_bound = 10\n user_num = lower_bound\n rand_int = ''\n return render(request, 'generators/guesser.html', {'lower_bound': lower_bound, 'upper_bound': upper_bound,\n 'rand_int': rand_int, 'user_num': user_num})\n" } ]
5
mfs6174/Twitdao11
https://github.com/mfs6174/Twitdao11
00dc33ff1416bbccdd48fddc4fb3770888e05adc
c349d7b005f3aea241a3f240db5b17023eff7389
835d5a8994e1aa25c4f16f3880b0eb86ee092823
refs/heads/master
2020-06-01T04:38:51.273118
2013-06-14T18:23:14
2013-06-14T18:23:14
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.606656551361084, "alphanum_fraction": 0.608169436454773, "avg_line_length": 20.354839324951172, "blob_id": "7d74f3f11b11b4a87cc652e08773bc2e280a92e4", "content_id": "38c4ce7b68cf48e41b17bcb8375e5b52d7e02494", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 661, "license_type": "no_license", "max_line_length": 141, "num_lines": 31, "path": "/templates/settings-twitdao.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "{% extends \"settings-base.html\" %}\n\n{% block title %}{% endblock title %}\n\n{% block head %}\n<script type=\"text/javascript\">\n</script>\n<style type=\"text/css\">\n</style>\n{% endblock head %}\n\n{% block main %}\n<div id=\"Settings\">\n\t<div class=\"heading\">Twitdao Settings:</div>\n\t<div>\n\t<form method=\"post\">\n\t<table>\n\t<tr>\n\t\t<td><label for=\"Td_show_media\">Show Media</label></td>\n\t\t<td>\n\t\t\t<input type=\"checkbox\" name=\"show_media\" id=\"Td_show_media\" value=\"True\"{% if token.settings.show_media %} checked=\"checked\"{% endif %} />\n\t\t</td>\n\t</tr>\n\t<tr>\n\t\t<td colspan=\"2\"><input type=\"submit\" value=\"Save\" /></td>\n\t</tr>\n\t</table>\n\t</form>\n\t</div>\n</div>\n{% endblock main%}" }, { "alpha_fraction": 0.5689922571182251, "alphanum_fraction": 0.5720930099487305, "avg_line_length": 645, "blob_id": "ec82954f073286a7ef58d23a172f478332c127d4", "content_id": "ce9880754f779476d487e1fcea468c4ab097550d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 645, "license_type": "no_license", "max_line_length": 645, "num_lines": 1, "path": "/templates/mobile/nav-bottom.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "<h2>{% ifequal where \"home\" %}[<a href=\"/m/u-/home\">Home</a>]{% else %}<a href=\"/m/u-/home\">Home</a>{% endifequal %}{% ifequal token_user.id owner_user.id %}{% ifequal where \"user\" %} | [<a href=\"/m/u-{{ token_user.screen_name }}\">{{ token_user.screen_name }}</a>]{% else %} | <a href=\"/m/u-{{ token_user.screen_name }}\">{{ token_user.screen_name }}</a>{% endifequal %}{% else %} | <a href=\"/m/u-{{ token_user.screen_name }}\">{{ token_user.screen_name }}</a>{% endifequal %}{% ifequal where \"settings\" %} | [<a href=\"/m/s-\">Settings</a>]{% else %} | <a href=\"/m/s-\">Settings</a>{% endifequal %} | <a href=\"{{ self.logout_url }}\">SignOut</a></h2>" }, { "alpha_fraction": 0.6241258978843689, "alphanum_fraction": 0.6451048851013184, "avg_line_length": 80.85713958740234, "blob_id": "8673d07d32dd81392ac57954396f8bed92a48296", "content_id": "1759ceb05b1b6877fef9ddfdc26a73aead95f530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 572, "license_type": "no_license", "max_line_length": 233, "num_lines": 7, "path": "/templates/mobile/base.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//WAPFORUM//DTD XHTML Mobile 1.0//EN\" \"http://www.wapforum.org/DTD/xhtml-mobile10.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\"><head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\"><title> Twitdao | {{ owner_user.name }}</title><link rel=\"stylesheet\" href=\"/css/m.css\" type=\"text/css\"></head><body>\n<h1><a href=\"/m/u-/home\" id=\"logo\">Twitdao</a></h1>{% if tip %}<p class=\"n\">{{ tip }}</p>{% endif %}\n{% include \"mobile/nav-top.html\" %}\n{% block main %}{% endblock main %}\n{% include 
\"mobile/nav-bottom.html\" %}\n</body></html>" }, { "alpha_fraction": 0.5705463290214539, "alphanum_fraction": 0.5834916830062866, "avg_line_length": 36.58928680419922, "blob_id": "febf92d22ca21c2b1e3a8518fcb126264aef5816", "content_id": "626d45c32c4c62d26f634980d0b75086042f7b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8420, "license_type": "no_license", "max_line_length": 128, "num_lines": 224, "path": "/image_proxy.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import memcache\nfrom google.appengine.api import urlfetch\n\nfrom base import BaseHandler\nfrom django.utils import simplejson as json\nfrom datetime import datetime\n\nimport md\nimport logging\nimport urllib\n\n\n_cached_headers=['last-modified', 'etag', 'cache-control', 'expires', 'content-type']\n\nclass ImageProxy(BaseHandler):\n\n def initialize(self, request, response):\n BaseHandler.initialize(self, request, response)\n self.image_proxy_config = md.get_image_proxy_config()\n\n def get_image(self, image_url, cache_id=None):\n if not cache_id: cache_id=image_url\n _cache=memcache.get(cache_id)\n if _cache:\n if self.request.if_modified_since and 'last-modified' in _cache:\n since = self.request.if_modified_since\n last = datetime.strptime(_cache['last-modified'], '%a, %d %b %Y %H:%M:%S GMT')\n if not last.tzinfo:\n since=since.replace(tzinfo=None)\n if last<=since:\n logging.debug('[ImageProxy] Hit Cache: last-modified')\n self.response.set_status(304)\n if 'content-type' in _cache:\n self.response.headers['Content-Type']=_cache['content-type']\n return\n if self.request.if_none_match and 'etag' in _cache:\n if str(self.request.if_none_match) == _cache['etag']:\n logging.debug('[ImageProxy] Hit Cache: etag')\n self.response.set_status(304)\n if 'content-type' in _cache:\n self.response.headers['Content-Type']=_cache['content-type']\n return\n\n image=urlfetch.fetch(image_url)\n logging.debug('[ImageProxy] Response Headers: %s' % image.headers)\n\n _cache={}\n for h in _cached_headers:\n if h in image.headers:\n _cache[h]=image.headers[h]\n self.response.headers[h]=image.headers[h]\n memcache.set(cache_id, _cache)\n logging.debug('[ImageProxy] Cached Header: %s' % _cache)\n\n self.response.out.write(image.content)\n\n\ndef b58decode(s):\n alphabet = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'\n num, decoded, multi = len(s), 0, 1\n for i in range(num-1, -1, -1):\n decoded = decoded+multi*(alphabet.index(s[i]))\n multi = multi*len(alphabet)\n return decoded\n\ndef flickr_rest(api_url, **params):\n params.update( { 'format':'json', 'nojsoncallback':1 } )\n try:\n http_method = params.pop('http_method')\n except KeyError:\n http_method = urlfetch.GET\n res=urlfetch.fetch('%s?%s' % (api_url, urllib.urlencode(params)), method=http_method)\n content = json.loads(res.content)\n logging.debug('[ImageProxy] Flickr REST: %s' % content)\n return content\n\nclass Flickr(ImageProxy):\n def get(self, link_type, image_id):\n\n api_key = self.image_proxy_config.flickr_api_key\n rest_api_url = self.image_proxy_config.flickr_rest_api_url\n\n if not api_key:\n self.redirect('/images/flickr-not-ready.png')\n return\n\n photo_id = image_id \n if link_type == 'short':\n photo_id = b58decode(image_id)\n\n image_url = memcache.get('Image-Flickr-URL-%s-%s' % (link_type, image_id) )\n if not image_url:\n fpi 
= flickr_rest(rest_api_url, method='flickr.photos.getInfo', api_key=api_key, photo_id=photo_id )\n if fpi['stat'] == 'fail':\n self.redirect('/images/flickr-not-ready.png')\n return\n p = fpi['photo']\n image_url = 'http://farm%s.static.flickr.com/%s/%s_%s_m.jpg' % (p['farm'], p['server'], p['id'], p['secret'])\n memcache.set('Image-Flickr-URL-%s-%s' % (link_type, image_id), image_url)\n\n cache_id = 'Image-Flickr-%s' % image_id\n self.get_image(image_url, cache_id)\n\n\nclass Twitpic(ImageProxy):\n def get(self, image_size, image_id):\n # Thumb(150px x 150px max), Mini(75px x 75px max)\n # http://twitpic.com/show/[size]/[image-id]\n image_url = 'http://twitpic.com/show/%s/%s' % (image_size, image_id)\n cache_id = 'Image-Twitpic-%s-%s' % (image_size, image_id)\n self.get_image(image_url, cache_id)\n\n\nclass Twitgoo(ImageProxy):\n def get(self, image_size, image_id):\n # Thumb/mini (up to 160x160), Img (up to 1600x1600)\n # http://twitgoo.com/show/[size]/[gooid]\n image_url='http://twitgoo.com/show/%s/%s' % (image_size, image_id)\n cache_id='Image-Twitgoo-%s-%s' % (image_size, image_id)\n self.get_image(image_url, cache_id)\n\n\nclass Yfrog(ImageProxy):\n def get(self, domain_tail, image_id):\n image_url='http://yfrog.%s/%s.th.jpg' % (domain_tail, image_id)\n cache_id='Image-Yfrog-%s-%s' % (domain_tail, image_id)\n self.get_image(image_url, cache_id)\n\n\nclass Imgly(ImageProxy):\n def get(self, image_size, image_id):\n # http://img.ly/show/[mini|thumb|medium|large|full]/<image-id>\n image_url='http://img.ly/show/%s/%s' % (image_size, image_id)\n cache_id='Image-Imgly-%s-%s' % (image_size, image_id)\n self.get_image(image_url, cache_id)\n\n\nclass Youtube(ImageProxy):\n def get(self, video_id):\n image_url='http://i.ytimg.com/vi/%s/1.jpg' % video_id\n cache_id='Image-Youtube-%s' % video_id\n self.get_image(image_url, cache_id)\n\n \nclass Moby(ImageProxy):\n def get(self, image_size, image_id):\n #full, square, view, medium, thumbnail, thumb\n image_url='http://moby.to/%s:%s' % (image_id, image_size)\n cache_id='Image-Moby-%s-%s' % (image_size, image_id)\n self.get_image(image_url, cache_id)\n\n\nclass Instagram(ImageProxy):\n def get(self, image_id, image_size):\n #size: One of t (thumbnail), m (medium), l (large). 
Defaults to m.\n if not image_size:\n image_size='l'\n image_url='http://instagr.am/p/%s/media/?size=%s' % (image_id, image_size)\n #self.get_image(image_url)\n self.redirect(image_url)\n\n\ndef picplz_url(image_id, image_size):\n # See: https://sites.google.com/site/picplzapi/\n api_url='http://api.picplz.com/api/v2/pic.json'\n try:\n res=urlfetch.fetch('%s?shorturl_id=%s' % (api_url, image_id))\n img=json.loads(res.content)\n if img['result']!='ok':\n return None\n else:\n return img['value']['pics'][0]['pic_files'][image_size]['img_url']\n except urlfetch.DownloadError:\n return None\n except KeyError, e:\n logging.warning(e)\n return None\n\nclass Picplz(ImageProxy):\n def get(self, image_id, image_size):\n # The default format list is: 640r, 320rh, 100sh\n if not image_size:\n image_size='320rh'\n image_url = picplz_url(image_id, image_size)\n if image_url:\n self.get_image(image_url)\n else:\n self.error(404)\n\n\nclass Plixi(ImageProxy):\n def get(self, image_id, image_size):\n # big - original\n # medium - 600px scaled\n # mobile - 320px scaled\n # small - 150px cropped\n # thumbnail - 79px cropped\n if not image_size:\n image_size='mobile'\n image_url = 'http://api.plixi.com/api/tpapi.svc/imagefromurl?url=http://plixi.com/p/%s&size=%s' % (image_id, image_size)\n self.get_image(image_url)\n\ndef main():\n application = webapp.WSGIApplication([\n ('/i/twitpic/(thumb|mini)/([0-9a-zA-Z]+)', Twitpic),\n ('/i/twitgoo/(thumb|mini|img)/([0-9a-zA-Z]+)', Twitgoo),\n ('/i/yfrog/([\\.a-zA-Z]+)/([0-9a-zA-Z]+)', Yfrog),\n ('/i/imgly/(mini|thumb|medium|large|full)/([0-9a-zA-Z]+)', Imgly),\n ('/i/flickr/(long|short)/([0-9a-zA-Z]+)', Flickr),\n ('/i/y2b/([0-9a-zA-Z_\\-]+)', Youtube),\n ('/i/moby/(full|square|view|medium|thumbnail|thumb)/([0-9a-zA-Z]+)', Moby),\n ('/i/instagram/(?P<image_id>[0-9a-zA-Z_\\-]+)(?:/(?P<image_size>t|m|l))?', Instagram),\n ('/i/picplz/([0-9a-zA-Z]+)(?:/(?P<image_size>640r|320rh|100sh))?', Picplz),\n ('/i/plixi/(?P<image_id>[0-9a-zA-Z]+)(?:/(?P<image_size>big|medium|mobile|small|thumbnail))?', Plixi),\n\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5627450942993164, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 510, "blob_id": "eeaf14ccaca8bf8b7c7928811b082db45f4c67fe", "content_id": "a4a948328db933729d871189efaa5cd7f56be927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 510, "license_type": "no_license", "max_line_length": 510, "num_lines": 1, "path": "/templates/mobile/nav-top.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "<h2>{% ifequal where \"home\" %}[<a href=\"/m/u-/home\">Home</a>]{% else %}<a href=\"/m/u-/home\">Home</a>{% endifequal %}{% ifequal where \"mentions\" %} | [<a href=\"/m/u-/at\">Mentions</a>]{% else %} | <a href=\"/m/u-/at\">Mentions</a>{% endifequal %}{% ifequal where \"favorites\" %} | [<a href=\"/m/u-/favs\">Favorites</a>]{% else %} | <a href=\"/m/u-/favs\">Favorites</a>{% endifequal %}{% ifequal where \"messages\" %} | [<a href=\"/m/m-inbox\">Messages</a>]{% else %} | <a href=\"/m/m-inbox\">Messages</a>{% endifequal %}</h2>" }, { "alpha_fraction": 0.6144772171974182, "alphanum_fraction": 0.6340482831001282, "avg_line_length": 28.370079040527344, "blob_id": "2d6d4db9528625453f8600b9d60e20f5e25557e4", "content_id": "9dc364921105fe8107ccd6c49f3ecef9159e9b11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3748, 
"license_type": "no_license", "max_line_length": 144, "num_lines": 127, "path": "/templatetags/string.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom django.utils import simplejson as json\nfrom django.utils.safestring import mark_safe\nfrom django.template.defaultfilters import stringfilter\n\nimport ttp\nimport utils\n\nimport time\nimport calendar\nimport rfc822\nimport htmllib\nimport urllib\n\nregister = webapp.template.create_template_register()\n\[email protected]\n@stringfilter\ndef twitter_text_py(text):\n p = ttp.Parser()\n return p.parse(text).html\n\[email protected]\n@stringfilter\ndef tweet_id_encode(text):\n return utils.tweet_id_encode(text)\ntweet_id_encode.is_safe=True\n\[email protected]\n@stringfilter\ndef tweet_id_decode(text):\n return utils.tweet_id_decode(text)\ntweet_id_decode.is_safe=True\n\ndef _m_escape(text):\n return ''.join({'&':'&#38;', '\"':'&#34;', '\\'':'&#39;', '>':'&#62;', '<':'&#60;'}.get(c, c) for c in text)\ndef _m_format_tag(tag, text):\n return '<a href=\"/a/search?q=%s\">%s%s</a>' % (urllib.quote('#' + text.encode('utf-8')), tag, text)\ndef _m_format_username(at_char, user):\n return '<a href=\"/m/u-%s\">%s%s</a>' % (user, at_char, user)\ndef _m_format_list(at_char, user, list_name):\n return '<a href=\"/m/l-%s/%s\">%s%s/%s</a>' % (user, list_name, at_char, user, list_name)\ndef _m_google_format_url(url, text):\n return '<a target=\"_blank\" href=\"http://www.google.com/gwt/n?u=%s\">%s</a>' % (urllib.quote(_m_escape(url).encode('utf-8')), text)\ndef _m_baidu_format_url(url, text):\n return '<a target=\"_blank\" href=\"http://gate.baidu.com/tc?from=opentc&src=%s\">%s</a>' % (urllib.quote(_m_escape(url).encode('utf-8')), text)\ndef _m_format_url(url, text):\n return '<a target=\"_blank\" href=\"%s\">%s</a>' % (_m_escape(url), text)\n\[email protected]\n@stringfilter\ndef m_twitter_text(text, op=None):\n p = ttp.Parser()\n p.format_tag=_m_format_tag\n p.format_username=_m_format_username\n p.format_list=_m_format_list\n if op=='google-gwt':\n p.format_url=_m_google_format_url\n elif op=='baidu-gate':\n p.format_url=_m_baidu_format_url\n else:\n p.format_url=_m_format_url\n return p.parse(text).html\n\[email protected]\n@stringfilter\ndef human_readable(date_str):\n '''Get a human redable string representing the posting time\n\n Returns:\n A human readable string representing the posting time\n '''\n if not date_str:\n return ''#TODO 似乎要仔细检查啊。\n fudge = 1.25\n delta = long(time.time()) - long(calendar.timegm(rfc822.parsedate(date_str)))\n\n if delta < (1 * fudge):\n return 'a second ago'\n elif delta < (60 * (1/fudge)):\n return '%d seconds ago' % (delta)\n elif delta < (60 * fudge):\n return 'a minute ago'\n elif delta < (60 * 60 * (1/fudge)):\n return '%d minutes ago' % (delta / 60)\n elif delta < (60 * 60 * fudge):\n return 'about an hour ago'\n elif delta < (60 * 60 * 24 * (1/fudge)):\n return 'about %d hours ago' % (delta / (60 * 60))\n elif delta < (60 * 60 * 24 * fudge):\n return 'about a day ago'\n else:\n return 'about %d days ago' % (delta / (60 * 60 * 24))\nhuman_readable.is_safe=True\n\[email protected]\n@stringfilter\ndef time_format(date_str, fmt_str=\"%Y-%m-%d\"):\n try:\n dtp=rfc822.parsedate(date_str)\n return time.strftime(fmt_str, dtp)\n except:\n return None\ntime_format.is_safe=True\n\[email protected]\n@stringfilter\ndef milliseconds(date_str):\n dtp=rfc822.parsedate(date_str)\n if dtp:\n return long(time.mktime(dtp)*1000)\n else:\n 
return None\nmilliseconds.is_safe=True\n\[email protected]\n@stringfilter\ndef unescape(s):\n p = htmllib.HTMLParser(None)\n p.save_bgn()\n p.feed(s)\n return p.save_end()\n\[email protected]\ndef to_json(obj):\n return mark_safe(json.dumps(obj))\n" }, { "alpha_fraction": 0.7530864477157593, "alphanum_fraction": 0.8240740895271301, "avg_line_length": 39, "blob_id": "fda4583bc4df90f1d148b7d28fd2e991abd9a1bc", "content_id": "afcd70387fed5e94e38a988d3c70672fb4721ff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 612, "license_type": "no_license", "max_line_length": 92, "num_lines": 8, "path": "/README.markdown", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "## 关于twitdao11 \n\nfork自 https://github.com/tongsu/twitdao/ 2013年6月12日twitter关闭了API 1,只能使用API1.1,我做了一些修改来适应这些变化\n\n## twitdao11与twitdao的不同之处\n1. 修改API调用,除了list部分(太复杂我又不常用就不弄了)和一些api1.1不再支持的功能,大部分功能可以使用\n2. 提高自动刷新的间隔,降低自动刷新频率,防止不断出现rate-limit问题(15分钟15次最多)\n3. 添加了一些try except,修改一些小细节,在api出现兼容性问题/网络不好的情况下可以保持显示/重新fetch而不是报错.\n\n\n\n\n" }, { "alpha_fraction": 0.5422885417938232, "alphanum_fraction": 0.552158534526825, "avg_line_length": 39.72549057006836, "blob_id": "053b496124c1564a235caf35f689ce22e92a3419", "content_id": "afd4e9cb4ba8488cf6e1cd790fc7a4bb2bfe524a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12462, "license_type": "no_license", "max_line_length": 226, "num_lines": 306, "path": "/templatetags/entities.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom django.template.defaultfilters import stringfilter\nfrom google.appengine.api import urlfetch\nfrom django.utils import simplejson as json\n#from google.appengine.api import memcache\n\nimport re\nimport urllib\nimport ttp\n\nregister = webapp.template.create_template_register()\n\n_twitpic=re.compile('https?://twitpic\\.com/(?P<id>[0-9a-zA-Z]+)', re.I)\n_twitgoo=re.compile('https?://twitgoo\\.com/(?P<id>[0-9a-zA-Z]+)', re.I)\n_imgly=re.compile('https?://img\\.ly/(?P<id>[0-9a-zA-Z]+)', re.I)\n_yfrog=re.compile('https?://yfrog\\.(?P<tail>[^/]+)(/[a-z])?/(?P<id>[0-9a-zA-Z]{2,})', re.I)\n_flic_kr=re.compile('https?://flic\\.kr/p/(?P<id>[0-9a-zA-Z]+)', re.I)\n_flickr_com=re.compile('https?://(www\\.|)flickr\\.com/photos/[0-9a-zA-Z_]+/(?P<id>[0-9]+)', re.I)\n_youtu_be=re.compile('https?://youtu\\.be/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_youtube_com=re.compile('https?://(www\\.|)youtube\\.com/((watch\\?v=)|(v/))(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_moby_to=re.compile('https?://moby\\.to/(?P<id>[0-9a-zA-Z]+)', re.I)\n_instagram=re.compile('https?://instagr\\.am/p/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_instagramcom=re.compile('https?://instagram\\.com/p/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_picplz=re.compile('https?://picplz\\.com/(?P<id>[0-9a-zA-Z]+)', re.I)\n_plixi=re.compile('https?://plixi\\.com/p/(?P<id>[0-9a-zA-Z]+)', re.I)\n\n_youku=re.compile('https?://v\\.youku\\.com/v_show/id_(?P<id>[0-9a-zA-Z_\\-=]+)\\.html', re.I)\n_tudou=re.compile('https?://(www\\.|)tudou\\.com/programs/view/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_56=re.compile('https?://(www\\.|)56\\.com/([0-9a-zA-Z]+)/v_(?P<id>[0-9a-zA-Z_\\-]+)\\.html', re.I)\n_ku6=re.compile('https?://v\\.ku6\\.com/show/(?P<id>[0-9a-zA-Z_\\-]+)\\.html', re.I)\n\n_bitly = re.compile('http://bit\\.ly/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_jmp = re.compile('http://j\\.mp/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_tco = 
re.compile('http://t\\.co/(?P<id>[a-z0-9]*)', re.I)\n_tcn = re.compile('http://t\\.cn/(?P<id>[a-z0-9]*)', re.I)\n\n_isgd = re.compile('http://is\\.gd/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n_googl = re.compile('http://goo\\.gl/(?P<id>[0-9a-zA-Z_\\-]{3,})', re.I)\n_googlfb = re.compile('http://goo\\.gl/fb/(?P<id>[0-9a-zA-Z_\\-]+)', re.I)\n\[email protected]\n@stringfilter\ndef image_preview(url):\n ''' show photo thumbnails '''\n try:\n url,is_short= url_unshort(url)\n except:\n return '<span class=\"unshorturl\"><a href=\"%s\" target=\"_blank\" rel=\"noreferrer\">%s</a></span>' % (url,url)\n m=_twitpic.search(url)\n if m:\n twitpic_id=m.group('id')\n if twitpic_id.lower() in ['photos','events','places','widgets','upload','account','logout','doc']:\n return ''\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/twitpic/%s/%s\" /></a>' % ( url, 'thumb', twitpic_id )\n\n m=_twitgoo.search(url)\n if m:\n twitgoo_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/twitgoo/%s/%s\" /></a>' % ( url, 'thumb', twitgoo_id )\n\n m=_imgly.search(url)\n if m:\n imgly_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/imgly/%s/%s\" /></a>' % ( url, 'medium', imgly_id )\n\n m=_yfrog.search(url)\n if m:\n yfrog_id=m.group('id')\n yfrog_tail=m.group('tail')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/yfrog/%s/%s\" /></a>' % ( url, yfrog_tail, yfrog_id )\n\n m=_flic_kr.search(url)\n if m:\n flickr_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/flickr/short/%s\" /></a>' % ( url, flickr_id )\n\n m=_flickr_com.search(url)\n if m:\n flickr_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/flickr/long/%s\" /></a>' % ( url, flickr_id )\n\n m=_youtu_be.search(url)\n if m:\n youtube_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/y2b/%s\" /></a>' % ( url, youtube_id )\n\n m=_youtube_com.search(url)\n if m:\n youtube_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/y2b/%s\" /></a>' % ( url, youtube_id )\n\n m=_moby_to.search(url)\n if m:\n moby_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/moby/thumb/%s\" /></a>' % ( url, moby_id )\n\n m=_instagram.search(url)\n if m:\n insid=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/instagram/%s\" width=\"550\" /></a>' % ( url, insid )\n m=_instagramcom.search(url)\n if m:\n insid=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/instagram/%s\" width=\"550\" /></a>' % ( url, insid )\n\n m=_picplz.search(url)\n if m:\n pic_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/picplz/%s\" /></a>' % ( url, pic_id )\n\n m=_plixi.search(url)\n if m:\n pic_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/plixi/%s\" /></a>' % ( url, pic_id )\n \n m=_youku.search(url)\n if m:\n youku_id=m.group('id')\n return '<embed src=\"http://player.youku.com/player.php/sid/%s/v.swf\" quality=\"high\" width=\"480\" height=\"400\" align=\"middle\" allowScriptAccess=\"sameDomain\" type=\"application/x-shockwave-flash\"></embed>' % youku_id\n \n m=_tudou.search(url)\n if m:\n tudou_id=m.group('id')\n return '<embed src=\"http://www.tudou.com/v/%s/v.swf\" 
type=\"application/x-shockwave-flash\" allowscriptaccess=\"always\" allowfullscreen=\"true\" wmode=\"opaque\" width=\"480\" height=\"400\"></embed>' % tudou_id\n\n m=_56.search(url)\n if m:\n _56_id=m.group('id')\n return '<embed src=\"http://player.56.com/v_%s.swf\" type=\"application/x-shockwave-flash\" width=\"480\" height=\"395\" allowNetworking=\"all\" allowScriptAccess=\"always\"></embed>' % _56_id\n\n m=_ku6.search(url)\n if m:\n ku6_id=m.group('id')\n return '<embed src=\"http://player.ku6.com/refer/%s/v.swf\" quality=\"high\" width=\"480\" height=\"400\" align=\"middle\" allowScriptAccess=\"always\" allowfullscreen=\"true\" type=\"application/x-shockwave-flash\"></embed>' % ku6_id\n\tif is_short == 1:\n\t\treturn '<span class=\"unshorturl\"><a href=\"%s\" target=\"_blank\" rel=\"noreferrer\">%s</a></span>' % (url,url)\n return '<span class=\"unshorturl\"><a href=\"%s\" target=\"_blank\" rel=\"noreferrer\">%s</a></span>' % (url,url)\n\ndef url_unshort(url):\n m=_bitly.search(url)\n if m:\n bitly_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://bit.ly/%s&t=json' % bitly_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n\n m=_jmp.search(url)\n if m:\n jmp_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://j.mp/%s&t=json' % jmp_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n\n m=_tco.search(url)\n if m:\n tco_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://t.co/%s&t=json' % tco_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n\n m=_tcn.search(url)\n if m:\n tcn_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://t.cn/%s&t=json' % tcn_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n\n m=_isgd.search(url)\n if m:\n isgd_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://is.gd/%s&t=json' % isgd_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n m=_googl.search(url)\n if m:\n googl_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://goo.gl/%s&t=json' % googl_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n m=_googlfb.search(url)\n if m:\n googl_id=m.group('id')\n try:\n res=urlfetch.fetch('http://api.unshort.me/?r=http://goo.gl/fb/%s&t=json' % googl_id)\n url_json = json.loads(res.content)\n \n newurl = url_json['resolvedURL']\n if newurl != \"http://unshort.me\":\n return newurl,1\n except urlfetch.DownloadError:\n return url,0\n return url,0\n\n\n#def get_url_cache(self, short_service, cache_id=None):\n\ndef _m_google_gwt_url(url):\n return 'http://www.google.com/gwt/n?u=%s' % urllib.quote(url)\ndef _m_baidu_gate_url(url):\n return 'http://gate.baidu.com/tc?from=opentc&src=%s' % urllib.quote(url)\ndef _m_media_url(url, 
op=None):\n if op=='google-gwt':\n url=_m_google_gwt_url(url)\n elif op=='baidu-gate':\n url=_m_baidu_gate_url(url)\n m=_twitpic.search(url)\n if m:\n twitpic_id=m.group('id')\n if twitpic_id.lower() in ['photos','events','places','widgets','upload','account','logout','doc']:\n return ''\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/twitpic/%s/%s\" /></a>' % ( url, 'thumb', twitpic_id )\n m=_twitgoo.search(url)\n if m:\n twitgoo_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/twitgoo/%s/%s\" /></a>' % ( url, 'thumb', twitgoo_id )\n m=_imgly.search(url)\n if m:\n imgly_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/imgly/%s/%s\" /></a>' % ( url, 'thumb', imgly_id )\n m=_yfrog.search(url)\n if m:\n yfrog_id=m.group('id')\n yfrog_tail=m.group('tail')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/yfrog/%s/%s\" /></a>' % ( url, yfrog_tail, yfrog_id )\n m=_flic_kr.search(url)\n if m:\n flickr_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/flickr/short/%s\" /></a>' % ( url, flickr_id )\n m=_flickr_com.search(url)\n if m:\n flickr_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/flickr/long/%s\" /></a>' % ( url, flickr_id )\n m=_youtu_be.search(url)\n if m:\n youtube_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/y2b/%s\" /></a>' % ( url, youtube_id )\n m=_youtube_com.search(url)\n if m:\n youtube_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/y2b/%s\" /></a>' % ( url, youtube_id )\n m=_moby_to.search(url)\n if m:\n moby_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/moby/thumb/%s\" /></a>' % ( url, moby_id )\n m=_instagram.search(url)\n if m:\n insid=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/instagram/%s\" /></a>' % ( url, insid )\n m=_picplz.search(url)\n if m:\n pic_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/picplz/%s\" /></a>' % ( url, pic_id )\n m=_plixi.search(url)\n if m:\n pic_id=m.group('id')\n return '<a href=\"%s\" target=\"_blank\" rel=\"noreferrer\"><img src=\"/i/plixi/%s\" /></a>' % ( url, pic_id )\n return None\n\[email protected]\n@stringfilter\ndef m_media_preview(text, op=None):\n p=ttp.Parser()\n urls=p.parse(text).urls\n medias=[]\n for url in urls:\n u=_m_media_url(url, op)\n if u:\n medias.append(u)\n return ''.join(medias)\n" }, { "alpha_fraction": 0.6622734665870667, "alphanum_fraction": 0.6853377223014832, "avg_line_length": 45.769229888916016, "blob_id": "e11e1c58a26b64f5a5f13f848669da646206fdf3", "content_id": "80120cde1bdb7da8d50cbadea11b555a5d187603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 607, "license_type": "no_license", "max_line_length": 168, "num_lines": 13, "path": "/templates/clean-up-accesses.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "{% extends \"config.html\" %}\n{% block head %}{% endblock head %}\n{% block main %}\n<div id=\"Config\">\n\t<div id=\"ConfigHeading\">Clean Up Accesses</div>\n\t<form action=\"/config/clean_up_accesses\" method=\"post\">\n\t<div style=\"width:80%;margin:0 auto;height:100px;padding:20px;\">\n\t<div style=\"color:#f00;font-size:18px;text-align:center;\">Warning: This 
action may costly! Are you sure to continue?</div>\n\t<div style=\"text-align:center;padding:20px;\"><input type=\"submit\" value=\"Continue\" /> <input type=\"button\" value=\"Cancel\" onclick=\"javascript:window.close();\" /></div>\n\t</div>\n\t</form>\n</div>\n{% endblock main %}" }, { "alpha_fraction": 0.5705048441886902, "alphanum_fraction": 0.5754650235176086, "avg_line_length": 34.727848052978516, "blob_id": "b09126b14613eac3aec1f260ce8e46c7fb17bf1b", "content_id": "91cdeb4a6ae2a329d2170def761ae0a59fc8c06a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11290, "license_type": "no_license", "max_line_length": 123, "num_lines": 316, "path": "/twitter.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport oauth\n\nfrom django.utils import simplejson as json\nfrom google.appengine.api import urlfetch\n\nimport urllib\nfrom cgi import parse_qsl\nimport mimetypes\nimport random\n\nimport logging\n\n#default configs\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\n\nREQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'\nACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'\n\nAUTHORIZE_URL = 'https://twitter.com/oauth/authorize'\nAUTHENTICATE_URL = 'https://twitter.com/oauth/authenticate'\n\nAPI_URL = 'https://api.twitter.com/1.1/'\nSEARCH_API_URL = 'https://api.twitter.com/1.1/search/'\n\n\nMAX_FETCH_COUNT = 5\n\n\n_http_methods={\n 'GET':urlfetch.GET,\n 'POST':urlfetch.POST,\n 'HEAD':urlfetch.HEAD,\n 'PUT':urlfetch.PUT,\n 'DELETE':urlfetch.DELETE\n}\n\ndef _generate_boundary(length=16):\n s = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'\n a = []\n for i in range(length):\n a.append(random.choice(s))\n return ''.join(a)\n\nclass Twitter:\n\n def __init__(self,\n oauth_token=None,\n oauth_token_secret=None,\n consumer_key=CONSUMER_KEY,\n consumer_secret=CONSUMER_SECRET,\n request_token_url=REQUEST_TOKEN_URL,\n access_token_url=ACCESS_TOKEN_URL,\n authorize_url=AUTHORIZE_URL,\n authenticate_url=AUTHENTICATE_URL,\n api_url=API_URL,\n search_api_url=SEARCH_API_URL\n ):\n if oauth_token and oauth_token_secret:\n token = oauth.OAuthToken(oauth_token, oauth_token_secret)\n else:\n token = None\n self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)\n self._signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()\n self._oauth_token = token\n\n self.http_status=0\n self.http_headers={}\n self.http_body=''\n \n #api config\n self.request_token_url=request_token_url\n self.access_token_url=access_token_url\n self.authorize_url=authorize_url\n self.authenticate_url=authenticate_url\n self.api_url=api_url\n self.search_api_url=search_api_url\n\n\n def _get_content_type(self, filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\n\n def _encode_multipart_formdata(self, fields, files=[]):\n \"\"\"\n fields is a sequence of (name, value) elements for regular form fields.\n files is a sequence of (name, filename, value) elements for data to be uploaded as files\n Return (boundary, body)\n \"\"\"\n boundary=_generate_boundary()\n crlf = '\\r\\n'\n\n l = []\n for k, v in fields:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"' % k)\n l.append('')\n l.append(v)\n for (k, f, v) in files:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (k, f))\n l.append('Content-Type: %s' % self._get_content_type(f))\n l.append('')\n 
l.append(v)\n l.append('--' + boundary + '--')\n l.append('')\n body = crlf.join(l)\n return boundary, body\n\n\n def _fetch(self, method, url, params={}, headers={}, files=None):\n payload=None\n if method.upper() in ['POST','PUT']:\n if files and type(files) == list:\n boundary, payload = self._encode_multipart_formdata(params.items(), files)\n headers['Content-Type']='multipart/form-data; boundary=%s' % boundary\n else:\n payload=urllib.urlencode(params)\n try:\n res=urlfetch.fetch(url, payload, _http_methods[method.upper()], headers)\n except:\n self.http_status=500\n return ''\n self.http_status=res.status_code\n self.http_headers=res.headers\n self.http_body=res.content\n logging.debug('[Twitter] Response Headers: %s' % res.headers)\n return res.content\n\n\n def _extend_fetch(self, method, url, params={}, headers={}, files=None):\n http_body=''\n for count in range(MAX_FETCH_COUNT):\n try:\n http_body = self._fetch(method, url, params, headers, files)\n if self.http_status!=200:\n logging.debug('[HTTP Status %s] body %s' % (self.http_status, http_body) )\n if self.http_status in range(499, 600):\n continue\n logging.debug('[Twitter] fetch count: %s ' % str(count+1))\n return http_body\n except urlfetch.DownloadError, e:\n logging.warning('[Twitter] urlfetch: %s' % e)\n continue\n raise Exception('Max fetch count exceeded.')\n\n\n def oauth_request(self, url, params={}, method = 'GET', files=None):\n oauth_request = oauth.OAuthRequest.from_consumer_and_token(\n self._consumer,\n self._oauth_token,\n http_url=url,\n http_method=method,\n parameters = params if not files else {}\n )\n oauth_request.sign_request(\n self._signature_method,\n self._consumer,\n self._oauth_token\n )\n\n if method.upper() == 'GET':\n resp = self._extend_fetch(method, oauth_request.to_url())\n else:\n resp = self._extend_fetch(\n method,\n oauth_request.get_normalized_http_url(),\n params,\n headers=oauth_request.to_header(),\n files=files\n )\n return resp\n\n\n def fetch_request_token(self, callback=None):\n \"\"\"returns {'oauth_token':'the-request-token',\n 'oauth_token_secret':'the-request-secret',\n 'oauth_callback_confirmed':'true'}\"\"\"\n param = {}\n if callback:\n param.update({'oauth_callback':callback})\n response_body = self.oauth_request(self.request_token_url, param)\n request_token = dict(parse_qsl(response_body))\n\n if 'oauth_token' not in request_token:\n return None\n\n self._oauth_token = oauth.OAuthToken(\n request_token['oauth_token'],\n request_token['oauth_token_secret']\n )\n return request_token\n\n\n def fetch_access_token(self, verifier):\n \"\"\"returns {'oauth_token':'the-access-token',\n 'oauth_token_secret':'the-access-secret',\n 'user_id':'1234567',\n 'screen_name':'darasion'}\"\"\"\n param = {}\n param.update({'oauth_verifier':verifier})\n response_body = self.oauth_request(self.access_token_url, param, 'POST')\n \n access_token = dict(parse_qsl(response_body))\n\n if 'oauth_token' not in access_token:\n return None\n\n self._oauth_token = oauth.OAuthToken(\n access_token['oauth_token'],\n access_token['oauth_token_secret']\n )\n return access_token\n\n\n def get_authenticate_url(self, request_token, force_login=False):\n if force_login:\n return \"%s?oauth_token=%s&force_login=true\" % (self.authenticate_url, request_token['oauth_token'])\n else:\n return \"%s?oauth_token=%s\" % (self.authenticate_url, request_token['oauth_token'])\n\n\n def get_authorize_url(self, request_token, force_login=False):\n if force_login:\n return \"%s?oauth_token=%s&force_login=true\" % 
(self.authorize_url, request_token['oauth_token'])\n else:\n return \"%s?oauth_token=%s\" % (self.authorize_url, request_token['oauth_token'])\n\n\n def api_call(self, http_method, api_method, parameters={}, files=None):\n try:\n return json.loads(self.oauth_request(''.join([\n self.api_url,\n api_method,\n '.json'\n ]), parameters, http_method, files))\n except:\n logging.warning('[Twitter] Still cant handle: Status: %s, Body: %s' % (self.http_status, self.http_body))\n raise\n\n\n def get_users_profile_image_url(self, screen_name, size='normal'):\n res=urlfetch.fetch('%s/users/profile_image/%s?size=%s' % (self.api_url, screen_name, size), follow_redirects=False)\n if res.status_code == 302 or res.status_code == 301:\n return res.headers['location']\n return None\n\n def search_api_call(self, q, **params):\n pms={'q':q}\n pms.update(params)\n data = urllib.urlencode(pms)\n return json.loads(urllib.urlopen(''.join([self.search_api_url, 'tweets.json']), data).read())\n\n def hacked_search(self, q, since_id=None, page=None):\n # since_id, page(next_page)\n # include_entities=1, contributor_details=true, domain=https://twitter.com, format=phoenix\n pms={\n 'q':q,\n 'include_entities':'1',\n 'contributor_details':'true',\n 'format':'phoenix',\n 'domain':'https://twitter.com'\n }\n if since_id:\n pms['since_id']=since_id\n if page:\n pms['page']=page\n pms['rpp']=200\n\n data = urllib.urlencode(pms)\n url=\"https://twitter.com/phoenix_search.phoenix\"\n\n res = json.loads(self.oauth_request(''.join([url, '?', data]), pms, 'GET'))\n try:\n logging.debug('RateLimit Class: %s' % self.http_headers['X-RateLimit-Class'])\n logging.debug('RateLimit Limit: %s' % self.http_headers['X-RateLimit-Limit'])\n logging.debug('RateLimit Remaining: %s' % self.http_headers['X-RateLimit-Remaining'])\n logging.debug('RateLimit Reset: %s' % self.http_headers['X-RateLimit-Reset'])\n except:\n pass\n return res\n\n def hacked_following_followers_of(self, user_id):\n # Also followed by.\n # user_id, cursor=-1\n pms={'user_id':user_id,'cursor':'-1'}\n qs = urllib.urlencode(pms)\n url='https://twitter.com/users/following_followers_of.json'\n\n res = json.loads(self.oauth_request(''.join([url, '?', qs]), pms, 'GET'))\n try:\n logging.debug('RateLimit Class: %s' % self.http_headers['X-RateLimit-Class'])\n logging.debug('RateLimit Limit: %s' % self.http_headers['X-RateLimit-Limit'])\n logging.debug('RateLimit Remaining: %s' % self.http_headers['X-RateLimit-Remaining'])\n logging.debug('RateLimit Reset: %s' % self.http_headers['X-RateLimit-Reset'])\n except:\n pass\n return res\n\n def hacked_follows_in_common_with(self, user_id):\n # You both follow.\n # user_id, cursor=-1\n pms={'user_id':user_id,'cursor':'-1'}\n qs = urllib.urlencode(pms)\n url='https://twitter.com/users/follows_in_common_with.json'\n\n res = json.loads(self.oauth_request(''.join([url, '?', qs]), pms, 'GET'))\n try:\n logging.debug('RateLimit Class: %s' % self.http_headers['X-RateLimit-Class'])\n logging.debug('RateLimit Limit: %s' % self.http_headers['X-RateLimit-Limit'])\n logging.debug('RateLimit Remaining: %s' % self.http_headers['X-RateLimit-Remaining'])\n logging.debug('RateLimit Reset: %s' % self.http_headers['X-RateLimit-Reset'])\n except:\n pass\n return res\n" }, { "alpha_fraction": 0.6647116541862488, "alphanum_fraction": 0.6744868159294128, "avg_line_length": 30.96875, "blob_id": "cdfc0cf819cdd1b36f744081206bd255ec7a4de0", "content_id": "5543d6d4f059cf6fa33a2feb01c0b0433c7739fe", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1023, "license_type": "no_license", "max_line_length": 79, "num_lines": 32, "path": "/templatetags/tags.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "from google.appengine.ext import webapp\nregister = webapp.template.create_template_register()\n\nfrom django.template import Node\nfrom django.template import TemplateSyntaxError, VariableDoesNotExist, Variable\n\nfrom datetime import datetime\n\nimport rfc822\n\[email protected]\ndef tweet_stats(parser, token):\n try:\n tag_name, tweet_count, created_at=token.split_contents()\n except ValueError, e:\n raise TemplateSyntaxError(e)\n return TweetStatsNode(tweet_count, created_at)\n\nclass TweetStatsNode(Node):\n def __init__(self, tweet_count, created_at):\n self.tweet_count=Variable(tweet_count)\n self.created_at=Variable(created_at)\n def render(self, context):\n try:\n tweet_count=self.tweet_count.resolve(context)\n created_at=self.created_at.resolve(context)\n tc=float(tweet_count)\n ca=datetime(*rfc822.parsedate(created_at)[0:6])\n ts=tc/(datetime.now()-ca).days\n except:\n return 'NaN'\n return '%9.2f' % ts\n" }, { "alpha_fraction": 0.6301223635673523, "alphanum_fraction": 0.6310257315635681, "avg_line_length": 28.772615432739258, "blob_id": "8a418c78ddf4f972a48e6de6deccf1c673d455ce", "content_id": "4a3d8cd7a80df3ac1a2f85e01be8a0b9809f4818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12471, "license_type": "no_license", "max_line_length": 103, "num_lines": 409, "path": "/md.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "GB18030", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\n\nimport hashlib\nimport logging\nimport sys\nimport pickle\n\n\n\n_app_config_cache=None\n\nclass AppConfig(db.Model):\n consumer_key = db.StringProperty(default='')\n consumer_secret = db.StringProperty(default='')\n\n request_token_url = db.StringProperty(default='https://api.twitter.com/oauth/request_token')\n access_token_url = db.StringProperty(default='https://api.twitter.com/oauth/access_token')\n\n authorize_url = db.StringProperty(default='https://twitter.com/oauth/authorize')\n authenticate_url = db.StringProperty(default='https://twitter.com/oauth/authenticate')\n\n api_url = db.StringProperty(default='https://api.twitter.com/1.1/')\n search_api_url = db.StringProperty(default='https://api.twitter.com/1.1/search/')\n\n twitpic_api_key = db.StringProperty(default='')\n\ndef set_app_config(\n consumer_key=None,\n consumer_secret=None, \n request_token_url=None,\n access_token_url=None,\n authorize_url=None,\n authenticate_url=None,\n api_url=None,\n search_api_url=None,\n twitpic_api_key=None,\n):\n global _app_config_cache\n params={'key_name':'app_config'}\n if consumer_key:\n params['consumer_key'] = consumer_key\n if consumer_secret:\n params['consumer_secret'] = consumer_secret\n if request_token_url:\n params['request_token_url'] = request_token_url\n if access_token_url:\n params['access_token_url'] = access_token_url\n if authorize_url:\n params['authorize_url'] = authorize_url\n if authenticate_url:\n params['authenticate_url'] = authenticate_url\n if api_url:\n params['api_url'] = api_url\n if search_api_url:\n params['search_api_url'] = search_api_url\n if twitpic_api_key:\n params['twitpic_api_key'] = twitpic_api_key\n app_config = AppConfig(**params)\n logging.debug('[App Config] Set: %s' % 
params)\n app_config.put()\n _app_config_cache = app_config\n memcache.set('app_config', app_config)\n return app_config\n\ndef get_app_config():\n global _app_config_cache\n if _app_config_cache:\n logging.debug('[MD] hit _app_config_cache %s' % _app_config_cache)\n return _app_config_cache\n app_config = memcache.get('app_config')\n _app_config_cache = app_config\n if not app_config:\n app_config = AppConfig.get_by_key_name('app_config')\n if not app_config:\n return set_app_config()\n _app_config_cache = app_config\n memcache.set('app_config', app_config)\n return app_config\n\n\n\n_image_proxy_config_cache=None\n\nclass ImageProxyConfig(db.Model):\n flickr_api_key = db.StringProperty(default='')\n flickr_api_secret = db.StringProperty(default='')\n flickr_rest_api_url = db.StringProperty(default='http://api.flickr.com/services/rest/')\n\ndef set_image_proxy_config(\n flickr_api_key=None,\n flickr_api_secret=None,\n flickr_rest_api_url=None\n):\n global _image_proxy_config_cache\n params={'key_name':'image_proxy_config'}\n if flickr_api_key:\n params['flickr_api_key'] = flickr_api_key\n if flickr_api_secret:\n params['flickr_api_secret'] = flickr_api_secret\n if flickr_rest_api_url:\n params['flickr_rest_api_url'] = flickr_rest_api_url\n image_proxy_config = ImageProxyConfig(**params)\n logging.debug('[ImageProxy Config] Set: %s' % params)\n image_proxy_config.put()\n _image_proxy_config_cache = image_proxy_config\n memcache.set('image_proxy_config', image_proxy_config)\n return image_proxy_config\n\ndef get_image_proxy_config():\n global _image_proxy_config_cache\n if _image_proxy_config_cache:\n return _image_proxy_config_cache\n image_proxy_config = memcache.get('image_proxy_config')\n _image_proxy_config_cache = image_proxy_config\n if not image_proxy_config:\n image_proxy_config = ImageProxyConfig.get_by_key_name('image_proxy_config')\n if not image_proxy_config:\n return set_image_proxy_config()\n _image_proxy_config_cache = image_proxy_config\n memcache.set('image_proxy_config', image_proxy_config)\n return image_proxy_config\n\n\n\nclass PickledProperty(db.Property):\n data_type = db.Blob\n\n def get_value_for_datastore(self, model_instance):\n value = self.__get__(model_instance, model_instance.__class__)\n if value is not None:\n return db.Blob(pickle.dumps(value))\n\n def make_value_from_datastore(self, value):\n if value is not None:\n return pickle.loads(str(value))\n\n\nclass TwitdaoUser(db.Model):\n app_user = db.UserProperty(auto_current_user_add=True)\n default_token = db.ReferenceProperty(default=None)\n\n def __str__(self):\n return str(self.app_user)\n\n\n_default_token_settings={\n 'show_media':True,\n 'm_show_avatar':False,\n 'm_show_media':False,\n 'm_optimizer':None\n}\n\nclass AccessToken(db.Model):\n #twitdao info\n twitdao_user = db.ReferenceProperty(reference_class=TwitdaoUser, collection_name=\"access_tokens\")\n first_auth_at = db.DateTimeProperty(auto_now_add=True)\n last_auth_at = db.DateTimeProperty(auto_now=True)\n settings = PickledProperty(default=_default_token_settings)\n #access token\n user_id = db.IntegerProperty()\n screen_name = db.StringProperty()\n oauth_token = db.StringProperty()\n oauth_token_secret = db.StringProperty()\n\n def __str__(self):\n return '(%s, %s, key=%s)' % (self.user_id, self.screen_name, self.key())\n\nclass NoUserError(Exception):\n '''Raise when we can't find any user.'''\n pass\n\ndef _default_app_user():\n app_user = users.get_current_user()\n if not app_user:\n raise NoUserError('Have you logged in?')\n return 
app_user\n\ndef _app_user_key(app_user=None):\n '''Identifier of the user. '''\n if not app_user:\n app_user = _default_app_user()\n return 'token-%s-%s-%s-%s-%s' % (\n app_user.nickname(),\n app_user.email(),\n app_user.user_id(),\n app_user.federated_identity(),\n app_user.federated_provider()\n )\n\n\ndef set_default_access_token(access_token, app_user=None):\n '''\n 设置app_user的默认access token.\n '''\n if not app_user:\n app_user = _default_app_user()\n twitdao_user = TwitdaoUser.all().filter('app_user =', app_user).get()\n twitdao_user.default_token = access_token\n twitdao_user.put()\n \n default_key = _app_user_key(app_user)\n memcache.set( default_key, access_token)\n return access_token\n\n\ndef get_access_tokens(size=50, cursor=None):\n '''\n 获取 access tokens.\n\n 返回 token 列表和一个 cursor.\n 如果返回的 cursor!=None, 则仍有更多tokens; 如果返回的 cursor==None, 则token已经取尽.\n '''\n q=AccessToken.all()\n if cursor:\n q.with_cursor(cursor)\n tokens=q.fetch(size)\n next_cursor=q.cursor()\n if len(tokens)<size:\n next_cursor = None\n return tokens, next_cursor\n\n\ndef get_user_access_tokens(app_user=None, size=10, cursor=None):\n '''\n 获取 app_user 的 access tokens.\n\n 返回 token 列表和一个 cursor.\n 如果返回的 cursor!=None, 则仍有更多tokens; 如果返回的 cursor==None, 则token已经取尽.\n\n 如果未指定 app_user 则默认app_user就是当前登录用户。\n '''\n if not app_user:\n app_user = _default_app_user()\n tdu = TwitdaoUser.all().filter('app_user =', app_user).get()\n\n next_cursor=None\n tokens=None\n if tdu:\n if cursor:\n q=tdu.access_tokens.with_cursor(cursor)\n else:\n q=tdu.access_tokens\n tokens=q.fetch(size)\n next_cursor=q.cursor()\n else:\n return None,None\n\n if len(tokens)<size:\n next_cursor = None\n\n return tokens, next_cursor\n\n\ndef get_default_access_token(app_user=None):\n '''\n 获取 app_user 的默认 access token.\n\n 如果未指定 app_user 则默认app_user就是当前登录用户。\n '''\n if not app_user:\n app_user = _default_app_user()\n default_key = _app_user_key(app_user)\n token = memcache.get(default_key)\n if not token:\n twitdao_user = TwitdaoUser.all().filter('app_user =', app_user).get()\n if twitdao_user:\n # Try to prevent the \"ReferenceProperty failed to be resolved\" error.\n try:\n token = twitdao_user.default_token\n if not token: return None\n memcache.set_multi({str(token.key()):token, default_key:token})\n except:\n logging.warning('Exception: %s' % sys.exc_info()[0])\n return None\n else:\n return None\n return token\n\n\ndef get_access_token(token_key=None, app_user=None):\n '''\n 获取token_key所代表的access token.\n\n 如果指定了 app_user , 则只取 app_user 的 access token\n 否则直接取得 access_token。\n '''\n if app_user:\n token=memcache.get(str(token_key))\n if not token:\n token = AccessToken.get(token_key)\n \n if not token:\n return None\n elif token.twitdao_user.app_user != app_user:\n return None\n else:\n memcache.set(str(token_key),token)\n return token\n else:\n token=memcache.get(str(token_key))\n if not token:\n token = AccessToken.get(token_key)\n if not token: return None\n memcache.set(str(token_key),token)\n return token\n\n\ndef save_access_token(\n user_id,\n screen_name,\n oauth_token,\n oauth_token_secret,\n app_user\n):\n tdu = TwitdaoUser.all().filter('app_user =', app_user).get()\n if not tdu:\n tdu = TwitdaoUser()\n tdu.put()\n\n tk = tdu.access_tokens.filter('user_id =', long(user_id)).get()\n if tk:\n tk.screen_name=screen_name\n tk.oauth_token=oauth_token\n tk.oauth_token_secret=oauth_token_secret\n tk.twitdao_user=tdu\n tk.put()\n else:\n tk = AccessToken(\n app_user = app_user,\n twitdao_user=tdu,\n user_id=long(user_id),\n 
screen_name=screen_name,\n oauth_token=oauth_token,\n oauth_token_secret=oauth_token_secret\n )\n tk.put()\n\n # Set the token as default only if default_token is None or the Error is raised.\n try:\n # Try to prevent the \"ReferenceProperty failed to be resolved\" error.\n if not tdu.default_token:\n tdu.default_token = tk\n tdu.put()\n except:\n logging.warning('Exception: %s' % sys.exc_info()[0])\n tdu.default_token = tk\n tdu.put()\n\n return tk\n\n\ndef delete_access_token(token_key=None, app_user=None):\n '''\n 删除token_key所代表的 access token.\n\n 如果指定了 app_user, 则只删除app_user 的 access token.\n 否则直接删除 access token.\n '''\n token = AccessToken.get(token_key)\n if not token:\n return None\n\n if not app_user:\n memcache.delete_multi(keys=[str(token_key), _app_user_key(token.twitdao_user.app_user)])\n token.delete()\n elif token.twitdao_user.app_user != app_user:\n return None\n else:\n memcache.delete_multi(keys=[str(token_key), _app_user_key(app_user)])\n token.delete()\n return token\n\n\ndef _cleanup_settings(settings):\n if not isinstance(settings, dict):\n return _default_token_settings\n skeys=settings.keys()\n for k in skeys:\n if k not in _default_token_settings:\n del settings[k]\n return settings\n\ndef set_token_settings(token_key, app_user=None, **settings):\n token = AccessToken.get(token_key)\n if not token:\n return None\n\n if not app_user:\n settings=_cleanup_settings(settings)\n old_settings=_cleanup_settings(token.settings)\n old_settings.update(settings)\n token.settings=old_settings\n memcache.delete_multi({str(token_key):token, _app_user_key(token.twitdao_user.app_user):token})\n token.put()\n elif token.twitdao_user.app_user != app_user:\n return None\n else:\n settings=_cleanup_settings(settings)\n old_settings=_cleanup_settings(token.settings)\n old_settings.update(settings)\n token.settings=old_settings\n memcache.delete_multi({str(token_key):token, _app_user_key(app_user):token})\n token.put()\n\ndef get_proxy_access_token():\n\treturn get_access_token('agdnYWUtdHVpchILEgtBY2Nlc3NUb2tlbhipRgw','')\n" }, { "alpha_fraction": 0.5605130791664124, "alphanum_fraction": 0.5649749040603638, "avg_line_length": 28.393442153930664, "blob_id": "d3ba4416af61d835b12ba54da835e6d281d29bd5", "content_id": "3773150106621c22adf20afcafcf53ce3563e122", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1793, "license_type": "no_license", "max_line_length": 101, "num_lines": 61, "path": "/user.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\nfrom twitdao import Twitdao\n\nimport md\n\nimport urllib\n\nclass ShowUserTimeline(BaseHandler):\n def get(self, screen_name):\n\n params = self.params([\n 'user_id',\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities',\n ],include_rts='true')\n #if screen_name== '':\n # self.redirect('/')\n # return\n token = md.get_proxy_access_token()\n #if not token:\n # self.redirect('/')\n # return\n\n td = Twitdao(token)\n owner_user = td.users_show_by_screen_name( screen_name=screen_name )\n token_user = td.users_show_by_id(user_id = token.user_id)\n friendship = td.friendships_show(source_id=token.user_id, target_screen_name=screen_name)\n timeline = td.user_timeline(screen_name=screen_name, **params)\n\n\n 
self.render('user-timeline-proxy.html', {\n 'token':token,\n #'token_user':'twittertwitter',# token_user\n 'owner_user':owner_user,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'timeline':timeline,\n #'friendship':friendship,\n 'where':'user',\n })\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/user/([0-9a-zA-Z_]+)', ShowUserTimeline),\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5381718277931213, "alphanum_fraction": 0.5412734150886536, "avg_line_length": 31.103839874267578, "blob_id": "23dbb5316576fb141735852f319dd164ca2d203e", "content_id": "d84522a455b604f3c0b3d83524672ac07337e3cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45138, "license_type": "no_license", "max_line_length": 138, "num_lines": 1406, "path": "/main.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\nfrom twitdao import Twitdao\n\nimport md\n\nimport urllib\n\n\n#Home\nclass HomeTimeline(BaseHandler):\n def get(self):\n params=self.params([\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities'\n ])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n timeline = td.home_timeline(**params)\n limit_rate = td.API_limit_rate()\n\n self.render('home-timeline.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'limit_rate':limit_rate,\n 'where':'home'\n })\n\n\nclass Mentions(BaseHandler):\n def get(self):\n params=self.params([\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities'\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return \n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n timeline = td.mentions(**params)\n\n self.render('mentions-timeline.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'where':'mentions'\n })\n\n\nclass Retweets(BaseHandler):\n def get(self, which):\n params=self.params([\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_entities'\n ])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return \n\n td = Twitdao(token)\n timeline=[]\n if which == 'retweeted_by_me':\n timeline = td.retweeted_by_me(**params)\n title = \"retweeted by me\"\n elif which == 'retweeted_to_me':\n timeline = td.retweeted_to_me(**params)\n title = \"retweeted to me\"\n elif which == 'retweeted_of_me':\n timeline = td.retweets_of_me(**params)\n title = 
\"retweeted of me\"\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n self.render('retweets-timeline.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'where':which,\n 'which':which,\n 'title':title,\n })\n\n\nclass Retweet(BaseHandler):\n def get(self, id):\n params = self.params(['trim_user','include_entities'])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('retweet.html', {\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self, id):\n params = self.params(['trim_user','include_entities'])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n tweet = td.statuses_retweet(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n\n self.redirect('/t')\n\n\nclass UserTimeline(BaseHandler):\n def get(self, screen_name):\n\n params = self.params([\n 'user_id',\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities',\n ],include_rts='true')\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n owner_user = td.users_show_by_screen_name( screen_name=screen_name )\n token_user = td.users_show_by_id(user_id = token.user_id)\n friendship = td.friendships_show(source_id=token.user_id, target_screen_name=screen_name)\n timeline = td.user_timeline(screen_name=screen_name, **params)\n\n self.render('user-timeline.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'timeline':timeline,\n 'friendship':friendship,\n 'where':'user',\n })\n\n\nclass UpdateStatus(BaseHandler):\n\n def get(self):\n\n screen_name = self.param('screen_name')\n status_id = self.param('status_id')\n\n params = self.params(['trim_user','include_entities'])\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n if screen_name:\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n self.render('reply.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'screen_name':screen_name,\n })\n else:\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=status_id,**params)\n \n self.render('reply.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self):\n status = self.param('status')\n \n params = self.params([\n 'in_reply_to_status_id',\n 'lat',\n 'long',\n 'place_id',\n 'display_coordinates',\n 'trim_user',\n 'include_entities',\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n td = Twitdao(token)\n 
td.statuses_update(status=status.encode('utf-8'), **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/t')\n\n\nclass ShowStatus(BaseHandler):\n def get(self, status_id):\n\n params = self.params(['trim_user','include_entities'])\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=status_id,**params)\n\n self.render('tweet-show.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n\nclass DeleteStatus(BaseHandler):\n def get(self, id):\n\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('tweet-delete.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n \n def post(self, id):\n \n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n tweet = td.statuses_destroy(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n\n self.redirect('/t')\n\n\nclass Followers(BaseHandler):\n def get(self, screen_name):\n \n params = self.params([\n 'user_id',\n 'cursor',\n 'include_entities',\n ], cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n followers = td.statuses_followers(screen_name=screen_name, **params)\n\n self.render('followers.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'error': followers['error'] if 'error' in followers else False,\n 'followers':followers if 'error' in followers else followers['users'],\n 'next_cursor':None if 'error' in followers else followers['next_cursor'],\n 'next_cursor_str':None if 'error' in followers else followers['next_cursor_str'],\n 'previous_cursor':None if 'error' in followers else followers['previous_cursor'],\n 'previous_cursor_str':None if 'error' in followers else followers['previous_cursor_str'],\n 'where':'followers',\n })\n\n\nclass Following(BaseHandler):\n def get(self, screen_name):\n\n params = self.params([\n 'user_id',\n 'cursor',\n 'include_entities',\n ], cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n following = td.statuses_friends(screen_name=screen_name, **params)\n \n self.render('following.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'error': following['error'] if 'error' in following else False,\n 'following':following if 'error' in following else following['users'],\n 'next_cursor':None if 'error' in following else following['next_cursor'],\n 'next_cursor_str':None if 
'error' in following else following['next_cursor_str'],\n 'previous_cursor':None if 'error' in following else following['previous_cursor'],\n 'previous_cursor_str':None if 'error' in following else following['previous_cursor_str'],\n 'where':'following',\n })\n\n\nclass Follow(BaseHandler):\n def get(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n follow_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('follow.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':follow_user,\n })\n\n def post(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n follow_user = td.friendships_create(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/t/%s?force_refresh=true' % screen_name)\n\n\n\nclass UnFollow(BaseHandler):\n def get(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n follow_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('unfollow.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':follow_user,\n })\n\n def post(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n follow_user = td.friendships_destroy(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/t/%s?force_refresh=true' % screen_name)\n\n\n\nclass Block(BaseHandler):\n def get(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n block_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('block.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':block_user,\n })\n\n def post(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n block_user = td.blocks_create(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/t/%s?force_refresh=true' % screen_name)\n\n\nclass UnBlock(BaseHandler):\n def get(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = 
token.user_id)\n owner_user = token_user\n block_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('unblock.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':block_user,\n })\n\n def post(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n follow_user = td.blocks_destroy(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/t/%s?force_refresh=true' % screen_name)\n\n\n#Favorite\nclass Favorites(BaseHandler):\n def get(self, screen_name):\n\n params = self.params(['page', 'include_entities'])\n page = self.param('page')\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n favorites = td.favorites(id=screen_name, **params)\n\n prev_page, next_page = None, 2\n if page:\n try:\n page = int(page)\n prev_page = page-1 if page-1>0 else None\n next_page = page+1\n except:\n pass\n\n self.render('favorites.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'favorites':favorites,\n 'prev_page':prev_page,\n 'next_page':next_page,\n 'where':'favorites',\n })\n\n\nclass FavoritesDestroy(BaseHandler):\n def get(self, id):\n\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('unfavorite.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self, id):\n\n params = self.params(['include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n tweet = td.favorites_destroy(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/t/%s/favorites' % token.screen_name)\n\n\nclass FavoritesCreate(BaseHandler):\n\n def get(self, id):\n\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('favorite.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self, id):\n\n params = self.params(['include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n tweet = td.favorites_create(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/t/%s/favorites' % token.screen_name)\n\n\n#direct message\nclass DirectMessages(BaseHandler):\n def get(self):\n\n 
params = self.params([\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'include_entities',\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n direct_messages = td.direct_messages(**params)\n\n self.render('messages.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'max_id':str(direct_messages[-1]['id']-1) if type(direct_messages)==list and len(direct_messages)>0 else None,\n 'since_id':direct_messages[0]['id_str'] if type(direct_messages)==list and len(direct_messages)>0 else None,\n 'messages':direct_messages,\n 'where':'inbox',\n })\n\n\nclass DirectMessagesSent(BaseHandler):\n def get(self):\n \n params = self.params([\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'include_entities',\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n direct_messages = td.direct_messages_sent(**params)\n self.render('messages-sent.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'max_id':str(direct_messages[-1]['id']-1) if type(direct_messages)==list and len(direct_messages)>0 else None,\n 'since_id':direct_messages[0]['id_str'] if type(direct_messages)==list and len(direct_messages)>0 else None,\n 'messages':direct_messages,\n 'where':'sent',\n })\n\n\nclass DirectMessagesNew(BaseHandler):\n def get(self):\n\n screen_name = self.param('screen_name')\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n self.render('message-new.html',{\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'screen_name':screen_name,\n })\n\n def post(self):\n screen_name = self.param('screen_name')\n user_id = self.param('user_id')\n text = self.param('text')\n\n params = self.params(['include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n message = td.direct_messages_new(user_id=user_id, screen_name=screen_name, text=text.encode('utf-8'), **params)\n\n self.redirect('/a/messages_sent')\n\n\nclass DirectMessagesDestroy(BaseHandler):\n def get(self, id):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n \n #No show single message api.\n message = None \n\n self.render('message-destroy.html',{\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'message':message,\n })\n\n def post(self, id):\n params = self.params(['include_entities'])\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n message = td.direct_messages_destroy(id=id, **params)\n self.redirect('/a/messages_sent')\n\n\nclass Lists(BaseHandler):\n def get(self, screen_name):\n\n params = self.params(['cursor'],cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n 
lists = td.user_lists_get(screen_name = screen_name, **params)\n\n self.render('lists.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'lists':lists['lists'], \n 'next_cursor':lists['next_cursor'],\n 'next_cursor_str':lists['next_cursor_str'],\n 'previous_cursor':lists['previous_cursor'],\n 'previous_cursor_str':lists['previous_cursor_str'],\n 'where':'lists',\n })\n\n\nclass ListsMemberships(BaseHandler):\n def get(self, screen_name):\n\n params = self.params(['cursor'],cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n lists = td.user_list_memberships(screen_name = screen_name, **params)\n\n self.render('lists-memberships.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'lists':lists['lists'], \n 'next_cursor':lists['next_cursor'],\n 'next_cursor_str':lists['next_cursor_str'],\n 'previous_cursor':lists['previous_cursor'],\n 'previous_cursor_str':lists['previous_cursor_str'],\n 'where':'list-memberships',\n })\n\n\nclass ListsSubscriptions(BaseHandler):\n def get(self, screen_name):\n\n params = self.params(['cursor'],cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n lists = td.user_list_subscriptions(screen_name = screen_name, **params)\n\n self.render('lists-subscriptions.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'lists':lists['lists'], \n 'next_cursor':lists['next_cursor'],\n 'next_cursor_str':lists['next_cursor_str'],\n 'previous_cursor':lists['previous_cursor'],\n 'previous_cursor_str':lists['previous_cursor_str'],\n 'where':'list-subscriptions',\n })\n\n\nclass ListTimeline(BaseHandler):\n def get(self, screen_name, slug ):\n params = self.params(['since_id','max_id','per_page','page','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n ls = td.user_list_id_get(id=slug, screen_name=screen_name)\n timeline = td.user_list_id_statuses(id=slug, screen_name = screen_name, **params)\n\n self.render('list-timeline.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'list':ls,\n 'timeline':timeline,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'where':'list-timeline'\n })\n\n\nclass ListCreate(BaseHandler):\n def get(self):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n self.render('list-create.html',{\n 'token_user':token_user,\n 'owner_user':owner_user,\n })\n\n def post(self):\n name = self.param('name')\n params = self.params(['mode','description'], mode='public')\n\n name=name.encode('utf-8')\n if 'description' in params:\n params['description']=params['description'].encode('utf-8')\n\n token = 
md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n lst = td.user_lists_post(name=name, **params)\n self.redirect('/t/%s/%s' % (token_user['screen_name'], urllib.quote(lst['slug'].encode('utf-8'))))\n\n\nclass ListEdit(BaseHandler):\n def get(self, lid):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n lst = td.user_list_id_get(id=lid)\n\n self.render('list-edit.html',{\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'list':lst,\n })\n \n def post(self, lid):\n\n params = self.params(['name','mode','description'])\n\n if 'name' in params:\n params['name']=params['name'].encode('utf-8')\n if 'description' in params:\n params['description']=params['description'].encode('utf-8')\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n lst = td.user_lists_id_post(id=lid, **params)\n self.jedirect('/t/%s/%s' % (token_user['screen_name'], urllib.quote(lst['slug'].encode('utf-8'))), time=2000)\n\n\nclass ListDelete(BaseHandler):\n def get(self, lid):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n lst = td.user_list_id_get(id=lid)\n\n self.render('list-delete.html',{\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'list':lst,\n })\n \n def post(self, lid):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n lst = td.user_list_id_delete(id=lid)\n self.redirect('/t/%s/lists' % token.screen_name)\n\n\nclass ListFollow(BaseHandler):\n def get(self, screen_name, slug ):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n lst = td.user_list_id_get(id=slug, screen_name=screen_name )\n\n self.render('list-follow.html',{\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'list':lst,\n })\n\n def post(self, screen_name, slug ):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n td.user_list_id_subscribers_post(screen_name=screen_name, list_id=slug)\n self.redirect('/t/%s/%s' % (screen_name, slug) )\n\n\nclass ListUnFollow(BaseHandler):\n def get(self, screen_name, slug ):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n lst = td.user_list_id_get(id=slug, screen_name=screen_name )\n\n self.render('list-unfollow.html',{\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'list':lst,\n })\n\n def post(self, screen_name, slug ):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n td.user_list_id_subscribers_delete(screen_name=screen_name, list_id=slug)\n self.redirect('/t/%s/%s' % (screen_name, slug) )\n\n\nclass ListAdd(BaseHandler):\n def get(self, screen_name):\n\n params = self.params(['cursor'],cursor=-1)\n\n 
token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n add_user = owner_user\n lists = td.user_lists_get(**params)\n\n self.render('lists-add-to.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'add_user':add_user,\n 'lists':lists['lists'],\n 'where':'lists',\n })\n\n def post(self, screen_name):\n\n list_ids=self.request.get_all('list_ids')\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n for list_id in list_ids:\n taskqueue.add(url=\"/q/list_add_user\", params={'tk':token.key(), 'list_id':list_id, 'screen_name':screen_name}, method='GET')\n #td.user_list_id_members_post(token.screen_name, list_id, id=screen_name)\n\n self.redirect('/t/%s/lists' % token.screen_name)\n\n\nclass ListRemove(BaseHandler):\n def get(self, slug, screen_name):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n remove_user = td.users_show_by_screen_name(screen_name = screen_name)\n lst = td.user_list_id_get(id=slug, screen_name=token.screen_name )\n\n self.render('list-remove-from.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'remove_user':remove_user,\n 'list':lst,\n 'where':'lists',\n })\n\n def post(self, slug, screen_name):\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n td.user_list_id_members_delete(screen_name=token.screen_name, list_id=slug, id=screen_name)\n self.redirect('/t/%s/%s/following' % (token.screen_name, slug) )\n\n\nclass ListFollowing(BaseHandler):\n def get(self, screen_name, slug):\n\n params = self.params(['cursor', 'include_entities'], cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n lst = td.user_list_id_get(id=slug, screen_name=screen_name )\n following = td.user_list_id_members_get(screen_name, slug, **params)\n self.render('list-following.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'error': following['error'] if 'error' in following else False,\n 'following':following if 'error' in following else following['users'],\n 'next_cursor':None if 'error' in following else following['next_cursor'],\n 'next_cursor_str':None if 'error' in following else following['next_cursor_str'],\n 'previous_cursor':None if 'error' in following else following['previous_cursor'],\n 'previous_cursor_str':None if 'error' in following else following['previous_cursor_str'],\n 'list':lst,\n 'where':'list-following',\n })\n\n\nclass ListFollowers(BaseHandler):\n def get(self, screen_name, slug):\n\n params = self.params(['cursor', 'include_entities'], cursor=-1)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n lst = td.user_list_id_get(id=slug, screen_name=screen_name )\n followers = 
td.user_list_id_subscribers_get(screen_name, slug, **params)\n self.render('list-followers.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'error': followers['error'] if 'error' in followers else False,\n 'followers':followers if 'error' in followers else followers['users'],\n 'next_cursor':None if 'error' in followers else followers['next_cursor'],\n 'next_cursor_str':None if 'error' in followers else followers['next_cursor_str'],\n 'previous_cursor':None if 'error' in followers else followers['previous_cursor'],\n 'previous_cursor_str':None if 'error' in followers else followers['previous_cursor_str'],\n 'list':lst,\n 'where':'list-followers',\n })\n\n\nclass Blocking(BaseHandler):\n\n def get(self):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n params = self.params(['page', 'include_entities'])\n page = self.param('page')\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n blocking = td.blocks_blocking(**params)\n\n prev_page, next_page = None, 2\n if page:\n try:\n page = int(page)\n prev_page = page-1 if page-1>0 else None\n next_page = page+1\n except:\n pass\n\n self.render('blocking.html',{\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'blocking':blocking,\n 'prev_page':prev_page,\n 'next_page':next_page,\n })\n\n\nclass ReportSpam(BaseHandler):\n def get(self, screen_name):\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('report-spam.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'title':'Report %s for spam?' 
% screen_name,\n 'confirm':'Report',\n 'where':'reportspam',\n })\n\n def post(self, screen_name):\n #user_id, screen_name, include_entities\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n td.report_spam(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/t/%s?force_refresh=true' % screen_name)\n\n\nclass SavedSearches(BaseHandler):\n def get(self):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n searches = td.saved_searches()\n\n self.render('saved_searches.html',{\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'searches':searches,\n })\n\n\nclass Search(BaseHandler):\n def get(self):\n\n q = self.param('q')\n\n params = self.params([\n 'lang',\n 'locate',\n 'rpp',\n 'page',\n 'since_id',\n 'until',\n 'geocode',\n 'show_user',\n 'result_type',\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n limit_rate = td.API_limit_rate()\n\n searchd = None\n if q:\n q = q.encode('utf-8')\n searchd = td.search(q, **params)\n self.render('search.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'q':q,\n\t\t\t'limit_rate':limit_rate,\n 'search_data':searchd\n })\n\n\nclass HackedSearch(BaseHandler):\n def get(self):\n q = self.param('q')\n page = self.param('page')\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n searchd = None\n timeline=[]\n if q:\n searchd=td.hacked_search(q.encode('utf-8'), page=page)\n timeline=searchd['statuses']\n self.render('hacked_search.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'q':q,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'search_data':searchd\n })\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/t/?', HomeTimeline),\n ('/t/mentions', Mentions),\n ('/t/retweets/(retweeted_by_me)', Retweets),\n ('/t/retweets/(retweeted_to_me)', Retweets),\n ('/t/retweets/(retweeted_of_me)', Retweets),\n ('/a/retweet/([0-9]+)', Retweet),\n ('/t/statuses/update', UpdateStatus),\n ('/a/statuses/reply', UpdateStatus),\n ('/a/statuses/mention', UpdateStatus),\n ('/a/statuses/delete/([0-9]+)', DeleteStatus),\n ('/a/statuses/([0-9]+)', ShowStatus),\n ('/t/([0-9a-zA-Z_]+)/followers', Followers),\n ('/t/([0-9a-zA-Z_]+)/following', Following),\n ('/t/([0-9a-zA-Z_]+)/favorites', Favorites),\n ('/t/[0-9a-zA-Z_]+/favorites/create/([0-9]+)', FavoritesCreate),\n ('/t/[0-9a-zA-Z_]+/favorites/destroy/([0-9]+)', FavoritesDestroy),\n\n ('/t/([0-9a-zA-Z_]+)/lists', Lists),\n ('/t/([0-9a-zA-Z_]+)/lists/memberships', ListsMemberships),\n ('/t/([0-9a-zA-Z_]+)/lists/subscriptions', ListsSubscriptions),\n ('/t/([0-9a-zA-Z_]+)/([0-9a-zA-Z\\-%]+)/?', ListTimeline),\n\n ('/t/([0-9a-zA-Z_]+)/([0-9a-zA-Z\\-%]+)/following', ListFollowing),\n 
('/t/([0-9a-zA-Z_]+)/([0-9a-zA-Z\\-%]+)/followers', ListFollowers),\n\n ('/a/list_create', ListCreate),\n ('/a/list_edit/([0-9a-zA-Z\\-%]+)', ListEdit),\n ('/a/list_delete/([0-9a-zA-Z\\-%]+)', ListDelete),\n ('/a/list_follow/([0-9a-zA-Z_]+)/([0-9a-zA-Z\\-%]+)', ListFollow),\n ('/a/list_unfollow/([0-9a-zA-Z_]+)/([0-9a-zA-Z\\-%]+)', ListUnFollow),\n\n ('/a/list_add/([0-9a-zA-Z_]+)', ListAdd),\n ('/a/list_remove/([0-9a-zA-Z\\-%]+)/([0-9a-zA-Z_]+)', ListRemove),\n\n ('/t/([0-9a-zA-Z_]+)', UserTimeline),\n\n ('/a/messages', DirectMessages),\n ('/a/messages_sent', DirectMessagesSent),\n ('/a/messages_new', DirectMessagesNew),\n ('/a/messages_destroy/([0-9]+)', DirectMessagesDestroy),\n\n ('/a/follow/([0-9a-zA-Z_]+)', Follow),\n ('/a/unfollow/([0-9a-zA-Z_]+)', UnFollow),\n\n ('/a/block/([0-9a-zA-Z_]+)', Block),\n ('/a/unblock/([0-9a-zA-Z_]+)', UnBlock),\n ('/a/blocking', Blocking),\n\n ('/a/report_spam/([0-9a-zA-Z_]+)', ReportSpam),\n\n #('/a/search', Search),\n ('/a/saved_searches', SavedSearches),\n\n ('/a/search', HackedSearch),\n\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6071490049362183, "alphanum_fraction": 0.6109297275543213, "avg_line_length": 31.327777862548828, "blob_id": "210f8ca5fdde6cac86ebe5c3dbaa0456c48054dd", "content_id": "4624715e2ffc1e987209c64d0cd2ccd067f53598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5819, "license_type": "no_license", "max_line_length": 92, "num_lines": 180, "path": "/twitpic2.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport mimetypes\nimport urllib\nimport random\n\nimport oauth\n\nfrom django.utils import simplejson as json\nfrom google.appengine.api import urlfetch\n\n_http_methods={\n 'GET':urlfetch.GET,\n 'POST':urlfetch.POST,\n 'HEAD':urlfetch.HEAD,\n 'PUT':urlfetch.PUT,\n 'DELETE':urlfetch.DELETE\n}\n\n_requires_authentication=[\n 'upload',\n 'comments/create',\n 'comments/delete',\n 'comments/create',\n 'comments/delete',\n 'faces/show',\n 'faces/create',\n 'faces/edit',\n 'faces/delete',\n 'event/create',\n 'event/delete',\n 'event/add',\n 'event/remove',\n 'tags/create',\n 'tags/delete'\n]\n\ndef _generate_boundary(length=16):\n s = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'\n a = []\n for i in range(length):\n a.append(random.choice(s))\n return ''.join(a)\n\ndef _get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\ndef _encode_multipart_formdata(fields, files=[]):\n \"\"\"\n fields is a sequence of (name, value) elements for regular form fields.\n files is a sequence of (name, filename, value) elements for data to be uploaded as files\n Return (boundary, body)\n \"\"\"\n boundary = _generate_boundary()\n crlf = '\\r\\n'\n\n l = []\n for k, v in fields:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"' % k)\n l.append('')\n l.append(str(v))\n for (k, f, v) in files:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (k, f))\n l.append('Content-Type: %s' % _get_content_type(f))\n l.append('')\n l.append(str(v))\n l.append('--' + boundary + '--')\n l.append('')\n body = crlf.join(l)\n return boundary, body\n\n\nclass TwitPic2(oauth.OAuthClient):\n \"\"\"TwitPic OAuth Client API\"\"\"\n \n SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'\n STATUS_UPDATE_URL = 
'https://api.twitter.com/1.1/statuses/update.json'\n USER_INFO_URL = 'https://api.twitter.com/1.1/account/verify_credentials.json'\n \n FORMAT = 'json'\n SERVER = 'http://api.twitpic.com/2/'\n \n def __init__(self, consumer_key=None, consumer_secret=None, \n service_key=None, access_token=None):\n \"\"\"\n An object for interacting with the Twitpic API.\n \n The arguments listed below are generally required for most calls.\n \n Args:\n consumer_key:\n Twitter API Key [optional]\n consumer_secret:\n Twitter API Secret [optional]\n access_token:\n Authorized access_token in string format. [optional]\n service_key:\n Twitpic service key used to interact with the API. [optional]\n \n NOTE:\n The TwitPic OAuth Client does NOT support fetching \n an access_token. Use your favorite Twitter API Client to \n retrieve this.\n \n \"\"\"\n self.server = self.SERVER\n self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)\n self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()\n self.service_key = service_key \n self.format = self.FORMAT\n\n self.http_status=0\n self.http_headers={}\n self.http_body=''\n\n if access_token:\n self.access_token = oauth.OAuthToken.from_string(access_token)\n \n def set_consumer(self, consumer_key, consumer_secret):\n self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)\n \n def set_access_token(self, access_token):\n self.access_token = oauth.OAuthToken.from_string(access_token)\n \n def set_service_key(self, service_key):\n self.service_key = service_key\n\n def _fetch(self, method, url, params={}, headers={}, files=None):\n payload=None\n if method.upper() in ['POST','PUT']:\n if files and type(files) == list:\n boundary, payload = _encode_multipart_formdata(params.items(), files)\n headers['Content-Type']='multipart/form-data; boundary=%s' % boundary\n else:\n payload=urllib.urlencode(params)\n res=urlfetch.fetch(url, payload, _http_methods[method.upper()], headers)\n self.http_status=res.status_code\n self.http_headers=res.headers\n self.http_body=res.content\n return res.content\n\n def api_call(self, http_method, api_method, params={}, files=None):\n\n url = '%s%s.%s' % (self.server, api_method, self.format)\n\n if api_method not in _requires_authentication:\n # Public endpoint: no OAuth signing or extra headers required.\n resp = self._fetch(http_method, url, params, {})\n return json.loads(resp)\n\n oauth_request = oauth.OAuthRequest.from_consumer_and_token(\n self.consumer,\n self.access_token,\n http_url=self.USER_INFO_URL\n )\n\n # Sign our request before setting Twitpic-only parameters\n oauth_request.sign_request(self.signature_method, self.consumer, self.access_token)\n\n # Set TwitPic parameters\n oauth_request.set_parameter('key', self.service_key)\n\n for key, value in params.iteritems():\n oauth_request.set_parameter(key, value)\n\n # Build request body parameters.\n params = oauth_request.parameters\n\n # Get the oauth headers.\n oauth_headers = oauth_request.to_header(realm='http://api.twitter.com/')\n\n # Add the headers required by TwitPic and any additional headers.\n headers = {\n 'X-Verify-Credentials-Authorization': oauth_headers['Authorization'],\n 'X-Auth-Service-Provider': self.USER_INFO_URL,\n }\n\n resp=self._fetch(http_method, url, params, headers, files)\n return json.loads(resp)\n" }, { "alpha_fraction": 0.5960670113563538, "alphanum_fraction": 0.5985433459281921, "avg_line_length": 29.51555633544922, "blob_id": "a3b4e8387730f31ec74da986149f03c1679e3df0", "content_id": "d7337eeda24d1d4c4e7c995cd742541ec2160722", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "JavaScript", "length_bytes": 6873, "license_type": "no_license", "max_line_length": 117, "num_lines": 225, "path": "/static/js/tweet.js", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "$(function(){\n\n\tvar td=twitdao();\n\n\t//update\n\tfunction updateStatus(){\n\t\tvar in_reply_to_status_id = $('#In_reply_to_status_id').val();\n\t\tvar status = $('#Status').val();\n\t\tvar lat=$('#TweetForm input[name=lat]').val();\n\t\tvar l0ng=$('#TweetForm input[name=long]').val();\n\t\tvar others=(lat&&l0ng)?{'lat':lat,'long':l0ng}:{};\n\t\t$('#TweetButton').attr('disabled','disabled');\n\t\ttd.update(status,in_reply_to_status_id,others,function(d){\n\t\t\t$('#TweetButton').removeAttr('disabled');\n\t\t\t$('#In_reply_to_status_id').val('');\n\t\t\t$('#Status').val('');\n\t\t\tif(getConfig('tsn')==getConfig('osn')){\n\t\t\t\t$('#TweetsCount').text(Number($('#TweetsCount').text())+1);\n\t\t\t}\n\t\t\t$(\"#UploadArea\").fadeOut(function(){\n\t\t\t\t$(\"#UploadMedia\").show();\n\t\t\t\t$(\"#UploadMask\").hide();\n\t\t\t\t$(\"#UploadPreview\").html('').hide();\n\t\t\t});\n\t\t\tcharLeft();\n\t\t},function(){\n\t\t\t$('#TweetButton').removeAttr('disabled');\n\t\t});\n\t}\n\t$('#TweetButton').click(function(e){\n\t\tupdateStatus();\n\t\te.preventDefault();return false;\n\t});\n\t$(\"#Status\")\n\t.change(function(){charLeft();changeHeading();})\n\t.keydown(function(){charLeft();changeHeading();})\n\t.keyup(function(){charLeft();changeHeading();})\n\t.keydown(function(event){\n\t\tif(event.ctrlKey && event.keyCode==13){updateStatus();}\n\t});\n\n\t//upload\n\t$('#UploadButton').click(function(){\n\t\t$('#UploadArea').slideToggle('fast');\n\t});\n\t$('#IUpload').load(function(){\n\t\tvar i = $(this)[0], d, data;\n\t\tif (i.contentDocument){d = i.contentDocument;}\n\t\telse if(i.contentWindow){d = i.contentWindow.document;}\n\t\telse{d = window.frames['IUpload'].document;}\n\t\tif(d.location.href == \"about:blank\") {return;}\n\t\tdata=$.parseJSON(d.body.innerHTML);\n\t\tif(data && data.success)\n\t\t{\n\t\t\tvar l=' '+data['response']['url'],o=$('#Status').val();\n\t\t\tvar s=$('#Status').val(o+l)[0];\n\t\t\tsetCursorPos(s, o.length);\n\t\t\t$('#UploadMask').hide();\n\t\t\t$('#UploadPreview').html('<img src=\"/i/twitpic/thumb/'+data['response']['id']+'\" />').fadeIn();\n\t\t}else{\n\t\t\terrorEndTip('Fail!');\n\t\t\t$(\"#UploadMedia\").show();\n\t\t\t$(\"#UploadMask\").hide();\n\t\t\t$(\"#UploadPreview\").hide();//\n\t\t}\n\t\ttry{$('#UploadForm')[0].reset()}\n\t\tcatch(e){$('#UploadForm').val('')}\n\t});\n\t$('#UploadFile').change(function(e){\n\t\tif($('#UploadFile').val()=='')return;\n\t\t$(\"#UploadMask\").show();\n\t\t$(\"#UploadMedia\").hide();\n\t\t$('#UploadForm').submit();\n\t});\n\t$('#UploadCancel').click(function(){\n\t\t$(\"#UploadArea\").hide();\n\t\t$(\"#UploadMedia\").show();\n\t\t$(\"#UploadMask\").hide();\n\t\t$(\"#UploadPreview\").html('').hide();\n\t});\n\t\n\t//delete\n\t$('.tweet .delete').live('click',function(e){\n\t\tvar ths=$(this);\n\t\tvar id=ths.closest('.tweet').attr('tid');\n\t\ttd.del(id,function(){\n\t\t\tths.closest('.tweet').remove();\n\t\t\tif(getConfig('tsn')==getConfig('osn')){\n\t\t\t\t$('#TweetsCount').text(Number($('#TweetsCount').text())-1);\n\t\t\t}\n\t\t});\n\t\te.preventDefault();return false;\n\t});\n\n\t//show\n\t$('.tweet .show-reply-to').live('click',function(e){\n\t\tvar ths=$(this);\n\t\tvar id=ths.closest('.tweet').attr('rid');\n\t\tvar 
quotex=ths.closest('.tweet').find('.reply-to');\n\t\tif(quotex.html()==''){\n\t\t\ttd.show(id, function(d){\n\t\t\t\tquotex.append(d.tweet);\n\t\t\t});\n\t\t}\n\t\te.preventDefault();return false;\n\t});\n\n\t//favorite\n\t$('.tweet .fav').live('click', function(e){\n\t\tvar ths = $(this);\n\t\tvar sid=ths.closest('.tweet').attr('sid');\n\t\tvar tid =ths.closest('.tweet').attr('tid');\n\t\tvar id=sid?sid:tid;\n\t\ttd.favorite(id,function(){\n\t\t\tths.removeClass('fav').addClass('unfav').text('★Favorite');\n\t\t\tths.closest('.tweet').addClass('favorited');\n\t\t},function(){\n\t\t\tths.text('☆Favorite');\n\t\t});\n\t\te.preventDefault();return false;\n\t});\n\t//unFavorite\n\t$('.tweet .unfav').live('click', function(e){\n\t\tvar ths = $(this);\n\t\tvar sid=ths.closest('.tweet').attr('sid');\n\t\tvar tid =ths.closest('.tweet').attr('tid');\n\t\tvar id=sid?sid:tid;\n\t\ttd.unFavorite(id,function(){\n\t\t\tths.removeClass('unfav').addClass('fav').text('☆Favorite');\n\t\t\tths.closest('.tweet').removeClass('favorited');\n\t\t},function(){\n\t\t\tths.text('★Favorite');\n\t\t});\n\t\te.preventDefault();return false;\n\t});\n\n\t//retweet\n\tvar active=false;\n\t$('.retweet').live('click',function(e){\n\t\tvar ths=$(this);\n\t\tvar sid=ths.closest('.tweet').attr('sid');\n\t\tvar tid =ths.closest('.tweet').attr('tid');\n\t\tvar id=sid?sid:tid;\n\t\ttd.retweet(id,function(d){\n\t\t\tths.text('Undo Retweet');\n\t\t\tths.closest('.tweet').attr('uid', d.tweet.id_str).addClass('retweeted');\n\t\t\tths.removeClass('retweet').addClass('unrt');\n\t\t\tif(getConfig('tsn')==getConfig('osn')){\n\t\t\t\t$('#TweetsCount').text(Number($('#TweetsCount').text())+1);\n\t\t\t}\n\t\t});\n\t\te.preventDefault();return false;\n\t});\n\n\t//undo retweet\n\tvar active=false;\n\t$('.unrt').live('click',function(e){\n\t\tvar ths=$(this);\n\t\tvar id=ths.closest('.tweet').attr('uid');\n\t\ttd.undoRetweet(id, function(){\n\t\t\tths.text('Retweet');\n\t\t\tths.closest('.tweet').removeAttr('uid').removeClass('retweeted');\n\t\t\tths.removeClass('unrt').addClass('retweet');\n\t\t\tif(getConfig('tsn')==getConfig('osn')){\n\t\t\t\t$('#TweetsCount').text(Number($('#TweetsCount').text())-1);\n\t\t\t}\n\t\t});\n\t\te.preventDefault();return false;\n\t});\n\n\t//quote\n\t$('.quote').live('click', function(e){\n\t\tvar name = $(this).closest('.tweet').children('.tweet-body').find('.tweet-heading .screen_name').text();\n\t\tvar text = $(this).closest('.tweet').children('.tweet-body').find('.tweet-text').text();\n\t\t$('#TweetForm').show();\n\t\tvar s=$('#Status').val('RT @'+name+': '+text).focus()[0];\n\t\tsetCursorPos(s,0);charLeft();\n\t\te.preventDefault();return false;\n\t});\n\n\t//reply\n\t$('.reply').live('click', function(e){\n\t\tvar ths=$(this);\n\t\tvar name=ths.closest('.tweet').children('.tweet-body').find('.tweet-heading .screen_name').text();\n\t\tvar sid =ths.closest('.tweet').attr('sid');\n\t\tvar tid =ths.closest('.tweet').attr('tid');\n\t\tvar id=sid?sid:tid;\n\t\tvar text=$(this).closest('.tweet').children('.tweet-body').find('.tweet-text').text();\n\t\t\n\t\tvar to='@'+name;\n\t\tvar at='',aa={};\n\t\tif( (m=text.match(/@[\\w\\d_]+/g))!=null ){\n\t\t\tfor(var i=0;i<m.length;i++){\n\t\t\t\taa[m[i]]=m[i];\n\t\t\t}\n\t\t\tdelete aa[to];\n\t\t\tdelete aa['@'+getConfig('tsn','')]\n\t\t}\n\t\tfor(var a in aa){at+=(a+' ');}\n\t\t$('#TweetForm').show();\n\t\t$('#In_reply_to_status_id').val(id);\n\t\tvar s = $('#Status').focus().val(to+' '+at)[0];\n\t\tsetSelectionRange(s, to.length+1, (to+' 
'+at).length);\n\t\tchangeHeading();charLeft();\n\t\te.preventDefault();return false;\n\t});\n\n\t//geo\n\t$('.show-geo').live('click', function(e){\n\t\t$(this).closest('.tweet').children('.tweet-body').find('.media-preview .geo').slideToggle();\n\t\te.stopPropagation();e.preventDefault();\n\t});\n\n\t//tweet common actions\n\t$('.tweet').live('mouseover', function(){\n\t\t$(this).addClass('over').children('.tweet-body').find('.tweet-bottom .tweet-action').show().css('display','block');\n\t}).live('mouseout',function(){\n\t\t$(this).removeClass('over').children('.tweet-body').find('.tweet-bottom .tweet-action').hide();\n\t});\n\n\tvar saved_tweet=getCookie('saved_tweet');\n\t$('#Status').val(saved_tweet==null?'':saved_tweet);\n\tchangeHeading();charLeft();\n\n});" }, { "alpha_fraction": 0.49787044525146484, "alphanum_fraction": 0.5019053816795349, "avg_line_length": 29.346939086914062, "blob_id": "d9726029c2a9088649ece3e2439338d43238c433", "content_id": "5cc8828c714343fa0fe2149b30869c06455bf20c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4461, "license_type": "no_license", "max_line_length": 85, "num_lines": 147, "path": "/ajax1.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\nfrom django.utils import simplejson as json\nfrom urllib import urlencode\nfrom twitdao import Twitdao\n\nimport md\nimport twitpic2\n\n\nclass UserTimeline(BaseHandler):\n def get(self, screen_name, slug):\n\n params = self.params([\n 'user_id',\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities',\n ],include_rts='true')\n\n token = md.get_proxy_access_token()\n #if not token:\n # token = md.get_proxy_access_token()\n if not token:\n self.write(json.dumps({\n 'success':False,\n 'info':'No access token available.',\n }))\n return\n\n td = Twitdao(token)\n owner_user = td.users_show_by_screen_name( screen_name=screen_name, **params)\n token_user = td.users_show_by_id(user_id = token.user_id)\n timeline = td.user_timeline(screen_name=screen_name, **params)\n tweets = self.render('ajax/user-user.html', {\n 'token':token,\n #'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n },out=False)\n\n if slug == 'refresh':\n next_params={}\n count=0\n if type(timeline) == list and len(timeline):\n next_params['since_id'] = str(timeline[0]['id'])\n count = len(timeline)\n else:\n tweets=''\n next_params['since_id'] = str(params['since_id'])\n count = 0\n\n self.write(json.dumps({\n 'success':True,\n 'info':'OK',\n 'tweets':tweets,\n 'params':next_params,\n 'count':count,\n }))\n else:\n next_params={}\n count=0\n if type(timeline) == list and len(timeline):\n next_params['max_id'] = str(timeline[-1]['id']-1)\n count = len(timeline)\n else:\n tweets=''\n next_params['max_id'] = str(params['max_id'])\n count = 0\n\n self.write(json.dumps({\n 'success':True,\n 'info':'OK',\n 'tweets':tweets,\n 'params':next_params,\n 'count':count,\n 'href':'/user/%s?%s' % (screen_name, urlencode(next_params))\n }))\n\nclass ShowStatus(BaseHandler):\n def get(self, status_id):\n\n params = self.params(['trim_user','include_entities'])\n \n token = md.get_proxy_access_token()\n if not token:\n self.redirect('/settings')\n return\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = 
token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=status_id,**params)\n\n self.render('tweet-show-proxy.html', {\n 'token':token,\n #'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\nclass AjaxShowStatus(BaseHandler):\n def get(self, id):\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.write(json.dumps({\n 'success':False,\n 'info':'No access token available.',\n }))\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n tweet = td.statuses_show(id=id, **params)\n tweet_html = self.render('ajax/user-tweet.html', {\n 'token':token,\n #'token_user':token_user,\n 'tweet':tweet,\n }, out=False)\n\n self.write(json.dumps({\n 'tweet':tweet_html if 'error' not in tweet else None,\n 'success':'error' not in tweet,\n 'info':tweet['error'] if 'error' in tweet else 'OK',\n }))\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/x1/user/([0-9a-zA-Z_]+)/(refresh|more)', UserTimeline),\n ('/x1/statuses/([0-9]+)', ShowStatus),\n ('/x1/show/([0-9]+)', AjaxShowStatus),\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5581563711166382, "alphanum_fraction": 0.5629826188087463, "avg_line_length": 27.979021072387695, "blob_id": "6de20c83cfec18ed7206e871745ae4b10f3fa3d8", "content_id": "65ddd7355a63c0f7262a1320e8a5abe51ea70bc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8288, "license_type": "no_license", "max_line_length": 105, "num_lines": 286, "path": "/settings.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\n\nfrom base import BaseHandler\nfrom twitdao import Twitdao\n\nimport md\n\nimport random\nimport os\n\n\ndef _generate_id(length=64):\n '''Generate a cookie id. 
'''\n s = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'\n a = []\n for i in range(length):\n a.append(random.choice(s))\n return ''.join(a)\n\n\nclass Auth(BaseHandler):\n def get(self):\n url = self.param('url')\n if not url:\n url='%s://%s/settings' % (self.request.scheme, os.environ['HTTP_HOST'])\n callback='%s://%s/settings/callback?url=%s' % (self.request.scheme, os.environ['HTTP_HOST'], url)\n\n td=Twitdao()\n request_token = td.fetch_request_token(callback=callback)\n if not request_token and users.is_current_user_admin():\n self.redirect('/config')\n return\n elif not request_token:\n self.redirect('/settings')\n return\n\n cookie_id = _generate_id()\n memcache.set(cookie_id, request_token)\n self.set_cookie('cid', cookie_id)\n self.redirect(td.get_authorize_url(request_token, force_login=True))\n\n\nclass AuthCallback(BaseHandler):\n def get(self):\n denied = self.param('denied', default_value=None)\n\n if denied:\n self.render('denied.html')\n return\n\n oauth_verifier = self.param('oauth_verifier')\n cookie_id = self.get_cookie('cid','')\n request_token = memcache.get(cookie_id)\n\n if not request_token or 'oauth_token' not in request_token:\n self.delete_cookie('cid')\n self.error(404)\n return\n\n td = Twitdao(md.AccessToken(\n oauth_token=request_token['oauth_token'],\n oauth_token_secret=request_token['oauth_token_secret']\n ))\n\n access_token = td.fetch_access_token(oauth_verifier)\n\n md.save_access_token(\n user_id=access_token['user_id'],\n screen_name=access_token['screen_name'],\n oauth_token=access_token['oauth_token'],\n oauth_token_secret=access_token['oauth_token_secret'],\n app_user = users.get_current_user()\n )\n\n self.delete_cookie('cid')\n self.redirect(self.param('url'))\n\n\nclass Settings(BaseHandler):\n def get(self):\n cursor=self.param('cursor', default_value=None)\n default_token = md.get_default_access_token()\n tokens, cursor = md.get_user_access_tokens(users.get_current_user(), 10, cursor)\n\n self.render('settings.html', {\n 'default_token':default_token,\n 'tokens':tokens,\n 'cursor':cursor,\n 'where':'settings'\n })\n\n\nclass SetDefaultToken(BaseHandler):\n def post(self):\n token_key = self.param('token_key')\n token = md.get_access_token(token_key, users.get_current_user())\n md.set_default_access_token(token)\n self.redirect('/settings')\n\n\nclass DeleteToken(BaseHandler):\n def post(self):\n token_key = self.param('token_key')\n t = md.delete_access_token(token_key, users.get_current_user())\n self.redirect('/settings')\n\n\nclass SettingsProfile(BaseHandler):\n def get(self):\n tk=self.param('tk')\n if not tk:\n self.error(404)\n return\n\n token = md.get_access_token(tk, users.get_current_user())\n if not token:\n self.redirect('/settings')\n return\n\n td=Twitdao(token)\n token_user=td.users_show_by_id(user_id=token.user_id, _twitdao_force_refresh=True)\n\n self.render('settings-profile.html', {\n 'token_key':tk,\n 'token':token,\n 'token_user':token_user,\n 'owner_user':token_user,\n 'where':'settings-profile'\n })\n\n def post(self):\n tk=self.param('tk')\n if not tk:\n self.error(404)\n return\n\n\n token = md.get_access_token(tk, users.get_current_user())\n if not token:\n self.redirect('/settings')\n return\n\n td=Twitdao(token)\n \n image=self.param('picture')\n if image:\n filename=self.request.POST[u'picture'].filename.encode('utf-8')\n td.account_update_profile_image(('image', filename, image))\n\n params=self.params(['name', 'url', 'location', 'description', 'include_entities'])\n for k in params:\n 
params[k]=params[k].encode('utf-8')\n td.account_update_profile(**params)\n\n self.redirect('/settings/profile?tk=%s' % tk)\n\n\nclass SettingsDesign(BaseHandler):\n def get(self):\n tk=self.param('tk')\n if not tk:\n self.error(404)\n return\n\n token = md.get_access_token(tk, users.get_current_user())\n if not token:\n self.redirect('/settings')\n return\n\n td=Twitdao(token)\n token_user=td.users_show_by_id(user_id=token.user_id, _twitdao_force_refresh=True)\n\n self.render('settings-design.html', {\n 'token_key':tk,\n 'token':token,\n 'token_user':token_user,\n 'owner_user':token_user,\n 'where':'settings-design'\n })\n\n def post(self):\n tk=self.param('tk')\n if not tk:\n self.error(404)\n return\n\n ds_type=self.param('ds_type')\n\n token = md.get_access_token(tk, users.get_current_user())\n if not token:\n self.redirect('/settings')\n return\n\n td=Twitdao(token)\n if ds_type == 'colors':\n params=self.params([\n 'profile_background_color',\n 'profile_text_color',\n 'profile_link_color',\n 'profile_sidebar_fill_color',\n 'profile_sidebar_border_color',\n 'include_entities',\n ])\n td.account_update_profile_colors(**params)\n elif ds_type == 'background':\n image=self.param('image')\n if image:\n params=self.params(['tile','include_entities'])\n for k in params:\n params[k]=params[k].encode('utf-8')\n filename=self.request.POST[u'image'].filename.encode('utf-8')\n td.account_update_profile_background_image(('image', filename, image), **params)\n\n self.redirect('/settings/design?tk=%s' % tk)\n\n\nclass SettingsTwitdao(BaseHandler):\n def get(self):\n tk=self.param('tk')\n if not tk:\n self.error(404)\n return\n\n token = md.get_access_token(tk, users.get_current_user())\n if not token:\n self.redirect('/settings')\n return\n\n td=Twitdao(token)\n token_user=td.users_show_by_id(user_id=token.user_id)\n\n self.render('settings-twitdao.html', {\n 'token_key':tk,\n 'token':token,\n 'token_user':token_user,\n 'owner_user':token_user,\n 'where':'settings-twitdao'\n })\n\n def post(self):\n tk=self.param('tk')\n if not tk:\n self.error(404)\n return\n\n ds_type=self.param('ds_type')\n\n token = md.get_access_token(tk, users.get_current_user())\n if not token:\n self.redirect('/settings')\n return\n\n show_media=self.param('show_media')\n\n settings={}\n settings['show_media']=True if show_media=='True' else False\n\n md.set_token_settings(tk, users.get_current_user(), **settings)\n\n self.redirect('/settings/twitdao?tk=%s' % tk)\n\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/settings', Settings),\n ('/settings/auth', Auth),\n ('/settings/callback', AuthCallback),\n ('/settings/delete_token', DeleteToken),\n ('/settings/set_default_token', SetDefaultToken),\n\n ('/settings/profile', SettingsProfile),\n ('/settings/design', SettingsDesign),\n ('/settings/twitdao', SettingsTwitdao),\n #('/settings/sync', SettingsSync), #TODO\n\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.49925559759140015, "alphanum_fraction": 0.5245657563209534, "avg_line_length": 19.149999618530273, "blob_id": "fcbb950ba05fafa37feddcebbf87121043c788dd", "content_id": "61525b9afc4dc7da4d816f023999a84327801856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2015, "license_type": "no_license", "max_line_length": 66, "num_lines": 100, "path": "/index.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import 
webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import users\n\nfrom base import BaseHandler\n\nimport md\n\n_mobile = [\n '2.0 MMP',\n '240x320',\n '400X240',\n 'AvantGo',\n 'BlackBerry',\n 'Blazer',\n 'Cellphone',\n 'Danger',\n 'DoCoMo',\n 'Elaine/3.0',\n 'EudoraWeb',\n 'Googlebot-Mobile',\n 'hiptop',\n 'IEMobile',\n 'KYOCERA/WX310K',\n 'LG/U990',\n 'MIDP-2.',\n 'MMEF20',\n 'MOT-V',\n 'NetFront',\n 'Newt',\n 'Nintendo Wii',\n 'Nitro', #Nintendo DS\n 'Nokia',\n 'Opera Mini',\n 'Opera Mobi', #Opera Mobile\n 'Palm',\n 'PlayStation Portable',\n 'portalmmm',\n 'Proxinet',\n 'ProxiNet',\n 'SHARP-TQ-GX10',\n 'SHG-i900',\n 'Small',\n 'SonyEricsson',\n 'Symbian OS',\n 'SymbianOS',\n 'TS21i-10',\n 'UP.Browser',\n 'UP.Link',\n 'webOS', #Palm Pre, etc.\n 'Windows CE',\n 'WinWAP',\n 'YahooSeeker/M1A1-R2D2',\n]\n\n_touch = [\n 'iPhone',\n 'iPod',\n 'Android',\n 'BlackBerry9530',\n 'LG-TU915 Obigo', #LG touch browser\n 'LGE VX',\n 'webOS', #Palm Pre, etc.\n 'Nokia5800',\n]\n\ndef _is_mobile(ua):\n for b in _mobile + _touch:\n if ua.find(b)!=-1:\n return True\n return False\n\n\nclass Index(BaseHandler):\n def get(self):\n if not users.get_current_user():\n login_url = users.create_login_url(\"/\")\n self.render('index.html', {'login_url':login_url})\n else:\n default_token = md.get_default_access_token()\n if default_token:\n if _is_mobile(self.request.headers['user-agent']):\n self.redirect('/m/u-/home')\n else:\n self.redirect('/t')\n return\n else:\n self.redirect('/settings')\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/', Index),\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5691097378730774, "alphanum_fraction": 0.5776811838150024, "avg_line_length": 22.79310417175293, "blob_id": "2e838af1ec4e25efc961ea4c50f008db510b50fc", "content_id": "104939ba74223bb2f45ea8f8ee21995dd74315cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 24220, "license_type": "no_license", "max_line_length": 103, "num_lines": 1015, "path": "/static/js/common.js", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "function getConfig(name, defval){\n\tif(typeof $c!='undefined'){\n\t\treturn $c[name];\n\t}\n\treturn defval;\n}\n\nfunction charLeft(num) {\n\tif(!num)num=140;\n\tvar tweet=$(\"#Status\").val();\n\tif(tweet==null||typeof tweet.length=='undefined')return;\n\tvar left=num-tweet.length;\n\tif(left<0){$(\"#CharCounter\").css(\"color\",\"#C00\");}\n\telse{$(\"#CharCounter\").css(\"color\",\"#000\");}\n\t$(\"#CharCounter\").html(left);\n\tsetCookie('saved_tweet',tweet.substr(0,280),3600*24*30);\n}\n\nfunction changeHeading(){\n\tvar text=$('#Status').val();\n\tif(text==null||typeof text.match=='undefined')return;\n\tif((m=text.match(/^@([\\w\\d_]+).*/))!=null){\n\t\t$('#TweetForm h3').text('Reply to '+m[1]);\n\t}else{\n\t\t$('#In_reply_to_status_id').val('');\n\t\t$('#TweetForm h3').text('What\\'s happening?');\n\t}\n}\n\nfunction updateDate(){\n\t$(\".created-at\").each(function(){\n\t\tvar date = new Date(Number($(this).attr(\"time\"))),\n\t\t\tnow = new Date(),\n\t\t\tdiffer = (now - date)/1000,\n\t\t\tdateFormated = '';\n\t\t\n\t\tif(differ <= 0){\n\t\t\tdateFormated='Just now!';\n\t\t}else if(differ < 60){\n\t\t\tdateFormated = Math.ceil(differ) + \" seconds ago\";\n\t\t}else if(differ < 3600){\n\t\t\tdateFormated = Math.ceil(differ/60) + \" minutes ago\";\n\t\t}else if(differ < 3600*24){\n\t\t\tdateFormated = 
\"about \" + Math.ceil(differ/3600) + \" hours ago\";\n\t\t}/*else if(differ < 3600*24*7){\n\t\t\tdateFormated = \"about \" + Math.floor(differ/3600/24) + \" days ago\";\n\t\t}*/else{\n\t\t\tdateFormated = date.toLocaleString();\n\t\t}\n\t\t$(this).text(dateFormated);\n\t});\n}\n\nfunction startTip(text){\n\t$('#Tip').text(text).fadeIn('fast');\n}\nfunction endTip(text){\n\t$('#Tip').text(text).fadeIn('fast');\n\tsetTimeout(function(){\n\t\t$('#Tip').fadeOut();\n\t},1500);\n}\nfunction errorEndTip(text){\n\t$('#Tip').text(text).fadeIn('fast').addClass('error');\n\tsetTimeout(function(){\n\t\t$('#Tip').fadeOut(function(){\n\t\t\t$(this).removeClass('error');\n\t\t});\n\t},1500);\n}\n\nfunction startTinyTip(text){\n\t$('#TinyTip').text(text).show();\n}\nfunction endTinyTip(text){\n\tif(text){\n\t\t$('#TinyTip').text(text).fadeOut();\n\t}else{\n\t\t$('#TinyTip').fadeOut();\n\t}\n}\n\nfunction setCookie(name, value, seconds) {\n\tvalue=encodeURIComponent(value);\n\tif (typeof(seconds) != 'undefined') {\n\t\tvar date = new Date();\n\t\tdate.setTime(date.getTime() + (seconds*1000));\n\t\tvar expires = \"; expires=\" + date.toGMTString();\n\t}else{\n\t\tvar expires = \"\";\n\t}\n\tdocument.cookie = name+\"=\"+value+expires+\"; path=/\";\n}\nfunction getCookie(name) {\n\tname = name + \"=\";\n\tvar carray = document.cookie.split(';');\n\tfor(var i=0;i < carray.length;i++) {\n\t\tvar c = carray[i];\n\t\twhile (c.charAt(0)==' ') c = c.substring(1,c.length);\n\t\tif (c.indexOf(name) == 0) return decodeURIComponent(c.substring(name.length,c.length));\n\t}\n\treturn null;\n}\nfunction deleteCookie(name) {\n\tsetCookie(name, \"\", -1);\n}\n\n\n\nfunction setSelectionRange(input, startIndex, stopIndex){\n\tif (input.setSelectionRange){\n\t\tinput.setSelectionRange(startIndex, stopIndex);\n\t\tinput.focus();\n\t}else if(input.createTextRange){//IE\n\t\tvar range = input.createTextRange();\n\t\trange.collapse(true);\n\t\trange.moveStart('character', startIndex);\n\t\trange.moveEnd('character', stopIndex - startIndex);\n\t\trange.select();\n\t}\n}\nfunction setCursorPos(input, pos){\n\tsetSelectionRange(input, pos, pos);\n}\n\nfunction twitdao(){\n\tvar _={};\n\t_.del_active=false;\n\t_.fav_active=false;\n\t_.ret_active=false;\n\t_.upd_active=false;\n\t_.fol_active=false;\n\t_.blk_active=false;\n\t_.rep_active=false;\n\t_.shw_active=false;\n\t_.sed_active=false;//message\n\t_.del=function(id, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tif(!confirm('Are you sure you want to delete this tweet?'))return;\n\t\tstartTip('Deleting...');\n\t\tif(!t.del_active){\n\t\t\tt.del_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/delete/'+id,\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.del_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip('Tweet deleted!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t\terrorEndTip('Oops! 
Unknown Error!');\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.del_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.show=function(id, success, error, start, end){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTinyTip('Loading...');\n\t\tif(!t.shw_active){\n\t\t\tt.shw_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"GET\",\n\t\t\t\t'url':'/x/show/'+id,\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.shw_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTinyTip('Ok!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t\tendTinyTip(d.info);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t\tendTinyTip('Oops! Unknown Error!');\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.shw_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.retweet=function(id,success,error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Retweeting...');\n\t\tif(!t.ret_active){\n\t\t\tt.ret_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/retweet/'+id,\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.ret_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip('Retweeted!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Unknown Error!');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.ret_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.undoRetweet=function(id, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to undo Retweet...');\n\t\tif(!t.ret_active){\n\t\t\tt.ret_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/delete/'+id,\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.ret_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip('Success!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Unknown Error!');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.ret_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.favorite=function(id, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to add favorite tweet...');\n\t\tif(!t.fav_active){\n\t\t\tt.fav_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/favorite/'+id+'/create',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.fav_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip('Favorited!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! 
Something going wrong.');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.fav_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.unFavorite=function(id,success,error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to delete favorite tweet...');\n\t\tif(!t.fav_active){\n\t\t\tt.fav_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/favorite/'+id+'/delete',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.fav_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip('UnFavorited!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Something going wrong.');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.fav_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.update=function(status, in_reply_to_status_id, others, success, error){\n\t\tvar isf=$.isFunction(others);\n\t\terror=isf?success:error;\n\t\tsuccess=isf?others:success;\n\t\tothers=isf?{}:others;\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tif(status.length>140){\n\t\t\terrorEndTip('Status is over 140 characters.');\n\t\t\terror();\n\t\t\treturn;\n\t\t};\n\t\tstartTip('Updating status...');\n\t\t\tif(!t.upd_active){\n\t\t\tt.upd_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/update',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'data':$.extend({'in_reply_to_status_id':in_reply_to_status_id, 'status':status}, others),\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.upd_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tcharLeft();\n\t\t\t\t\t\tendTip('Status updated successfully!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Unknown Error!');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){\n\t\t\t\t\tt.upd_active=false;\n\t\t\t\t\terror();\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t};\n\t_.follow=function(screen_name, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to Follow '+ screen_name +' ...');\n\t\tif(!t.fol_active){\n\t\t\tt.fol_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/friends/'+screen_name+'/make',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.fol_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip(screen_name+' is your friend now!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! 
Something went wrong.');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.fav_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.unFavorite=function(id,success,error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to delete favorite tweet...');\n\t\tif(!t.fav_active){\n\t\t\tt.fav_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/favorite/'+id+'/delete',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.fav_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip('UnFavorited!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Something went wrong.');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.fav_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.update=function(status, in_reply_to_status_id, others, success, error){\n\t\tvar isf=$.isFunction(others);\n\t\terror=isf?success:error;\n\t\tsuccess=isf?others:success;\n\t\tothers=isf?{}:others;\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tif(status.length>140){\n\t\t\terrorEndTip('Status is over 140 characters.');\n\t\t\terror();\n\t\t\treturn;\n\t\t};\n\t\tstartTip('Updating status...');\n\t\t\tif(!t.upd_active){\n\t\t\tt.upd_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/update',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'data':$.extend({'in_reply_to_status_id':in_reply_to_status_id, 'status':status}, others),\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.upd_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tcharLeft();\n\t\t\t\t\t\tendTip('Status updated successfully!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Unknown Error!');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){\n\t\t\t\t\tt.upd_active=false;\n\t\t\t\t\terror();\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t};\n\t_.follow=function(screen_name, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to Follow '+ screen_name +' ...');\n\t\tif(!t.fol_active){\n\t\t\tt.fol_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/friends/'+screen_name+'/make',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.fol_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip(screen_name+' is your friend now!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! 
Something went wrong.');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.fol_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.unFollow=function(screen_name, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to unfollow '+screen_name+'...');\n\t\tif(!t.fol_active){\n\t\t\tt.fol_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/friends/'+screen_name+'/break',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.fol_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip(screen_name+' is not your friend anymore.');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! Something went wrong.');\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.fol_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\t_.block=function(screen_name, success, error){\n\t\tif(!$.isFunction(success))success=function(){}\n\t\tif(!$.isFunction(error))error=function(){}\n\t\tvar t=this;\n\t\tstartTip('Trying to block '+screen_name+' ...');\n\t\tif(!t.blk_active){\n\t\t\tt.blk_active=true;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"POST\",\n\t\t\t\t'url':'/x/block/'+screen_name+'/add',\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tt.blk_active=false;\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tendTip(screen_name+' is blocked!');\n\t\t\t\t\t\tsuccess(d);\n\t\t\t\t\t}else if(d&&d.info){\n\t\t\t\t\t\terrorEndTip(d.info);\n\t\t\t\t\t\terror(d);\n\t\t\t\t\t}else{\n\t\t\t\t\t\terrorEndTip('Oops! 
Unknown Error!');\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t'error':function(){t.dms_active=false;error();}\n\t\t\t});\n\t\t}\n\t};\n\treturn _;\n}\n\nfunction more(more_url, params){\n\tvar _={};\n\t_.url=more_url;\n\t_.params=params;\n\t_.active=false;\n\t_.count=0;\n\t_.do_more=function(callback){\n\t\tvar current=document.location.pathname;\n\t\tvar t=this;\n\t\t$('#pagination .loading').show();\n\t\t$('#pagination .more').hide();\n\t\t$.ajax({\n\t\t\t'type':\"GET\",\n\t\t\t'url':t.url,\n\t\t\t'data':t.params,\n\t\t\t'dataType':'json',\n\t\t\t'success':function(d){\n\t\t\t\tif(d&&d.success){\n\t\t\t\t\t$('#pagination .loading').hide();\n\t\t\t\t\tif(d.count<=0){return;}\n\t\t\t\t\t$('#Timeline .tweets').append(d.tweets);\n\t\t\t\t\t$('#pagination .more').attr('href',d.href||'').show();\n\t\t\t\t\tt.params=d.params;\n\t\t\t\t}\n\t\t\t\tupdateDate();\n\t\t\t\tcallback(d);\n\t\t\t},\n\t\t\t'error':function(){\n\t\t\t\t$('#pagination .more').show();\n\t\t\t\t$('#pagination .loading').hide();\n\t\t\t\tcallback();\n\t\t\t}\n\t\t});\n\t};\n\t_.next=function(){\n\t\tvar t=this;\n\t\tif(!t.active){\n\t\t\tt.active=true;\n\t\t\tt.do_more(function(){\n\t\t\t\tt.active=false;\n\t\t\t\tt.count++;\n\t\t\t});\n\t\t}\n\t};\n\t$(function(){\n\t\t$('#pagination .more').live('click', function(e){\n\t\t\t_.next();e.preventDefault();\n\t\t});\n\t\tvar max_more=100, preload=50+50;\n\t\t$(window).bind('scroll',function(){\n\t\t\tif( _.count<max_more && ($('body').height()-preload) < ($(window).height()+$(window).scrollTop()) ){\n\t\t\t\t_.next();\n\t\t\t}else if($('#Timeline .tweets').length>this.page_size){\n\t\t\t\t_.collapse();\n\t\t\t}\n\t\t});\n\t});\n\treturn _;\n}\n\nfunction refresh(url, params){\n\n\tvar live_initial=3000; //恢复时间\n\tvar live_interval=70*1000; //显示间隔时间\n\n\tvar interval=70*1000;\n\tvar min_interval=70*1000; //最短刷新时(暂定)\n\tvar max_interval = 5*60*1000; //最长刷新间隔(暂定)\n\tvar decay=1.5; //刷新间隔调整系数(暂定)\n\n\tvar counter=0;//for logging.\n\tvar otitle=document.title;\n\n\tvar console=window.console||{log:function(){}};\n\t//console={log:function(){}};\n\n\tvar live={\n\t\ttimer:null,\n\t\tpaused:false,\n\t\tstart:function(){\n\t\t\tif(this.paused)return;\n\t\t\tvar t=this;\n\t\t\tif(t.timer==null){\n\t\t\t\tconsole.log('%s:live.start()',counter++);\n\t\t\t\tt.timer=setTimeout(function(){\n\t\t\t\t\tt.run();\n\t\t\t\t}, live_initial);\n\t\t\t\tmonitor.update_status();\n\t\t\t}\n\t\t},\n\t\tstop:function(){\n\t\t\tconsole.log('%s:live.stop()',counter++);\n\t\t\ttry{\n\t\t\t\tclearTimeout(this.timer);\n\t\t\t}finally{\n\t\t\t\tthis.timer=null;\n\t\t\t\tmonitor.update_status();\n\t\t\t}\n\t\t},\n\t\tpause:function(){\n\t\t\tconsole.log('%s:live.pause()',counter++);\n\t\t\tthis.paused=true;\n\t\t\tthis.stop();\n\t\t\t$('#Pause').text('Paused').animate({'width':70},200);\n\t\t\ttry{clearTimeout(this.count_down_timer);}finally{}\n\t\t\tmonitor.update_status();\n\t\t},\n\t\tresume:function(){\n\t\t\tconsole.log('%s:live.resume()',counter++);\n\t\t\tthis.paused=false;\n\t\t\tthis.start();\n\t\t\tvar t=this;\n\t\t\tfunction cd(c){\n\t\t\t\tif(c<1){\n\t\t\t\t\tvar 
$p=$('#Pause').text('Action');\n\t\t\t\t\tsetTimeout(function(){\n\t\t\t\t\t\t$p.animate({'width':0},200,function(){$(this).hide();});\n\t\t\t\t\t},500)\n\t\t\t\t\treturn;\n\t\t\t\t}else{\n\t\t\t\t\t$('#Pause').text(Math.round(c));\n\t\t\t\t}\n\t\t\t\tt.count_down_timer=setTimeout(function(){cd(c-1);},1000);\n\t\t\t};\n\t\t\tcd(Math.round(live_initial/1000));\n\t\t\tmonitor.update_status();\n\t\t},\n\t\trun:function(){\n\t\t\tconsole.log('%s:live.run()', counter++);\n\n\t\t\tvar t=this;\n\t\t\tt.timer=setTimeout(function(){\n\t\t\t\tt.run();\n\t\t\t}, live_interval);\n\n\t\t\tvar i=monitor.get();\n\t\t\t// simple\n\t\t\t// $(i).hide().prependTo('#Timeline .tweets').slideDown();\n\n\t\t\t// \n\t\t\tvar $t=$(i).prependTo('#Timeline .tweets');\n\t\t\tvar h=$t.height()+parseInt($t.css('padding-top'))+parseInt($t.css('padding-bottom'));\n\t\t\t$t.css({'margin-top':-h}).animate({'margin-top':0});//10*h\n\n\t\t\tmonitor.update_status();\n\t\t}\n\t};\n\n\tvar normal={\n\t\tstart:function(){\n\t\t\tconsole.log('%s:normal.start()',counter++);\n\t\t\tif(monitor.size()>0){\n\t\t\t\t$('#Notifier').slideDown();\n\t\t\t}\n\t\t\tmonitor.update_status();\n\t\t},\n\t\tstop:function(){\n\t\t\tconsole.log('%s:normal.stop()',counter++);\n\t\t\t$('#Notifier').hide();\n\t\t\tmonitor.update_status();\n\t\t},\n\t\tpause:function(){monitor.update_status();},\n\t\tresume:function(){monitor.update_status();},\n\t\trun:function(){\n\t\t\tconsole.log('%s:normal.run()',counter++);\n\t\t\t$('#Notifier').hide();\n\t\t\t$('.separator').removeClass('separator');\n\t\t\tvar ii=monitor.get_all().reverse();\n\t\t\tvar $ii=$(ii);\n\t\t\t$ii.last().addClass('separator');\n\t\t\t$('#Timeline .tweets').prepend($ii);\n\t\t\t//$('#Timeline .tweets').prepend(ii.reverse());\n\t\t\tmonitor.update_status();\n\t\t}\n\t};\n\n\tvar mode=(getCookie('LM'))?live:normal;\n\n\tvar producer={\n\t\ttimer:null,\n\t\tstart:function(){\n\t\t\tvar t=this;\n\t\t\tif(t.timer==null){\n\t\t\t\tconsole.log('%s:producer.start()',counter++);\n\t\t\t\tt.timer=setTimeout(function(){\n\t\t\t\t\tt.run();\n\t\t\t\t}, interval);\n\t\t\t}\n\t\t\tmonitor.update_status();\n\t\t},\n\t\tstop:function(){\n\t\t\tconsole.log('%s:producer.stop()',counter++);\n\t\t\ttry{\n\t\t\t\tclearTimeout(this.timer);\n\t\t\t}finally{\n\t\t\t\tthis.timer=null;\n\t\t\t}\n\t\t\tmonitor.update_status();\n\t\t},\n\t\trun:function(){\n\t\t\tvar t=this;\n\t\t\tt.timer = setTimeout(function(){\n\t\t\t\tif(!t.active){\n\t\t\t\t\tt.active=true;\n\t\t\t\t\tt.feed(function(){\n\t\t\t\t\t\tt.active=false;\n\t\t\t\t\t\tmonitor.update_status();\n\t\t\t\t\t\tif(t.timer==null)return;//?\n\t\t\t\t\t\tt.run();\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t},interval);\n\t\t},\n\t\tparams:params,\n\t\turl:url,\n\t\tfeed:function(callback){\n\t\t\tvar t=this;\n\t\t\t$.ajax({\n\t\t\t\t'type':\"GET\",\n\t\t\t\t'url':t.url,\n\t\t\t\t'data':t.params,\n\t\t\t\t'dataType':'json',\n\t\t\t\t'success':function(d){\n\t\t\t\t\tif(d&&d.success){\n\t\t\t\t\t\tif(d.params)t.params=d.params;\n\t\t\t\t\t\tif(d.count>0){\n\t\t\t\t\t\t\tmonitor.put($(d.tweets).filter(function(){return this.nodeType==1}).toArray().reverse());\n\t\t\t\t\t\t\tinterval=(interval/decay)<min_interval?min_interval:(interval/decay);\n\t\t\t\t\t\t}else{\n\t\t\t\t\t\t\tinterval=(interval*decay)>max_interval?max_interval:(interval*decay);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tconsole.log('%s:producer.feed(), data.count=%s, interval=%s',counter++, d.count, 
interval);\n\t\t\t\t\t}\n\t\t\t\t\tcallback(d);\n\t\t\t\t},\n\t\t\t\t'error':function(){\n\t\t\t\t\tcallback();\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t};\n\n\tvar monitor={\n\t\tcapacity:1000,\n\t\tbuffer:[],\n\t\tput:function(i){\n\t\t\tif($.isArray(i)){\n\t\t\t\tthis.buffer.push.apply(this.buffer, i);\n\t\t\t}else{\n\t\t\t\tthis.buffer.push(i);\n\t\t\t}\n\t\t\tconsole.log('%s:monitor.put(), length=%s',counter++,this.buffer.length);\n\t\t\tif(this.buffer.length>this.capacity){\n\t\t\t\tproducer.stop();\n\t\t\t}\n\t\t\tif(this.buffer.length>0){\n\t\t\t\tmode.start();\n\t\t\t}\n\t\t},\n\t\tget:function(){\n\t\t\tvar i=this.buffer.shift();\n\t\t\tconsole.log('%s:monitor.get(), length=%s',counter++,this.buffer.length);\n\t\t\tif(this.buffer.length<=this.capacity){\n\t\t\t\tproducer.start();\n\t\t\t}\n\t\t\tif(this.buffer.length<=0){\n\t\t\t\tmode.stop();\n\t\t\t}\n\t\t\treturn i;\n\t\t},\n\t\tget_all:function(){\n\t\t\tvar i=this.buffer;\n\t\t\tthis.buffer=[];\n\t\t\tconsole.log('%s:monitor.get_all(), length=%s',counter++,this.buffer.length);\n\t\t\tproducer.start();\n\t\t\tmode.stop();\n\t\t\treturn i;\n\t\t},\n\t\tsize:function(){\n\t\t\treturn this.buffer.length;\n\t\t},\n\t\tupdate_status:function(){\n\t\t\tvar t=this;\n\t\t\tif(updateDate)updateDate();\n\t\t\tt.set_notifier_text();\n\t\t\tsetTimeout(function(){t.set_title();}, 200);\n\t\t},\n\t\tset_notifier_text:function(){\n\t\t\tif(this.buffer.length>this.capacity){\n\t\t\t\t$('#Notifier').text(this.capacity+'+ new tweets.');\n\t\t\t}else if(this.buffer.length>1){\n\t\t\t\t$('#Notifier').text(this.buffer.length+' new tweets');\n\t\t\t}else if(this.buffer.length==1){\n\t\t\t\t$('#Notifier').text('1 new tweet.');\n\t\t\t}else{\n\t\t\t\t$('#Notifier').text('Look! A dragonfly. ');\n\t\t\t}\n\t\t},\n\t\tset_title:function(){\n\t\t\tif(mode.paused){\n\t\t\t\tdocument.title=otitle+' [Paused]';\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tif(this.buffer.length>this.capacity){\n\t\t\t\tdocument.title=otitle+' ('+this.capacity+'+)';\n\t\t\t}else if(this.buffer.length>0){\n\t\t\t\tdocument.title=otitle+' ('+this.buffer.length+')';\n\t\t\t}else{\n\t\t\t\tdocument.title=otitle;\n\t\t\t}\n\t\t}\n\t};\n\n\tvar _={\n\t\tpause:function(){\n\t\t\tmode.pause();\n\t\t},\n\t\tresume:function(){\n\t\t\tmode.resume();\n\t\t},\n\t\tflush:function(){\n\t\t\tnormal.run();\n\t\t},\n\t\ton:function(){\n\t\t\tmode=live;\n\t\t\tlive.start();\n\t\t\tnormal.stop();\n\t\t\t$('#Mode').text('Live Mode').addClass('live-mode');\n\t\t\tsetCookie('LM','t',3600*24*30);\n\t\t},\n\t\toff:function(){\n\t\t\tmode=normal;\n\t\t\tnormal.start();\n\t\t\tlive.stop();\n\t\t\t$('#Mode').text('Live Mode').removeClass('live-mode');\n\t\t\t$('#Pause').animate({'width':0},200,function(){$(this).hide();});\n\t\t\tdeleteCookie('LM');\n\t\t},\n\t\tstart:function(){\n\t\t\tproducer.start();\n\t\t}\n\t};\n\n\t$(function(){\n\t\t$('#Notifier').click(function(e){\n\t\t\t_.flush();\n\t\t\te.preventDefault();\n\t\t});\n\t\tvar paused=false;\n\t\t$(document).keydown(function(e){\n\t\t\tif(e.keyCode==27){//Esc\n\t\t\t\tif(paused){\n\t\t\t\t\t_.resume();\n\t\t\t\t\tpaused=false;\n\t\t\t\t}else{\n\t\t\t\t\t_.pause();\n\t\t\t\t\tpaused=true;\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\tvar hasFocus = true;\n\t\tvar active_element;\n\t\tfunction setFocusEvents(){\n\t\t\tactive_element = document.activeElement;\n\t\t\tif 
($.browser.msie){\n\t\t\t\t$(document).bind('focusout',function(){onblur();});\n\t\t\t\t$(document).bind('focusin',function(){onfocus();});\n\t\t\t}else{\n\t\t\t\t$(window).bind('blur',function(){onblur();});\n\t\t\t\t$(window).bind('focus',function(){onfocus();});\n\t\t\t}\n\t\t}\n\t\tfunction onfocus()\t{\n\t\t\tif(!hasFocus){\n\t\t\t\t_.resume();\n\t\t\t}\n\t\t\thasFocus = true;\n\t\t}\n\t\tfunction onblur()\t{\n\t\t\tif (active_element != document.activeElement) {\n\t\t\t\tactive_element = document.activeElement;\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tif(hasFocus){\n\t\t\t\t_.pause();\n\t\t\t}\n\t\t\thasFocus = false;\n\t\t}\n\t\tsetFocusEvents();\n\n\t\tif(getCookie('LM')=='t'){\n\t\t\t$('#Mode').text('Live Mode').addClass('live-mode');\n\t\t\t$('#Mode').toggle(function(){_.off();},function(){_.on()});\n\t\t}else{\n\t\t\t$('#Mode').text('Live Mode').removeClass('live-mode');\n\t\t\t$('#Mode').toggle(function(){_.on();},function(){_.off()});\n\t\t}\n\t});\n\n\treturn _;\n}\n\n//common actions\n$(function(){\n\t$.ajaxSetup({\n\t\t'cache':false,\n\t\t'error':function(xhr,text){\n\t\t\terrorEndTip(text);\n\t\t},\n\t\t'timeout':10000\n\t});\n\n\t$('#Nav a, .screen_name a, .tweet-text a').live('click', function(e){\n\t\tvar url=$(this).attr('href');\n\t\tvar tar=$(this).attr('target');\n\t\tif(tar=='_blank'){\n\t\t\treturn\n\t\t}else{\n\t\t\tstartTinyTip('Loading...');\n\t\t}\n\t\tsetTimeout(function(){\n\t\t\twindow.location=url;\n\t\t},0);\n\t\te.preventDefault();return false;\n\t});\n\n\t$(window).bind('scroll',function(){\n\t\tif($(window).scrollTop()>0){$('#ToTop,#LToTop').show();}\n\t\telse{$('#ToTop,#LToTop').hide();}\n\n if(!window.XMLHttpRequest){//ie6\n\t\t\tvar st=$(document).scrollTop(),wh=$(window).height();\n $('#ToTop').css(\"top\", st+wh-100-50);\n $('#LToTop').css(\"top\", st+130);//TODO\n }\n\t});\n\t\n\t$('#ToTop,#LToTop').click(function(){\n\t\tvar time=150;\n\t\tif($.browser.webkit){\n\t\t\t$(\"body\").animate({scrollTop:0},time);\n\t\t}else{\n\t\t\t$(\"html\").animate({scrollTop:0},time);\n\t\t}\n\t}).mouseover(function(){\n\t\t$(this).addClass('hover');\n\t}).mouseout(function(){\n\t\t$(this).removeClass('hover');\n\t});\n\n});\n" }, { "alpha_fraction": 0.6303317546844482, "alphanum_fraction": 0.6303317546844482, "avg_line_length": 69.66666412353516, "blob_id": "2f845351f1c7653c233a3ce22077880581dfee33", "content_id": "0bb03e01d194a055dd99b927c484f9a97afc0501", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 211, "license_type": "no_license", "max_line_length": 184, "num_lines": 3, "path": "/templates/mobile/unblock.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n\n{% block main %}{% include \"user-profile-lite.html\" %}<form action=\"/m/u-{{ user.screen_name }}/ub\" method=\"post\"><p><input type=\"Submit\" value=\"Unblock\"></p></form>{% endblock main %}" }, { "alpha_fraction": 0.47630149126052856, "alphanum_fraction": 0.4790702760219574, "avg_line_length": 29.911035537719727, "blob_id": "77da13e9397b327919e262bd77df047a1191b72e", "content_id": "661b785482ff625e1b0bd076fbc72ad8733a6a02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27449, "license_type": "no_license", "max_line_length": 138, "num_lines": 888, "path": "/ajax.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import 
util\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\nfrom django.utils import simplejson as json\nfrom urllib import urlencode\nfrom twitdao import Twitdao\n\nimport md\nimport twitpic2\n\nclass UpdateStatus(BaseHandler):\n\n    def post(self):\n        status = self.param('status')\n        \n        params = self.params([\n            'in_reply_to_status_id',\n            'lat',\n            'long',\n            'place_id',\n            'display_coordinates',\n            'trim_user',\n            'include_entities',\n        ])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        tweet = td.statuses_update(status=status.encode('utf-8'), **params)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        self.write(json.dumps({\n            'success':'error' not in tweet,\n            'info':tweet['error'] if 'error' in tweet else 'OK',\n            'tweet':tweet if 'error' not in tweet else None,\n        }))\n\nclass UploadImage(BaseHandler):\n    def post(self):\n        media = self.param('media')\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n        app_config = md.get_app_config()\n        \n        td = Twitdao(token)\n\n        twitpic = twitpic2.TwitPic2(\n            consumer_key = app_config.consumer_key,\n            consumer_secret = app_config.consumer_secret,\n            access_token = 'oauth_token=%s&oauth_token_secret=%s' % (token.oauth_token, token.oauth_token_secret),\n            service_key = app_config.twitpic_api_key,\n        )\n\n        try:\n            if media:\n                filename=self.request.POST[u'media'].filename.encode('utf-8')\n                resp=twitpic.api_call('POST', 'upload', {'message':''}, files=[('media', filename, media)])\n                self.write(json.dumps({\n                    'success':'id' in resp,\n                    'info':'OK',\n                    'response':resp,\n                }))\n        except Exception, e:\n            self.write(json.dumps({\n                'success':False,\n                'info':str(e),\n                'response':None,\n            }))\n        except:\n            self.write(json.dumps({\n                'success':False,\n                'info':'Unknown Error.',\n                'response':None,\n            }))\n\nclass ShowStatus(BaseHandler):\n    def get(self, id):\n        params = self.params(['trim_user','include_entities'])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        tweet = td.statuses_show(id=id, **params)\n        tweet_html = self.render('ajax/tweet.html', {\n            'token':token,\n            'token_user':token_user,\n            'tweet':tweet,\n        }, out=False)\n\n        self.write(json.dumps({\n            'tweet':tweet_html if 'error' not in tweet else None,\n            'success':'error' not in tweet,\n            'info':tweet['error'] if 'error' in tweet else 'OK',\n        }))\n\nclass HomeTimeline(BaseHandler):\n    def get(self, slug):\n        \n        params=self.params([\n            'since_id',\n            'max_id',\n            'count',\n            'page',\n            'trim_user',\n            'include_rts',\n            'include_entities'\n        ])\n        params['count'] = 100\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        timeline = td.home_timeline(**params)\n        tweets = self.render('ajax/home.html', {\n            'token':token,\n            'token_user':token_user,\n            'timeline':timeline,\n        }, out=False)\n\n        if slug == 'refresh':\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['since_id'] = str(timeline[0]['id'])\n                
count = len(timeline)\n            else:\n                tweets=''\n                next_params['since_id'] = str(params['since_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count\n            }))\n        else:\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['max_id'] = str(timeline[-1]['id']-1)\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['max_id'] = str(params['max_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n                'href':'/t?%s' % urlencode(next_params)\n            }))\n\n\nclass Mentions(BaseHandler):\n    def get(self, slug):\n\n        params=self.params([\n            'since_id',\n            'max_id',\n            'count',\n            'page',\n            'trim_user',\n            'include_rts',\n            'include_entities'\n        ])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return \n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        timeline = td.mentions(**params)\n        tweets = self.render('ajax/mentions.html', {\n            'token':token,\n            'token_user':token_user,\n            'timeline':timeline,\n        }, out=False)\n\n        if slug == 'refresh':\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['since_id'] = str(timeline[0]['id'])\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['since_id'] = str(params['since_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count\n            }))\n        else:\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['max_id'] = str(timeline[-1]['id']-1)\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['max_id'] = str(params['max_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n                'href':'/t/mentions?%s' % urlencode(next_params)\n            }))\n\n\n\nclass Retweets(BaseHandler):\n    def get(self, which, slug):\n\n        params=self.params([\n            'since_id',\n            'max_id',\n            'count',\n            'page',\n            'trim_user',\n            'include_entities',\n        ])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return \n\n        td = Twitdao(token)\n        timeline=[]\n        if which == 'retweeted_by_me':\n            timeline = td.retweeted_by_me(**params)\n        elif which == 'retweeted_to_me':\n            timeline = td.retweeted_to_me(**params)\n        elif which == 'retweeted_of_me':\n            timeline = td.retweets_of_me(**params)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        tweets = self.render('ajax/retweets.html', {\n            'token':token,\n            'token_user':token_user,\n            'timeline':timeline,\n        }, out=False)\n\n        if slug == 'refresh':\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['since_id'] = str(timeline[0]['id'])\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['since_id'] = str(params['since_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                
'params':next_params,\n                'count':count\n            }))\n        else:\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['max_id'] = str(timeline[-1]['id']-1)\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['max_id'] = str(params['max_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n                'href':'/t/retweets/%s?%s' % (which, urlencode(next_params))\n            }))\n\n\n\nclass RetweetedBy(BaseHandler):\n    def get(self, tweet_id):\n\n        params = self.params([\n            'count',\n            'page',\n            'trim_user',\n            'include_entities'\n        ], include_entities='0')\n        #default count number is 20.\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = token_user\n        users = td.statuses_retweeted_by(id=tweet_id, **params)\n\n        retweeted_by = self.render('ajax/retweeted-by.html', {\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            'users':users,\n        },out=False)\n\n        self.write(json.dumps({\n            'success':True,\n            'info':'OK',\n            'retweeted_by':retweeted_by,\n        }))\n\n\nclass UserTimeline(BaseHandler):\n    def get(self, screen_name, slug):\n\n        params = self.params([\n            'user_id',\n            'since_id',\n            'max_id',\n            'count',\n            'page',\n            'trim_user',\n            'include_rts',\n            'include_entities',\n        ],include_rts='true')\n\n        token = md.get_default_access_token()\n        #if not token:\n        #    token = md.get_proxy_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        owner_user = td.users_show_by_screen_name( screen_name=screen_name, **params)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        timeline = td.user_timeline(screen_name=screen_name, **params)\n        tweets = self.render('ajax/user.html', {\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            'timeline':timeline,\n        },out=False)\n\n        if slug == 'refresh':\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['since_id'] = str(timeline[0]['id'])\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['since_id'] = str(params['since_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n            }))\n        else:\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['max_id'] = str(timeline[-1]['id']-1)\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['max_id'] = str(params['max_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n                'href':'/t/%s?%s' % (screen_name, urlencode(next_params))\n            }))\n\n\nclass Favorite(BaseHandler):\n    def post(self, status_id, slug):\n        params = self.params(['include_entities'])\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n        \n        td = Twitdao(token)\n        tweet=None\n        if slug=='create':\n            tweet = td.favorites_create(id=status_id, **params)\n        elif slug=='delete':\n            tweet = td.favorites_destroy(id=status_id, **params)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        \n        self.write(json.dumps({\n            'tweet':tweet if 'error' not in tweet else None,\n            'success':'error' not in tweet,\n            'info':tweet['error'] if 'error' in tweet else 'OK',\n        }))\n\nclass Retweet(BaseHandler):\n    def post(self, id):\n        params = self.params(['trim_user','include_entities'])\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token 
available.',\n            }))\n            return\n        \n        td = Twitdao(token)\n        tweet = td.statuses_retweet(id=id, **params)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        self.write(json.dumps({\n            'tweet':tweet if 'error' not in tweet else None,\n            'success':'error' not in tweet,\n            'info':tweet['error'] if 'error' in tweet else 'OK',\n        }))\n\n\nclass DeleteStatus(BaseHandler):\n    def post(self, id):\n        params = self.params(['trim_user','include_entities'])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n        \n        td = Twitdao(token)\n        tweet = td.statuses_destroy(id=id, **params)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        self.write(json.dumps({\n            'tweet':tweet if 'error' not in tweet else None,\n            'success':'error' not in tweet,\n            'info':tweet['error'] if 'error' in tweet else 'OK',\n        }))\n\n#TODO\n#lists, \n\nclass Follow(BaseHandler):\n    def post(self,screen_name, slug):\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        fuser=None\n        if 'make' == slug:\n            fuser = td.friendships_create(screen_name = screen_name)\n        else:\n            fuser = td.friendships_destroy(screen_name = screen_name)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n\n        if 'error' in fuser:\n            self.write(json.dumps({\n                'success':False,\n                'info':fuser['error'],\n            }))\n        else:\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'user':fuser,\n            }))\n\n\nclass Block(BaseHandler):\n    def post(self, screen_name, slug):\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        buser=None\n        if 'add' == slug:\n            buser = td.blocks_create(screen_name = screen_name)\n        else:\n            buser = td.blocks_destroy(screen_name = screen_name)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n\n        if 'error' in buser:\n            self.write(json.dumps({\n                'success':False,\n                'info':buser['error'],\n            }))\n        else:\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'user':buser,\n            }))\n\n\nclass ReportSpam(BaseHandler):\n    def post(self, screen_name):\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n\n        td = Twitdao(token)\n        ruser = td.report_spam(screen_name = screen_name)\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n\n        if 'error' in ruser:\n            self.write(json.dumps({\n                'success':False,\n                'info':ruser['error'],\n            }))\n        else:\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'user':ruser,\n            
}))\n\n\nclass Blocking(BaseHandler):\n    def get(self):\n        pass\n\nclass SavedSearch(BaseHandler):\n    def get(self):\n        pass\n\n\nclass MessageSend(BaseHandler):\n    def post(self):\n        screen_name = self.param('screen_name')\n        user_id = self.param('user_id')\n        text = self.param('text')\n\n        params = self.params(['include_entities'])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n        \n        td = Twitdao(token)\n        message = td.direct_messages_new(user_id=user_id, screen_name=screen_name, text=text.encode('utf-8'), **params)\n\n        if 'error' in message:\n            self.write(json.dumps({\n                'success':False,\n                'info':message['error'],\n            }))\n        else:\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'message':message,\n            }))\n\nclass MessageDestroy(BaseHandler):\n    def post(self, id):\n        params = self.params(['include_entities'])\n        \n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'No access token available.',\n            }))\n            return\n        \n        td = Twitdao(token)\n        message = td.direct_messages_destroy(id=id, **params)\n\n        if 'error' in message:\n            self.write(json.dumps({\n                'success':False,\n                'info':message['error'],\n            }))\n        else:\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'message':message,\n            }))\n\n\nclass ListTimeline(BaseHandler):\n    def get(self, screen_name, slug, xlug):\n        \n        params = self.params(['since_id','max_id','per_page','page','include_entities'])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.redirect('/settings')\n            return\n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n        #ls = td.user_list_id_get(id=slug, screen_name=screen_name)\n        timeline = td.user_list_id_statuses(id=slug, screen_name = screen_name, **params)\n\n        tweets=self.render('ajax/list.html', {\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            #'list':ls,\n            'timeline':timeline,\n        },out=False)\n\n        if xlug == 'refresh':\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['since_id'] = str(timeline[0]['id'])\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['since_id'] = str(params['since_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n            }))\n        else:\n            next_params={}\n            count=0\n            if type(timeline) == list and len(timeline):\n                next_params['max_id'] = str(timeline[-1]['id']-1)\n                count = len(timeline)\n            else:\n                tweets=''\n                next_params['max_id'] = str(params['max_id'])\n                count = 0\n\n            self.write(json.dumps({\n                'success':True,\n                'info':'OK',\n                'tweets':tweets,\n                'params':next_params,\n                'count':count,\n                'href':'/t/%s/%s?%s'% (screen_name, slug, urlencode(next_params))\n            }))\n\n\nclass HackedSearch(BaseHandler):\n    def get(self, slug):\n        q = self.param('q')\n        since_id=self.param('since_id')\n        page=self.param('page')\n\n        token = md.get_default_access_token()\n        if not token:\n            self.write(json.dumps({\n                'success':False,\n                'info':'Token error.'\n            }))\n            return\n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = token_user\n\n        searchd=td.hacked_search(q.encode('utf-8'), since_id, page)\n        timeline=searchd['statuses']\n\n        count=0\n        next_params={'q':q}\n        if slug=='refresh':\n            if type(timeline) == list and len(timeline):\n                next_params['since_id'] = 
str(timeline[0]['id'])\n else:\n next_params['since_id'] = str(since_id)\n elif slug=='more':\n next_params['page'] = searchd['next_page']\n count = len(timeline)\n\n tweets=self.render('ajax/hacked_search.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n },out=False)\n\n self.write(json.dumps({\n 'success':True,\n 'info':'OK',\n 'tweets':tweets,\n 'params':next_params,\n 'count':count,\n 'href': '/a/search?%s' % urlencode({'page':searchd['next_page'], 'q':q.encode('utf-8')})\n }))\n\n\nclass HackedFollowingFollowersOf(BaseHandler):\n def get(self):\n user_id = self.param('user_id')\n\n token = md.get_default_access_token()\n if not token:\n self.write(json.dumps({\n 'success':False,\n 'info':'Token error.'\n }))\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n res=td.hacked_following_followers_of(user_id)\n tweets=self.render('ajax/following_followers_of.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'res':res,\n },out=False)\n\n self.write(json.dumps({\n 'success':True,\n 'info':'OK',\n 'html':tweets,\n }))\n\n\nclass HackedFollowsInCommonWith(BaseHandler):\n def get(self):\n user_id = self.param('user_id')\n\n token = md.get_default_access_token()\n if not token:\n self.write(json.dumps({\n 'success':False,\n 'info':'Token error.'\n }))\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n\n res=td.hacked_follows_in_common_with(user_id)\n tweets=self.render('ajax/follows_in_common_with.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'res':res,\n },out=False)\n\n self.write(json.dumps({\n 'success':True,\n 'info':'OK',\n 'html':tweets,\n }))\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/x/update', UpdateStatus),\n ('/x/delete/([0-9]+)', DeleteStatus),\n ('/x/show/([0-9]+)', ShowStatus),\n\n ('/x/home/(refresh|more)', HomeTimeline),\n\n ('/x/mentions/(refresh|more)', Mentions),\n\n ('/x/retweets/(retweeted_by_me|retweeted_to_me|retweeted_of_me)/(refresh|more)', Retweets),\n ('/x/retweet/([0-9]+)', Retweet),\n ('/x/retweeted_by/([0-9]+)', RetweetedBy),\n \n ('/x/user/([0-9a-zA-Z_]+)/(refresh|more)', UserTimeline),\n\n ('/x/list/([0-9a-zA-Z_]+)/([0-9a-zA-Z\\-%]+)/(refresh|more)', ListTimeline),\n\n ('/x/message_send', MessageSend),\n ('/x/message_destroy/([0-9]+)', MessageDestroy),\n\n ('/x/favorite/([0-9]+)/(create|delete)', Favorite),\n \n ('/x/friends/([0-9a-zA-Z_]+)/(make|break)', Follow),\n\n ('/x/block/([0-9a-zA-Z_]+)/(add|remove)', Block),\n\n ('/x/report/([0-9a-zA-Z_]+)', ReportSpam),\n ('/x/upload_image', UploadImage),\n\n ('/x/search/(refresh|more)', HackedSearch),\n ('/x/following_followers_of', HackedFollowingFollowersOf),\n ('/x/follows_in_common_with', HackedFollowsInCommonWith),\n\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5967479944229126, "alphanum_fraction": 0.5993496179580688, "avg_line_length": 33.53932571411133, "blob_id": "e1ca9e23d9b4719eca3613297163b86e441bed31", "content_id": "b0f0b9ea873b50607ffcb349c5c9f4b1b23f6195", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3075, "license_type": "no_license", "max_line_length": 80, "num_lines": 89, "path": "/base.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom 
google.appengine.dist import use_library\nuse_library('django','1.2')\n\nfrom django.conf import settings\nsettings.configure(INSTALLED_APPS=('zombie',))\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import users\n\nfrom Cookie import SimpleCookie\nimport os\n\ntemplate.register_template_library('templatetags.string')\ntemplate.register_template_library('templatetags.fix')\ntemplate.register_template_library('templatetags.entities')\ntemplate.register_template_library('templatetags.tags')\n\nclass BaseHandler(webapp.RequestHandler):\n\n    def initialize(self, request, response):\n        webapp.RequestHandler.initialize(self, request, response)\n        self.current = os.environ['PATH_INFO']\n        self.logout_url = users.create_logout_url(\"/\")\n        self.template_vals = {\n            'self':self\n        }\n\n    def render(self,template_name, template_values={}, out=True):\n        self.template_vals.update(template_values)\n        directory = os.path.dirname(__file__)\n        path = os.path.join(directory, os.path.join('templates', template_name))\n        result = template.render(path, self.template_vals)\n        if out:\n            self.response.out.write(result)\n        return result\n\n    def param(self, name, **kw):\n        return self.request.get(name, **kw)\n\n    def write(self, c):\n        return self.response.out.write(c)\n    \n    def params(self, param_list, **default_vals):\n        params={}\n        for i in param_list:\n            param=self.request.get(i)\n            if param:\n                params[i] = param\n            elif i in default_vals:\n                params[i]=default_vals[i]\n            elif i=='include_entities': #temp\n                params[i]='t'\n        return params\n\n    def jedirect(self, uri, time=5000, text=\"Redirecting...\"):\n        self.write('''<script type=\"text/javascript\">\n            setTimeout(function(){window.location=\"%s\"},%s)\n        </script>''' % (uri, time))\n        self.write('%s' % text)\n\n    def set_cookie(self, key, value='', max_age=None,\n                   path='/', domain=None, secure=None, httponly=False,\n                   version=None, comment=None):\n        cookies = SimpleCookie()\n        cookies[key] = value\n        for var_name, var_value in [\n            ('max-age', max_age),\n            ('path', path),\n            ('domain', domain),\n            ('secure', secure),\n            ('HttpOnly', httponly),\n            ('version', version),\n            ('comment', comment),\n        ]:\n            if var_value is not None and var_value is not False:\n                cookies[key][var_name] = str(var_value)\n        header_value = cookies[key].output(header='').lstrip()\n        self.response.headers.add_header('Set-Cookie', header_value)\n    \n    def get_cookie(self, key, default=None):\n        if key in self.request.cookies:\n            return self.request.cookies[key]\n        else:\n            return default\n\n    def delete_cookie(self, key):\n        self.set_cookie(key, '', max_age=0)\n\n" }, { "alpha_fraction": 0.5421656966209412, "alphanum_fraction": 0.5450809597969055, "avg_line_length": 34.13788986206055, "blob_id": "a63bd51a07fef75a3250367d5595cdb7a0ac2e7", "content_id": "c5082faaf3aedddc8d1392b596d4670fc01e3a6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28471, "license_type": "no_license", "max_line_length": 140, "num_lines": 834, "path": "/mobile.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import users\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\nfrom twitdao import Twitdao\n\nimport md\nimport utils\nimport twitpic2\n\nimport logging\n\n\nclass Home(BaseHandler):\n    def get(self):\n        params=self.params([\n            'since_id',\n            
'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities'\n ])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n timeline = td.home_timeline(**params)\n if 'error' in timeline:\n timeline=[]\n self.render('mobile/home.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'where':'home'\n })\n\nclass Mentions(BaseHandler):\n def get(self):\n params=self.params([\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities'\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return \n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n timeline = td.mentions(**params)\n if 'error' in timeline:\n timeline=[]\n self.render('mobile/mentions.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'timeline':timeline,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'where':'mentions'\n })\n\nclass Favorites(BaseHandler):\n def get(self, screen_name):\n params = self.params(['page', 'include_entities'])\n page = self.param('page')\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n if not screen_name:\n screen_name=token.screen_name\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n favorites = td.favorites(id=screen_name, **params)\n prev_page, next_page = None, 2\n if page:\n try:\n page = int(page)\n prev_page = page-1 if page-1>0 else None\n next_page = page+1\n except:\n pass\n\n self.render('mobile/favorites.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'favorites':favorites,\n 'prev_page':prev_page,\n 'next_page':next_page,\n 'where':'favorites',\n })\n\nclass Followers(BaseHandler):\n def get(self, screen_name):\n params = self.params([\n 'user_id',\n 'cursor',\n 'include_entities',\n 'count'\n ], cursor=-1, count=50)\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n if not screen_name:\n screen_name=token.screen_name\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n followers = td.statuses_followers(screen_name=screen_name, **params)\n\n self.render('mobile/followers.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'error': followers['error'] if 'error' in followers else False,\n 'followers':followers if 'error' in followers else followers['users'],\n 'next_cursor':None if 'error' in followers else followers['next_cursor'],\n 'next_cursor_str':None if 'error' in followers else followers['next_cursor_str'],\n 'previous_cursor':None if 'error' in followers else followers['previous_cursor'],\n 'previous_cursor_str':None if 'error' in followers else followers['previous_cursor_str'],\n 'where':'followers',\n })\n\nclass 
Following(BaseHandler):\n    def get(self, screen_name):\n        params = self.params([\n            'user_id',\n            'cursor',\n            'include_entities',\n            'count'\n        ], cursor=-1, count=50)\n\n        token = md.get_default_access_token()\n        if not token:\n            self.redirect('/settings')\n            return\n\n        if not screen_name:\n            screen_name=token.screen_name\n\n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = td.users_show_by_screen_name(screen_name = screen_name)\n        following = td.statuses_friends(screen_name=screen_name, **params)\n\n        self.render('mobile/following.html', {\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            'error': following['error'] if 'error' in following else False,\n            'following':following if 'error' in following else following['users'],\n            'next_cursor':None if 'error' in following else following['next_cursor'],\n            'next_cursor_str':None if 'error' in following else following['next_cursor_str'],\n            'previous_cursor':None if 'error' in following else following['previous_cursor'],\n            'previous_cursor_str':None if 'error' in following else following['previous_cursor_str'],\n            'where':'following',\n        })\n\nclass Messages(BaseHandler):\n    def get(self, mbox):\n        params = self.params([\n            'since_id',\n            'max_id',\n            'count',\n            'page',\n            'include_entities',\n        ])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.redirect('/settings')\n            return\n        \n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = token_user\n\n        direct_messages = []\n        if mbox=='inbox':\n            direct_messages = td.direct_messages(**params)\n        elif mbox=='sent':\n            direct_messages = td.direct_messages_sent(**params)\n        else:\n            self.error(404)\n            return\n\n        self.render('mobile/messages.html', {\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            'max_id':str(direct_messages[-1]['id']-1) if type(direct_messages)==list and len(direct_messages)>0 else None,\n            'since_id':direct_messages[0]['id_str'] if type(direct_messages)==list and len(direct_messages)>0 else None,\n            'messages':direct_messages,\n            'where': 'messages',\n            'at': mbox,\n        })\n\nclass SendMessage(BaseHandler):\n    def get(self):\n        screen_name = self.param('screen_name')\n\n        token = md.get_default_access_token()\n        if not token:\n            self.redirect('/settings')\n            return\n        \n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = token_user\n\n        self.render('mobile/message-send.html',{\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            'screen_name':screen_name,\n        })\n\n    def post(self):\n        screen_name = self.param('screen_name')\n        user_id = self.param('user_id')\n        text = self.param('text')\n\n        params = self.params(['include_entities'])\n\n        token = md.get_default_access_token()\n        if not token:\n            self.redirect('/settings')\n            return\n\n        td = Twitdao(token)\n        message = td.direct_messages_new(user_id=user_id, screen_name=screen_name, text=text.encode('utf-8'), **params)\n\n        self.redirect('/m/m-sent')\n\nclass DeleteMessage(BaseHandler):\n    def get(self):\n        id=self.param('id')\n        token = md.get_default_access_token()\n        if not token:\n            self.redirect('/settings')\n            return\n        \n        td = Twitdao(token)\n        token_user = td.users_show_by_id(user_id = token.user_id)\n        owner_user = token_user\n        \n        #No API to show a single message.\n        message = None \n\n        self.render('mobile/message-del.html',{\n            'token':token,\n            'token_user':token_user,\n            'owner_user':owner_user,\n            'message':message,\n            'id':id\n        })\n\n    def post(self):\n        params = 
self.params(['include_entities'])\n id = self.param('id')\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n message = td.direct_messages_destroy(id=id, **params)\n self.redirect('/m/m-inbox')\n\nclass User(BaseHandler):\n def get(self, screen_name):\n params = self.params([\n 'user_id',\n 'since_id',\n 'max_id',\n 'count',\n 'page',\n 'trim_user',\n 'include_rts',\n 'include_entities',\n ],include_rts='true')\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n owner_user = td.users_show_by_screen_name( screen_name=screen_name )\n token_user = td.users_show_by_id(user_id = token.user_id)\n friendship = td.friendships_show(source_id=token.user_id, target_screen_name=screen_name)\n timeline = td.user_timeline(screen_name=screen_name, **params)\n self.render('mobile/user.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,\n 'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,\n 'timeline':timeline,\n 'friendship':friendship,\n 'where':'user',\n })\n\nclass ActionFollow(BaseHandler):\n def get(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n follow_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('mobile/follow.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':follow_user,\n })\n\n def post(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n follow_user = td.friendships_create(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/m/u-%s' % screen_name)\n\nclass ActionUnfollow(BaseHandler):\n def get(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n \n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n follow_user = td.users_show_by_screen_name(screen_name = screen_name)\n\n self.render('mobile/unfollow.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':follow_user,\n })\n\n def post(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n follow_user = td.friendships_destroy(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/m/u-%s' % screen_name)\n\nclass ActionBlock(BaseHandler):\n def get(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n 
block_user = td.users_show_by_screen_name(screen_name = screen_name)\n self.render('mobile/block.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':block_user,\n })\n\n def post(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n block_user = td.blocks_create(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/m/u-%s' % screen_name)\n\nclass ActionUnblock(BaseHandler):\n def get(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n block_user = td.users_show_by_screen_name(screen_name = screen_name)\n self.render('mobile/unblock.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'user':block_user,\n })\n\n def post(self, screen_name):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n td = Twitdao(token)\n follow_user = td.blocks_destroy(screen_name = screen_name)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method=\"GET\" )\n self.redirect('/m/u-%s' % screen_name)\n\nclass ActionDelete(BaseHandler):\n def get(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n id=utils.tweet_id_decode(tweet_id)\n\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('mobile/tweet-del.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n \n def post(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n id=utils.tweet_id_decode(tweet_id)\n\n td = Twitdao(token)\n tweet = td.statuses_destroy(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/m/u-/home')\n\nclass ActionTweet(BaseHandler):\n def get(self):\n screen_name = self.param('screen_name')\n tweet_id = self.param('tweet_id')\n\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n tweet_id = utils.tweet_id_decode(tweet_id)\n\n if screen_name:\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n self.render('mobile/reply.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'screen_name':screen_name,\n })\n else:\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=tweet_id,**params)\n self.render('mobile/reply.html', {\n 'token':token,\n 
'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self):\n status = self.param('status')\n\n params = self.params([\n 'in_reply_to_status_id',\n 'lat',\n 'long',\n 'place_id',\n 'display_coordinates',\n 'trim_user',\n 'include_entities',\n ])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n td = Twitdao(token)\n td.statuses_update(status=status.encode('utf-8'), **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/m/u-/home')\n\nclass ShowTweet(BaseHandler):\n def get(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n \n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n tweet_id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=tweet_id,**params)\n\n self.render('mobile/tweet-show.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\nclass ActionQuote(BaseHandler):\n def get(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('mobile/quote.html', {\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\nclass ActionRetweet(BaseHandler):\n def get(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('mobile/retweet.html', {\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n tweet = td.statuses_retweet(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n\n self.redirect('/m/u-/home')\n\nclass ActionUndoRetweet(BaseHandler):\n def get(self, tweet_id):\n pass\n\nclass ActionFavorite(BaseHandler):\n def get(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('mobile/favorite.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self, tweet_id):\n params = self.params(['include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n tweet = 
td.favorites_create(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/m/u-%s/favs' % token.screen_name)\n\nclass ActionUnfavorite(BaseHandler):\n def get(self, tweet_id):\n params = self.params(['trim_user','include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n tweet = td.statuses_show(id=id, **params)\n\n self.render('mobile/unfavorite.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n 'tweet':tweet,\n })\n\n def post(self, tweet_id):\n params = self.params(['include_entities'])\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n id = utils.tweet_id_decode(tweet_id)\n td = Twitdao(token)\n tweet = td.favorites_destroy(id=id, **params)\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n self.redirect('/m/u-%s/favs' % token.screen_name)\n\n\nclass Settings(BaseHandler):\n def get(self, section):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n cursor=self.param('cursor', default_value=None)\n tokens, cursor = md.get_user_access_tokens(users.get_current_user(), 10, cursor)\n td=Twitdao(token)\n token_user=td.users_show_by_id(user_id=token.user_id)\n\n self.render('mobile/settings.html', {\n 'token':token,\n 'tokens':tokens,\n 'token_user':token_user,\n 'owner_user':token_user,\n 'where':'settings'\n })\n\n def post(self, section):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n if section=='token':\n token_key = self.param('tk')\n token = md.get_access_token(token_key, users.get_current_user())\n md.set_default_access_token(token)\n elif section=='media':\n show_avatar=self.param('show_avatar')\n show_media=self.param('show_media')\n settings={}\n settings['m_show_avatar']=True if show_avatar=='t' else False\n settings['m_show_media']=True if show_media=='t' else False\n md.set_token_settings(token.key(), users.get_current_user(), **settings)\n elif section=='opti':\n opti=self.param('opti')\n settings={}\n settings['m_optimizer']=opti if opti!='none' or opti=='' else None\n md.set_token_settings(token.key(), users.get_current_user(), **settings)\n\n self.redirect('/m/s-')\n\n\nclass UploadPhoto(BaseHandler):\n def get(self):\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n td = Twitdao(token)\n token_user = td.users_show_by_id(user_id = token.user_id)\n owner_user = token_user\n self.render('mobile/upload.html', {\n 'token':token,\n 'token_user':token_user,\n 'owner_user':owner_user,\n })\n\n def post(self):\n media = self.param('media')\n status = self.param('status')\n\n token = md.get_default_access_token()\n if not token:\n self.redirect('/settings')\n return\n\n app_config = md.get_app_config()\n td = Twitdao(token)\n twitpic = twitpic2.TwitPic2(\n consumer_key = app_config.consumer_key,\n consumer_secret = app_config.consumer_secret,\n access_token = 'oauth_token=%s&oauth_token_secret=%s' % (token.oauth_token, token.oauth_token_secret),\n service_key = app_config.twitpic_api_key,\n )\n\n try:\n if media:\n 
filename=self.request.POST[u'media'].filename.encode('utf-8')\n resp=twitpic.api_call('POST', 'upload', {'message':status.encode('utf-8')}, files=[('media', filename, media)])\n full_status=status+\" \"+resp['url']\n tweet_status = full_status\n if len(full_status)-140>0:\n tweet_status = status[:140-len(resp['url'])-4]+\"... \"+resp['url']\n td.statuses_update(status=tweet_status.encode('utf-8'))\n taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method=\"GET\" )\n except Exception, e:\n logging.debug(e)\n except:\n raise\n self.redirect('/m/u-/home')\n\n\nclass UserAgentTest(BaseHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/plain'\n self.write(self.request.headers['user-agent'])\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/m(?:|/|/u-/home)', Home),\n ('/m/u-/at', Mentions),\n ('/m/u-([0-9a-zA-Z_]*)/favs', Favorites),\n ('/m/u-([0-9a-zA-Z_]*)/foers', Followers),\n ('/m/u-([0-9a-zA-Z_]*)/foing', Following),\n\n ('/m/m-(inbox|sent)', Messages),\n ('/m/m-send', SendMessage),\n ('/m/m-del', DeleteMessage),\n\n ('/m/u-([0-9a-zA-Z_]+)', User),\n ('/m/u-([0-9a-zA-Z_]+)/fo', ActionFollow),\n ('/m/u-([0-9a-zA-Z_]+)/ufo', ActionUnfollow),\n ('/m/u-([0-9a-zA-Z_]+)/b', ActionBlock),\n ('/m/u-([0-9a-zA-Z_]+)/ub', ActionUnblock),\n\n ('/m/t-', ActionTweet),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)', ShowTweet),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)/qt', ActionQuote),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)/del', ActionDelete),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)/rt', ActionRetweet),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)/urt', ActionUndoRetweet),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)/fav', ActionFavorite),\n ('/m/t-([0-9a-zA-Z_\\-\\.]+)/ufav', ActionUnfavorite),\n\n ('/m/s-(token|media|opti|)', Settings),\n ('/m/p-', UploadPhoto),\n ('/m/uat-', UserAgentTest),\n\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5773714780807495, "alphanum_fraction": 0.5786963701248169, "avg_line_length": 30.450000762939453, "blob_id": "beb6a3649d70b56d2af0c68f1e93d5ffcca91e01", "content_id": "4fa6b48e2ba3ffbc38ae9702f77b66a130866f38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3774, "license_type": "no_license", "max_line_length": 166, "num_lines": 120, "path": "/config.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\nimport md\n\nimport os\nimport logging\n\n\nclass AppConfig(BaseHandler):\n def get(self):\n app_config = None\n if users.is_current_user_admin():\n app_config = md.get_app_config()\n self.render('app-config.html', {\n 'app_config':app_config,\n 'where':'twitdao-config'\n })\n\n def post(self):\n params=self.params([\n 'consumer_key',\n 'consumer_secret',\n 'request_token_url',\n 'access_token_url',\n 'authorize_url',\n 'authenticate_url',\n 'api_url',\n 'search_api_url',\n 'twitpic_api_key',\n ])\n md.set_app_config(**params)\n self.redirect('/config')\n\n\nclass ImageProxyConfig(BaseHandler):\n def get(self):\n image_proxy_config = None\n if users.is_current_user_admin():\n image_proxy_config = md.get_image_proxy_config()\n self.render('image-proxy-config.html', {\n 'image_proxy_config':image_proxy_config,\n 
'where':'image_proxy-config'\n        })\n\n    def post(self):\n        params=self.params([\n            'flickr_api_key',\n            'flickr_api_secret',\n            'flickr_rest_api_url',\n        ])\n        md.set_image_proxy_config(**params)\n        self.redirect('/config/image_proxy')\n\n\nclass Memcache(BaseHandler):\n    def get(self):\n        stats = memcache.get_stats()\n        self.render('memcache-config.html',{\n            'stats':stats,\n            'success':self.param('success'),\n            'where':'memcache-config'\n        })\n\n    def post(self):\n        success = memcache.flush_all()\n        self.redirect('/config/memcache?success=%s' % success)\n\n\nclass CleanUpAccesses(BaseHandler):\n    def get(self):\n        self.render('clean-up-accesses.html',{'where':'clean-up-accesses'})\n\n    def post(self):\n\n        self.response.headers['Content-Type'] = 'text/plain'\n\n        cursor = self.param('cursor', default_value=None)\n\n        manual = not ( 'X-AppEngine-QueueName' in self.request.headers or 'X-AppEngine-Cron' in self.request.headers )\n\n        tokens, next_cursor = md.get_access_tokens(size=50, cursor=cursor)\n        for token in tokens:\n            taskqueue.add(queue_name='clean-up-accesses', url='/q/verify_access', params={'tk':token.key()}, method='GET')\n            logging.debug('Add token: %s' % token)\n            if manual:\n                self.write('Add token: %s\\n' % token)\n\n        if next_cursor:\n            taskqueue.add(queue_name='clean-up-accesses', url='/config/clean_up_accesses', params={'cursor':next_cursor}, method='POST')\n            logging.debug('More cursor: %s' % next_cursor)\n            if manual:\n                self.write('\\nMore cursor: %s\\n' % next_cursor)\n                self.write('\\nThe program is still working, and will run for some time.\\n')\n                self.write('Go: [https://appengine.google.com/queuedetails?&app_id=%s&queue_name=clean-up-accesses] to watch details.' % os.environ['APPLICATION_ID'])\n                self.write('\\n'*20)\n        else:\n            logging.debug('No more accesses.')\n            if manual:\n                self.write('\\nThe End.\\n')\n\n\ndef main():\n    application = webapp.WSGIApplication([\n        ('/config', AppConfig),\n        ('/config/image_proxy', ImageProxyConfig),\n        ('/config/memcache', Memcache),\n        ('/config/clean_up_accesses', CleanUpAccesses),\n\n    ], debug=True)\n    util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.6814720630645752, "alphanum_fraction": 0.6890863180160522, "avg_line_length": 27.178571701049805, "blob_id": "d8e4fbb063bce20e4d7a57d86afc9ff94ab564ab", "content_id": "392ce3fba53a035c4454ae7719d6e4a84d6964e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/templatetags/fix.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom django.template.defaultfilters import stringfilter\n\nimport re\n\nregister = webapp.template.create_template_register()\n\[email protected]\n@stringfilter\ndef secure_image(image_url):\n    ''' *.twimg.com to https://*.amazonaws.com '''\n    \n    #Comment this line out when https is needed.\n    return image_url\n\n    m=re.search(r'a([0-9]+)\\\\..+(/profile_images/.+)', image_url, re.I)\n    if m:\n        return 'https://s3.amazonaws.com/twitter_production%s' % m.group(2)\n    return image_url\nsecure_image.is_safe=True\n\n_origin_image_re=re.compile('_(normal|mini|bigger)\\\\.(png|gif|jpg|jpeg)$', re.I)\[email protected]\n@stringfilter\ndef origin_image(image_url):\n    return _origin_image_re.sub('.\\\\g<2>', image_url)\norigin_image.is_safe=True" }, { "alpha_fraction": 0.5785440802574158, "alphanum_fraction": 0.6091954112052917, "avg_line_length": 
27.94444465637207, "blob_id": "18c365c14208d9861e417b01c3f071dc87c550ea", "content_id": "f164f127e6310f95e9cfb27508b2c7a8b1be6c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 82, "num_lines": 18, "path": "/utils.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "\n_urlsafe_chars='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.'\n_urlsafe_chars_num=len(_urlsafe_chars)\n\ndef tweet_id_encode(n):\n tl, n = [], long(n)\n while(n>0):\n m, n = n%_urlsafe_chars_num, n//_urlsafe_chars_num\n tl.insert(0,_urlsafe_chars[int(m)])\n return ''.join(tl)\n\ndef tweet_id_decode(t):\n t=str(t)\n n,i=0,len(t)-1\n for c in t:\n if c not in _urlsafe_chars: return 0\n n+=(_urlsafe_chars.index(c)*pow(_urlsafe_chars_num, i))\n i-=1\n return n\n" }, { "alpha_fraction": 0.6204379796981812, "alphanum_fraction": 0.6213017702102661, "avg_line_length": 41.794822692871094, "blob_id": "c1acc805639458cea008f728d11985f2ce816b70", "content_id": "dd22be398fa081a7db0591adc52e9b87ae0c68dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23173, "license_type": "no_license", "max_line_length": 150, "num_lines": 541, "path": "/twitdao.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.api import memcache\n\nfrom twitter import Twitter\nimport md\n\nUSER_CACHE_TIME = 10*60\nTWEET_CACHE_TIME = 60*60\n\nclass Twitdao():\n def __init__(self, token=None):\n self.token = token\n\n config = md.get_app_config()\n \n if token:\n self.twitter = Twitter(\n oauth_token=self.token.oauth_token,\n oauth_token_secret=self.token.oauth_token_secret,\n \n consumer_key=config.consumer_key,\n consumer_secret=config.consumer_secret,\n request_token_url=config.request_token_url,\n access_token_url=config.access_token_url,\n authorize_url=config.authorize_url,\n authenticate_url=config.authenticate_url,\n api_url=config.api_url,\n search_api_url=config.search_api_url\n )\n else:\n self.twitter = Twitter( \n consumer_key=config.consumer_key,\n consumer_secret=config.consumer_secret,\n request_token_url=config.request_token_url,\n access_token_url=config.access_token_url,\n authorize_url=config.authorize_url,\n authenticate_url=config.authenticate_url,\n api_url=config.api_url,\n search_api_url=config.search_api_url\n )\n\n def fetch_request_token(self, callback=None):\n return self.twitter.fetch_request_token(callback)\n\n def fetch_access_token(self, verifier):\n access_token = self.twitter.fetch_access_token(verifier)\n return access_token\n\n def get_authenticate_url(self, request_token, force_login=False):\n return self.twitter.get_authenticate_url(request_token, force_login)\n\n def get_authorize_url(self, request_token, force_login=False):\n return self.twitter.get_authorize_url(request_token, force_login)\n\n #==========================================================================\n def _cache_timeline(self, timeline, **params):\n if not 'errors' in timeline:\n trim_user=params['trim_user'] if 'trim_user' in params else None\n include_entities=params['include_entities'] if 'include_entities' in params else None\n td=dict(('%s-%s-%s' % (tweet['id_str'], trim_user, include_entities), tweet) for tweet in timeline)\n return memcache.set_multi(td, time=TWEET_CACHE_TIME, key_prefix=\"tweet-\")\n return False\n \n def _cache_tweet(self, tweet, **params):\n if not 'errors' 
in tweet:\n            trim_user=params['trim_user'] if 'trim_user' in params else None\n            include_entities=params['include_entities'] if 'include_entities' in params else None\n            return memcache.set( 'tweet-%s-%s-%s' % (tweet['id_str'], trim_user, include_entities), tweet, time=TWEET_CACHE_TIME,)\n        return False\n\n    def _get_cached_tweet(self, id, **params):\n        trim_user=params['trim_user'] if 'trim_user' in params else None\n        include_entities=params['include_entities'] if 'include_entities' in params else None\n        return memcache.get( 'tweet-%s-%s-%s' % (id, trim_user, include_entities) )\n    \n    def _del_cached_tweet(self, id, **params):\n        trim_user=params['trim_user'] if 'trim_user' in params else None\n        include_entities=params['include_entities'] if 'include_entities' in params else None\n        return memcache.delete( 'tweet-%s-%s-%s' % (id, trim_user, include_entities) )\n\n    #Seems not great.\n    def _cache_users(self, users, **params):\n        if not 'errors' in users:\n            include_entities = params['include_entities'] if 'include_entities' in params else None\n            us=dict(('%s-%s' % (user['id_str'], include_entities), user) for user in users)\n            us.update(dict(('%s-%s' % (user['screen_name'], include_entities), user) for user in users))\n            return memcache.set_multi(us, key_prefix=\"user-\", time=USER_CACHE_TIME)\n        return False\n\n    def _cache_user(self, user, **params):\n        if not 'errors' in user:\n            include_entities = params['include_entities'] if 'include_entities' in params else None\n            return memcache.set_multi({\n                ('id-%s-%s' % (user['id_str'], include_entities)):user,\n                ('screen_name-%s-%s' % (user['screen_name'], include_entities)):user\n            }, key_prefix=\"user-\", time=USER_CACHE_TIME)\n        return False\n\n    def _get_cached_user_by_id(self, id, **params):\n        include_entities = params['include_entities'] if 'include_entities' in params else None\n        return memcache.get('user-id-%s-%s' % (id, include_entities))\n\n    def _get_cached_user_by_screen_name(self, screen_name, **params):\n        include_entities = params['include_entities'] if 'include_entities' in params else None\n        return memcache.get('user-screen_name-%s-%s' % (screen_name, include_entities))\n\n    #Can't delete them all.\n    def _del_cached_user_by_id(self, id, **params):\n        include_entities = params['include_entities'] if 'include_entities' in params else None\n        return memcache.delete('user-id-%s-%s' % (id, include_entities))\n\n    def _del_cached_user_by_screen_name(self, screen_name, **params):\n        include_entities = params['include_entities'] if 'include_entities' in params else None\n        return memcache.delete('user-screen_name-%s-%s' % (screen_name, include_entities))\n\n    def public_timeline(self, **params):\n        #trim_user, include_entities\n        timeline = self.twitter.api_call('GET','statuses/sample', params)\n        return timeline\n\n    def home_timeline(self, **params):\n        #since_id, max_id, count, page, trim_user, include_rts, include_entities\n        timeline = self.twitter.api_call('GET','statuses/home_timeline', params)\n        self._cache_timeline(timeline, **params)\n        return timeline\n\n    def friends_timeline(self, **params):\n        #since_id, max_id, count, page, trim_user, include_rts, include_entities\n        timeline = self.twitter.api_call('GET','statuses/friends_timeline', params)\n        self._cache_timeline(timeline, **params)\n        return timeline\n\n    def user_timeline(self, **params):\n        #user_id, screen_name, since_id, max_id, count, page, trim_user, include_rts, include_entities\n        timeline = self.twitter.api_call('GET','statuses/user_timeline', params)\n        self._cache_timeline(timeline, **params)\n        return timeline\n\n    def 
mentions(self, **params):\n #since_id, max_id, count, page, trim_user, include_rts, include_entities\n timeline = self.twitter.api_call('GET','statuses/mentions_timeline', params)\n self._cache_timeline(timeline, **params)\n return timeline\n\n def retweeted_by_me(self, **params):\n #since_id, max_id, count, page, trim_user, include_entities\n timeline = self.twitter.api_call('GET','statuses/retweeted_by_me', params)\n self._cache_timeline(timeline, **params)\n return timeline\n\n def retweeted_to_me(self, **params):\n #since_id, max_id, count, page, trim_user, include_entities\n timeline = self.twitter.api_call('GET','statuses/retweeted_to_me', params)\n self._cache_timeline(timeline, **params)\n return timeline\n\n def retweets_of_me(self, **params):\n #since_id, max_id, count, page, trim_user, include_entities\n timeline = self.twitter.api_call('GET','statuses/retweets_of_me', params)\n self._cache_timeline(timeline, **params)\n return timeline\n\n # Tweets Resources\n def statuses_show(self, id, **params):\n #trim_user, include_entities\n tweet = self._get_cached_tweet(id, **params)\n if not tweet:\n tweet = self.twitter.api_call('GET', 'statuses/show/%s' % id, params)\n self._cache_tweet(tweet, **params)\n return tweet\n\n def statuses_update(self, status, **params):\n #in_reply_to_status_id, lat, long, place_id, display_coordinates, trim_user, include_entities\n pms={'status':status}\n pms.update(params)\n tweet = self.twitter.api_call('POST', 'statuses/update', pms)\n return tweet\n\n def statuses_destroy(self, id, **params):\n #trim_user, include_entities\n tweet = self.twitter.api_call('POST', 'statuses/destroy/%s' % id, params)\n self._del_cached_tweet(id, **params)\n return tweet\n\n def statuses_retweet(self, id, **params):\n #trim_user, include_entities\n tweet = self.twitter.api_call('POST', 'statuses/retweet/%s' % id, params)\n return tweet\n\n def statuses_retweets(self, id, **params):\n #count, trim_user, include_entities\n tweets = self.twitter.api_call('GET', 'statuses/retweets/%s' % id, params)\n return tweets\n\n def statuses_retweeted_by(self, id, **params):\n #count, page, trim_user, include_entities\n users = self.twitter.api_call('GET', 'statuses/%s/retweeted_by' % id, params)\n return users\n\n def statuses_retweeted_by_ids(self, id, **params):\n #count, page, trim_user, include_entities\n ids = self.twitter.api_call('GET', 'statuses/%s/retweeted_by/ids' % id, params)\n return ids\n\n #User resources\n #users_show\n def users_show_by_id(self, user_id, **params):\n user=None\n _tdfr=False\n if '_twitdao_force_refresh' in params:\n _tdfr=params['_twitdao_force_refresh']\n del params['_twitdao_force_refresh']\n if not _tdfr:\n user=self._get_cached_user_by_id(user_id, **params)\n if not user:\n params.update({'user_id':user_id})\n user = self.twitter.api_call('GET', 'users/show', params)\n self._cache_user(user, **params)\n return user\n\n #users_show\n def users_show_by_screen_name(self, screen_name, **params):\n user=None\n _tdfr=False\n if '_twitdao_force_refresh' in params:\n _tdfr=params['_twitdao_force_refresh']\n del params['_twitdao_force_refresh']\n if not _tdfr:\n user=self._get_cached_user_by_screen_name(screen_name, **params)\n if not user:\n params.update({'screen_name':screen_name})\n user = self.twitter.api_call('GET', 'users/show', params)\n self._cache_user(user, **params)\n return user\n\n def users_lookup(self, user_id=None, screen_name=None, **params):\n #include_entities\n pms={}\n if user_id:\n pms = {'user_id':user_id}\n elif screen_name:\n pms 
={'screen_name':screen_name}\n pms.update(params)\n users = self.twitter.api_call('POST', 'users/lookup', pms)\n return users\n\n def users_search(self, q, **params):\n #per_page, page, include_entities\n pms = {'q':q}\n pms.update(params)\n users = self.twitter.api_call('GET', 'users/search', pms)\n return users\n\n def users_suggestions(self):\n sugs = self.twitter.api_call('GET', 'users/suggestions')\n return sugs\n\n def users_suggestions_slug(self, slug):\n sugs = self.twitter.api_call('GET', 'users/suggestions/%s' % slug)\n return sugs\n\n def users_profile_image(self, screen_name, **params):\n #size\n url = self.twitter.api_call('GET', 'users/profile_image/%s' % screen_name, params)\n return url\n\n def statuses_friends(self, **params):\n #user_id, screen_name, cursor, include_entities\n friends = self.twitter.api_call('GET', 'friends/list', params)\n return friends\n\n def statuses_followers(self, **params):\n #user_id, screen_name, cursor, include_entities\n followers = self.twitter.api_call('GET', 'followers/list', params)\n return followers\n\n #List Resources\n def user_lists_post(self, name, **params):\n '''Creates a new list for the authenticated user. Accounts are limited to 20 lists.'''\n #mode, description\n pms = {'name':name}\n pms.update(params)\n ls = self.twitter.api_call('POST', '%s/lists' % self.token.screen_name, pms)\n return ls\n\n def user_lists_id_post(self, id, **params):\n '''Updates the specified list.\n #name, mode, description'''\n ls = self.twitter.api_call('POST', '%s/lists/%s' % (self.token.screen_name, id), params)\n return ls\n\n def user_lists_get(self, screen_name=None, **params):\n '''List the lists of the specified user. Private lists will be included if the authenticated users\n is the same as the user who's lists are being returned.'''\n #cursor\n if not screen_name:\n screen_name = self.token.screen_name\n lists = self.twitter.api_call('GET', '%s/lists' % screen_name, params)\n return lists\n\n def user_list_id_get(self, id, screen_name=None):\n '''Show the specified list. Private lists will only be shown if the authenticated user owns the specified list.'''\n if not screen_name:\n screen_name = self.token.screen_name\n ls = self.twitter.api_call('GET', '%s/lists/%s' % (screen_name, id) )\n return ls\n\n def user_list_id_delete(self, id):\n '''Deletes the specified list. 
Must be owned by the authenticated user.'''\n ls = self.twitter.api_call('POST', '%s/lists/%s' % (self.token.screen_name, id), {'_method':'DELETE'})\n return ls\n\n def user_list_id_statuses(self, id, screen_name, **params):\n '''Show tweet timeline for members of the specified list.'''\n #since_id, max_id, per_page, page, include_entities\n ls = self.twitter.api_call('GET', '%s/lists/%s/statuses' % (screen_name, id), params)\n return ls\n\n def user_list_memberships(self, screen_name, **params):\n '''List the lists the specified user has been added to.'''\n #cursor\n lists = self.twitter.api_call('GET', '%s/lists/memberships' % screen_name, params)\n return lists\n\n def user_list_subscriptions(self, screen_name, **params):\n '''List the lists the specified user follows.'''\n #cursor\n lists = self.twitter.api_call('GET', '%s/lists/subscriptions' % screen_name, params)\n return lists\n\n\n #List Subscribers Resources\n def user_list_id_subscribers_get(self, screen_name, list_id, **params):\n '''Returns the subscribers of the specified list.'''\n #cursor, include_entities\n users = self.twitter.api_call('GET', '%s/%s/subscribers' % (screen_name, list_id), params )\n return users\n\n def user_list_id_subscribers_post(self, screen_name, list_id):\n '''Make the authenticated user follow the specified list.'''\n return self.twitter.api_call('POST', '%s/%s/subscribers' % (screen_name, list_id) )\n\n def user_list_id_subscribers_delete(self, screen_name, list_id, **params):\n '''Unsubscribes the authenticated user form the specified list.'''\n params['_method'] = 'DELETE'\n return self.twitter.api_call('POST', '%s/%s/subscribers' % (screen_name, list_id), params )\n\n def user_list_id_subscribers_id(self, screen_name, list_id, id, **params):\n '''Check if a user is a subscriber of the specified list.'''\n #include_entities\n return self.twitter.api_call('POST', '%s/%s/subscribers/%s' % (screen_name, list_id, id), params )\n\n\n #List Members Resources\n def user_list_id_members_get(self, screen_name, list_id, **params):\n ''' Returns the members of the specified list. '''\n #cursor, include_entities\n return self.twitter.api_call('GET', '%s/%s/members' % (screen_name, list_id), params )\n\n def user_list_id_members_post(self, screen_name, list_id, id):\n '''Add a member to a list. The authenticated user must own the list to be able to add members to it. \n Lists are limited to having 500 members.'''\n params={}\n params['id'] = id\n return self.twitter.api_call('POST', '%s/%s/members' % (screen_name, list_id), params )\n \n def user_list_id_members_create_all(self, screen_name, list_id, **params):\n '''Adds multiple members to a list, by specifying a comma-separated list of member ids or screen names. \n The authenticated user must own the list to be able to add members to it. Lists are limited to having 500 members, \n and you are limited to adding up to 100 members to a list at a time with this method.'''\n #screen_name, user_id\n return self.twitter.api_call('POST', '%s/%s/create_all' %(screen_name, list_id) ,params )\n\n def user_list_id_members_delete(self, screen_name, list_id, id):\n '''Removes the specified member from the list. 
The authenticated user must be the list's owner to remove members from the list.'''\n params={}\n params['_method'] = 'DELETE'\n params['id'] = id\n return self.twitter.api_call('POST', '%s/%s/members' % (screen_name, list_id), params )\n\n def user_list_id_members_id(self, screen_name, list_id, id, **params):\n '''Check if a user is a member of the specified list.'''\n #include_entities\n return self.twitter.api_call('GET', '%s/%s/members/%s' % (screen_name, list_id, id), params )\n\n\n #Direct Messages Resources\n def direct_messages(self, **params):\n #since_id, max_id, count, page, include_entities\n messages = self.twitter.api_call('GET', 'direct_messages', params)\n return messages\n \n def direct_messages_sent(self, **params):\n #since_id, max_id, count, page, include_entities\n message = self.twitter.api_call('GET', 'direct_messages/sent', params)\n return message\n\n def direct_messages_new(self, screen_name, user_id, text, **params):\n #include_entities\n pms = {}\n if user_id:\n params['user_id'] = user_id\n elif screen_name:\n params['screen_name'] = screen_name\n params['text'] = text\n pms.update(params)\n message = self.twitter.api_call('POST', 'direct_messages/new', pms)\n return message\n\n def direct_messages_destroy(self, id, **params):\n #include_entities\n message = self.twitter.api_call('POST', 'direct_messages/destroy/%s' % id, params)\n return message\n\n #Favorites Resources\n def favorites(self, **params):\n #id, page, include_entities\n favorites = None\n if 'id' in params:\n id = params['id']\n del params['id']\n favorites = self.twitter.api_call('GET', 'favorites/%s' % id, params)\n else:\n favorites = self.twitter.api_call('GET', 'favorites/list', params)\n return favorites\n\n def favorites_create(self, id, **params):\n #include_entities\n tweet = self.twitter.api_call('POST', 'favorites/create/%s' % id, params)\n return tweet\n\n def favorites_destroy(self, id, **params):\n #include_entities\n tweet = self.twitter.api_call('POST', 'favorites/destroy/%s' % id, params)\n return tweet\n\n\n #Friendship Resources\n def friendships_create(self, **params):\n #user_id, screen_name, follow, include_entities\n user = self.twitter.api_call('POST', 'friendships/create', params)\n return user\n\n def friendships_destroy(self, **params):\n #user_id, screen_name, include_entities\n user = self.twitter.api_call('POST', 'friendships/destroy', params)\n return user\n \n def friendships_show(self, **params):\n #source_id, source_screen_name, target_id, target_screen_name\n return self.twitter.api_call('GET', 'friendships/show', params)\n\n\n #Account Resources\n def account_verify_credentials(self, **params):\n #include_entities\n return self.twitter.api_call('GET', 'account/verify_credentials', params)\n\n def account_rate_limit_status(self):\n return self.twitter.api_call('GET', 'account/rate_limit_status')\n\n def account_update_delivery_device(self, device, **params):\n #device(sms, none), include_entities\n return self.twitter.api_call('POST', 'account/update_delivery_device', params)\n\n def account_update_profile_colors(self, **params):\n #profile_background_color, profile_text_color, profile_link_color, profile_sidebar_fill_color, profile_sidebar_border_color, include_entities \n return self.twitter.api_call('POST', 'account/update_profile_colors', params)\n\n def account_update_profile_image(self, image, **params):\n #include_entities\n #image-> ('param_name', file_name, image_content) \n return self.twitter.api_call('POST', 'account/update_profile_image', params, 
[image])\n\n def account_update_profile_background_image(self, image, **params):\n #tile, include_entities\n #image-> ('param_name', file_name, image_content)\n return self.twitter.api_call('POST', 'account/update_profile_background_image', params, [image])\n\n def account_update_profile(self, **params):\n #name, url, location, description, include_entities\n return self.twitter.api_call('POST', 'account/update_profile', params)\n\n\n #Block Resources\n def blocks_create(self, **params):\n #user_id, screen_name, include_entities\n user = self.twitter.api_call('POST', 'blocks/create', params)\n return user\n \n def blocks_destroy(self, **params):\n #user_id, screen_name, include_entities\n user = self.twitter.api_call('POST', 'blocks/destroy', params)\n return user\n \n def blocks_blocking(self, **params):\n #page, include_entities\n blocking = self.twitter.api_call('GET', 'blocks/list', params)\n return blocking#user list\n\n #Spam Reporting resources\n def report_spam(self, **params):\n #user_id, screen_name, include_entities\n user = self.twitter.api_call('POST', 'users/report_spam', params)\n return user\n \n #Saved Searches Resources\n def saved_searches(self):\n return self.twitter.api_call('GET','saved_searches/list')\n\n def API_limit_rate(self):\n return self.twitter.api_call('GET','account/rate_limit_status')\n\n def saved_searches_show(self, id):\n return self.twitter.api_call('GET','saved_searches/show/%s' % id)\n\n def saved_searches_create(self, **params):\n #query\n return self.twitter.api_call('POST','saved_searches/create/%s', params)\n \n def saved_searches_destroy(self, id):\n return self.twitter.api_call('POST','saved_searches/destroy/%s' % id)\n\n #Search API\n def search(self, q, **params):\n #lang, locate, rpp, page, since_id, until, geocode, show_user, result_type\n timeline = self.twitter.search_api_call(q, **params)\n return timeline\n\n #Hacked Search\n def hacked_search(self, q, since_id=None, page=None):\n return self.twitter.hacked_search(q, since_id, page)\n\n #Hacked \n def hacked_following_followers_of(self, user_id):\n # Also followed by.\n return self.twitter.hacked_following_followers_of(user_id)\n\n def hacked_follows_in_common_with(self, user_id):\n # You both follow.\n return self.twitter.hacked_follows_in_common_with(user_id)\n\n" }, { "alpha_fraction": 0.6032883524894714, "alphanum_fraction": 0.6041315197944641, "avg_line_length": 28.283950805664062, "blob_id": "8a69ff9002f6e27ee653744278564fbba39e23f0", "content_id": "9575e44a6817957672346a81edabb54fbebffcb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2372, "license_type": "no_license", "max_line_length": 114, "num_lines": 81, "path": "/queue.py", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\n\nfrom base import BaseHandler\nfrom twitdao import Twitdao\n\nimport md\nimport urllib\nimport logging\n\nclass UpdateUserCache(BaseHandler):\n\n def get(self):\n tk = self.param('tk')\n screen_name = self.param('screen_name')\n user_id = self.param('user_id')\n\n params={'_twitdao_force_refresh':True}\n include_entities = self.param('include_entities')\n if include_entities:\n params.update({'include_entities':include_entities})\n\n token = md.get_access_token(tk)\n td = Twitdao(token)\n user = None\n if user_id:\n user=td.users_show_by_id(user_id=user_id, **params)\n elif screen_name:\n 
user=td.users_show_by_screen_name(screen_name=screen_name, **params)\n logging.debug(user)\n if 'X-AppEngine-QueueName' not in self.request.headers:\n self.write(repr(user))\n\n\nclass VerifyAccess(BaseHandler):\n def get(self):\n tk = self.param('tk')\n token = md.get_access_token(tk)\n\n if not token:\n logging.debug('Token not found.')\n return\n\n td = Twitdao(token)\n token_user = td.account_verify_credentials()\n if 'error' in token_user:\n logging.debug('Delete invalid token: %s' % token)\n md.delete_access_token(token.key())\n else:\n logging.debug('Verified token: %s' % token)\n if 'X-AppEngine-QueueName' not in self.request.headers:\n self.write(repr(token_user))\n\n\nclass ListAddUser(BaseHandler):\n def get(self):\n tk = self.param('tk')\n list_id = self.param('list_id')\n screen_name = self.param('screen_name')\n\n token = md.get_access_token(tk)\n td = Twitdao(token)\n lst=td.user_list_id_members_post(token.screen_name, urllib.quote(list_id.encode('utf-8')), id=screen_name)\n logging.debug(lst)\n if 'X-AppEngine-QueueName' not in self.request.headers:\n self.write(repr(lst))\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/q/update_user_cache', UpdateUserCache),\n ('/q/verify_access', VerifyAccess),\n ('/q/list_add_user', ListAddUser),\n\n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6277292370796204, "alphanum_fraction": 0.6320960521697998, "avg_line_length": 182.39999389648438, "blob_id": "40290e26fac506278ecf728444b50508435c549c", "content_id": "cccd32e99d5d8b327e51fa6ba4592ec53d07406b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 916, "license_type": "no_license", "max_line_length": 680, "num_lines": 5, "path": "/templates/inc/list.html", "repo_name": "mfs6174/Twitdao11", "src_encoding": "UTF-8", "text": "<li class=\"list\" slug=\"{{ list.slug }}\" lid=\"{{ list.id }}\">\n\t<span class=\"list-img\"><img src=\"{{ list.user.profile_image_url|secure_image }}\" width=\"36\" height=\"36\" /></span>\n\t<span class=\"list-heading\">{% ifequal list.mode \"private\" %}<img src=\"/images/lock.gif\" />{% endifequal %}<a href=\"/t{{ list.uri }}\" class=\"full-name\">{{ list.full_name }}</a> Following:{{ list.member_count }} Followers:{{ list.subscriber_count }} <span class=\"list-action\">{% ifnotequal list.user.id token_user.id %}{% if list.following %}<a href=\"/a/list_unfollow{{ list.uri }}\" class=\"list_unfo\">Unfollow</a> {% else %}<a href=\"/a/list_follow{{ list.uri }}\" class=\"list_fo\">Follow</a> {% endif %}{% else %}<a href=\"/a/list_edit/{{ list.slug }}\" class=\"list_edit\">Edit</a> <a href=\"/a/list_delete/{{ list.slug }}\" class=\"list_delete\">Delete</a>{% endifnotequal %}</span></span>\n\t<span class=\"list-bio\">{{ list.description }}</span>\n</li>" } ]
30
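The utils.py entry in the Twitdao11 row above encodes numeric tweet ids into URL-safe base-66 strings, but it is Python 2 code (it calls long()). Below is a minimal Python 3 sketch of the same round-trip — the alphabet is copied from the source, divmod replaces the manual %-and-// pair, and the zero/invalid-input behavior (empty string out, 0 back) is kept as in the original:

# Python 3 sketch of the base-66 tweet-id round-trip from utils.py above.
_URLSAFE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.'
_BASE = len(_URLSAFE)  # 66

def tweet_id_encode(n: int) -> str:
    out = []
    while n > 0:
        n, m = divmod(n, _BASE)   # peel off the least significant digit
        out.append(_URLSAFE[m])
    return ''.join(reversed(out))

def tweet_id_decode(t: str) -> int:
    n = 0
    for c in t:
        if c not in _URLSAFE:
            return 0              # the original signals bad input with 0
        n = n * _BASE + _URLSAFE.index(c)
    return n

assert tweet_id_decode(tweet_id_encode(123456789)) == 123456789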
nax71/flask-with-sqlalchemy
https://github.com/nax71/flask-with-sqlalchemy
7abde4d0987ffa28227c81f8d933a3ef1e335ea8
cf0a949e3c0b3fa3f02c0596bf1b7a36a2179f83
e59d8c3e38cacb1ab1059d3d54e8b00b61dd31fd
refs/heads/master
2020-06-06T23:58:48.795828
2019-06-21T12:57:49
2019-06-21T12:57:49
192,880,651
0
0
null
2019-06-20T08:27:20
2019-06-21T08:55:51
2019-06-21T12:57:49
Python
[ { "alpha_fraction": 0.6198764443397522, "alphanum_fraction": 0.629983127117157, "avg_line_length": 32.58490753173828, "blob_id": "4f91a9d9e76b5eada2a317e062e5c981f8512cce", "content_id": "6b93bfa53626596bec9d21e33937908091ed8763", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1781, "license_type": "no_license", "max_line_length": 71, "num_lines": 53, "path": "/tests/test_views.py", "repo_name": "nax71/flask-with-sqlalchemy", "src_encoding": "UTF-8", "text": "# tests/test_views.py\nfrom flask_testing import TestCase\nfrom wsgi import app\n\nclass TestViews(TestCase):\n def create_app(self):\n app.config['TESTING'] = True\n return app\n\n def test_products_json(self):\n response = self.client.get(\"/products\")\n products = response.json\n #print(type(products))\n self.assertIsInstance(products, list)\n# GET\n def test_GET_EXIST_PRODUCT(self):\n response = self.client.get(\"/products/1\")\n products = response.data.decode()\n jproducts = response.json\n print(f\"type : {type(jproducts)}\")\n print(f\"response: {response.status_code}\")\n print(f\"jproducts: {jproducts}\")\n self.assertEqual(response.status_code,200)\n\n def test_GET_NO_EXIST_PRODUCT(self):\n response = self.client.get(\"/products/5\")\n products = response.data.decode()\n jproducts = response.json\n\n print(f\"type : {type(jproducts)}\")\n print(f\"response: {response.status_code}\")\n print(f\"jproducts: {jproducts}\")\n\n self.assertIsInstance(jproducts, list)\n self.assertEqual(len(jproducts),0)\n self.assertEqual(response.status_code,200)\n\n def test_add_product(self):\n new_name = 'MontBlanc'\n response = self.client.post(\"/products\",data={'name':new_name})\n product = response.data.decode()\n jproduct = response.json\n print(f\"New product: {product}\")\n self.assertEqual(jproduct['name'],new_name)\n\n #response = self.client.get(\"/products/4\")\n #roducts = response.data.decode()\n #self.assertEqual(response.status_code,200)\n\n def test_delete_product(self):\n response = self.client.delete(\"/products/42\")\n\n self.assertEqual(response.status_code,204)\n\n" }, { "alpha_fraction": 0.8021978139877319, "alphanum_fraction": 0.8021978139877319, "avg_line_length": 44.5, "blob_id": "6e41549398f978db092b4f8e3a6ce7735ef95e2a", "content_id": "f8c1246e214d81c185869aaf0e42c1eda51bd9d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "no_license", "max_line_length": 66, "num_lines": 2, "path": "/README.md", "repo_name": "nax71/flask-with-sqlalchemy", "src_encoding": "UTF-8", "text": "# flask-with-sqlalchemy\n# Execute the test with FLASK_ENV=development pipenv run flask run\n" }, { "alpha_fraction": 0.6865735054016113, "alphanum_fraction": 0.6912325024604797, "avg_line_length": 27.445783615112305, "blob_id": "e00c5656ed80fd47e982207bc6e5501905f79043", "content_id": "6ae3350fe9e7a7452b41f0fee58769f73f116e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2361, "license_type": "no_license", "max_line_length": 95, "num_lines": 83, "path": "/wsgi.py", "repo_name": "nax71/flask-with-sqlalchemy", "src_encoding": "UTF-8", "text": "# wsgi.py\nfrom flask import Flask,request,abort,render_template\n\nfrom config import Config\napp = Flask(__name__)\napp.config.from_object(Config)\n\nimport os\nimport logging\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow # Order is important here!\n\ndb = 
SQLAlchemy(app)\nma = Marshmallow(app)\n\nfrom models import Product\nfrom schema import products_schema,product_schema\n\n\n#logging.warn(os.environ[\"DUMMY\"])\[email protected]('/')\ndef home():\n products = db.session.query(Product).all()\n return render_template('home.html', products=products)\[email protected]('/<int:id>')\ndef product_html(id):\n product = db.session.query(Product).get(id)\n return render_template('product.html', product=product)\n\[email protected]('/hello')\ndef hello():\n return \"Hello World!\"\n\[email protected]('/products')\ndef get_products():\n products = db.session.query(Product).all() # SQLAlchemy request => 'SELECT * FROM products'\n return products_schema.jsonify(products)\n\[email protected]('/products/<int:id_prod>')\ndef get_product(id_prod):\n products = db.session.query(Product).filter_by(id=id_prod)\n return products_schema.jsonify(products)\n\[email protected]('/products/<int:id_prod>', methods=['DELETE'])\ndef delete_product(id_prod):\n db.session.query(Product).filter_by(id=id_prod).delete()\n db.session.commit()\n return '', 204\n # product = db.session.query(Product).get(id_prod)\n # print(f\"delete_product {id_prod}:\")\n\n # if not product:\n # abort(404)\n # db.session.delete(product)\n # db.session.commit()\n # return product_schema.jsonify(None)\n\[email protected]('/products', methods=['POST'])\ndef post_method():\n name = request.form.get('name')\n print(f\"Name: {name}\")\n if not isinstance(name,str):\n abort(404)\n new_prod = Product(name=name)\n db.session.add(new_prod)\n db.session.commit()\n # recupere l'element introduit avec commit dans la base\n # new_prod contiendra l'elemnt recupere de la base\n print(\"new_prod = \", new_prod)\n return product_schema.jsonify(new_prod)\n\[email protected]('/products', methods=['PATCH'])\ndef patch_method():\n pass\n\[email protected]('/celery')\ndef products():\n from tasks import very_slow_add\n very_slow_add.delay(1, 2) # This pushes a task to Celery and does not block.\n\n products = db.session.query(Product).all()\n return products_schema.jsonify(products)\n" } ]
3
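The wsgi.py entry in the flask-with-sqlalchemy row above answers DELETE /products/<id> with 204 unconditionally; the 404 branch for a missing row is left commented out. A hedged sketch of the stricter variant follows, reusing the db session and Product model that repo defines (the handler name delete_product_strict is made up for illustration, and it would replace — not sit beside — the existing route):

# Sketch only: assumes the `app`, `db`, and `Product` objects from the
# repo's wsgi.py/models.py; not the repo's actual behavior.
from flask import abort

@app.route('/products/<int:id_prod>', methods=['DELETE'])
def delete_product_strict(id_prod):
    product = db.session.query(Product).get(id_prod)
    if product is None:
        abort(404)                # unknown id -> 404 instead of a silent 204
    db.session.delete(product)
    db.session.commit()
    return '', 204

Note that the repo's own tests/test_views.py asserts a 204 for the unknown id 42, so adopting this variant would require updating that test as well.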
isabella232/pynamodb-attributes
https://github.com/isabella232/pynamodb-attributes
c72b0fad559f97ba4a60e143e90619865563dedc
289f2df1e18837469a25ae65ba821fc0728509f1
b9a263be76d4d1b4c069b5aeaa3fe7bd39b670aa
refs/heads/master
2022-11-18T05:31:27.255790
2020-07-22T14:39:16
2020-07-22T14:39:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6295337080955505, "alphanum_fraction": 0.6308290362358093, "avg_line_length": 26.571428298950195, "blob_id": "5d1f246f342825f8b2fd3eb87137c6ec494b53f3", "content_id": "50082feb25bc319eb2a8c785e02623d6d5c5d0e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "permissive", "max_line_length": 75, "num_lines": 28, "path": "/pynamodb_attributes/_typing.py", "repo_name": "isabella232/pynamodb-attributes", "src_encoding": "UTF-8", "text": "from typing import Any\nfrom typing import Generic\nfrom typing import overload\nfrom typing import TYPE_CHECKING\nfrom typing import TypeVar\n\nimport pynamodb.attributes\n\n_T = TypeVar('_T')\n\n# TODO: derive from pynamodb.attributes.Attribute directly when pynamodb>=5\nif TYPE_CHECKING:\n _A = TypeVar('_A', bound=pynamodb.attributes.Attribute[Any])\n\n class Attribute(Generic[_T], pynamodb.attributes.Attribute[_T]):\n @overload\n def __get__(self: _A, instance: None, owner: Any) -> _A:\n ...\n\n @overload\n def __get__(self, instance: Any, owner: Any) -> _T:\n ...\n\n def __get__(self, instance: Any, owner: Any) -> Any:\n ...\nelse:\n class Attribute(Generic[_T], pynamodb.attributes.Attribute):\n pass\n" } ]
1
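The _typing.py entry in the pynamodb-attributes row above relies on typed __get__ overloads so that, under a type checker, class-level access yields the attribute object while instance access yields the wrapped value. Here is a self-contained sketch of that descriptor-typing trick, independent of pynamodb (all names are illustrative):

# Class access returns the descriptor itself; instance access the value.
from typing import Any, Generic, TypeVar, overload

T = TypeVar('T')

class Typed(Generic[T]):
    def __init__(self, value: T) -> None:
        self._value = value

    @overload
    def __get__(self, instance: None, owner: Any) -> 'Typed[T]': ...
    @overload
    def __get__(self, instance: Any, owner: Any) -> T: ...

    def __get__(self, instance: Any, owner: Any) -> Any:
        return self if instance is None else self._value

class Model:
    count = Typed(0)

descriptor = Model.count   # typed as Typed[int] (the attribute object)
value = Model().count      # typed as int (the stored value)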
Coastchb/django_learning
https://github.com/Coastchb/django_learning
86afcfca3b8f8477bb6aa6137aacf6ab2bf7ac88
9305c92cfeea8092bbe422d23148c9d632bd683b
bfab376696c96ca67f35d34192a675976096267c
refs/heads/master
2020-05-23T03:37:03.666795
2019-05-16T00:36:40
2019-05-16T00:36:40
186,620,142
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5404120683670044, "alphanum_fraction": 0.5927099585533142, "avg_line_length": 26.434782028198242, "blob_id": "63825736208253bf8a2dc4c9d0fa6fff36bc43e4", "content_id": "5e0ecf2dcfab12bad7ae7bf29ef1aa72d6492ec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 98, "num_lines": 23, "path": "/project_0/polls/migrations/0003_auto_20190516_0834.py", "repo_name": "Coastchb/django_learning", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0 on 2019-05-16 00:34\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('polls', '0002_auto_20190511_1701'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='question',\n name='pub_date',\n field=models.DateTimeField(help_text='when published', verbose_name='date published'),\n ),\n migrations.AlterField(\n model_name='question',\n name='question_text',\n field=models.CharField(help_text='content of the query', max_length=200),\n ),\n ]\n" } ]
1
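The migration in the django_learning row above only records AlterField operations. Below is a hedged reconstruction of the polls/models.py state that makemigrations would have generated it from — not the repo's actual file, and it assumes a configured Django project to import into:

# Field definitions matching the AlterField operations in
# 0003_auto_20190516_0834 above (first positional arg of
# DateTimeField is its verbose_name).
from django.db import models

class Question(models.Model):
    question_text = models.CharField(max_length=200,
                                     help_text='content of the query')
    pub_date = models.DateTimeField('date published',
                                    help_text='when published')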
ai17339/Survivor-Game
https://github.com/ai17339/Survivor-Game
c581d4580a3724ec07dfcc80df9e2e3e6611f764
c31aa8e7f7f5e9b87fd3751f01cbc07d25b83c57
4758e8834f46935618c805e58575237dae1d3d2d
refs/heads/master
2021-05-20T15:49:26.314089
2020-04-02T04:58:14
2020-04-02T04:58:14
252,354,465
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4567584991455078, "alphanum_fraction": 0.489643394947052, "avg_line_length": 44.028846740722656, "blob_id": "dacc226ed8b25e50f890e9d8602c7760d2c842ec", "content_id": "6b39ed4a123dcee5df5373d218f51485b32ddea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14049, "license_type": "no_license", "max_line_length": 214, "num_lines": 312, "path": "/Game.pyw", "repo_name": "ai17339/Survivor-Game", "src_encoding": "UTF-8", "text": "# -------------------------------------------------------------------------------\n# Name: Survivor\n# Purpose:\n#\n# Author: Archie Irving\n#\n# Created: 27/02/2020\n# -------------------------------------------------------------------------------\n\n# Imports\nimport math, time, random, pygame\nimport Useful, Assets, Sprites\nfrom Constants import*\n\n# Scenes #######################################################################################################################################################################\n# ##############################################################################################################################################################################\n\n# MAIN MENU SCENE ##############################################################################################################################################################\ndef main_menu():\n # Buttons\n play_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"red\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], 400, 350, 600, 100, 100, 8, \"PLAY\", True)\n controls_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"red\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], 400, 500, 600, 100, 100, 8, \"CONTROLS\", True)\n exit_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"red\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], 400, 800, 400, 100, 100, 8, \"EXIT\", True)\n \n # Creates spinning sprite\n player_sprite = Sprites.menu_sprite(1300, 500)\n \n # Runs the menu loop\n run = True\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n # DISPLAY ##############################################################################################################################################################\n\n # Background\n SCREEN.blit(Assets.background.BACKGROUND_1, (0, 0))\n\n # Draws buttons\n play_button.update()\n controls_button.update()\n exit_button.update()\n\n # Button Actions\n if play_button.was_clicked:\n main_game()\n if controls_button.was_clicked:\n controls_menu()\n if exit_button.was_clicked:\n pygame.quit()\n\n # Draws Title \n Sprites.Text.draw_text('SURVIVOR', 300 , COLORS[\"red\"], SCREEN, 400, 100)\n Sprites.Text.draw_text('SURVIVOR', 300 , COLORS[\"black\"], SCREEN, 400 - 2, 100 - 1)\n\n # Draws Sprite\n player_sprite.update()\n\n # Updates dispay every frame \n pygame.display.update()\n\n pygame.quit()\n\n# CONTROLS SCENE ###############################################################################################################################################################\ndef controls_menu():\n # Backdrop for text\n controls_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"blackish\"], COLORS[\"blackish\"], COLORS[\"white\"], COLORS[\"white\"], COLORS[\"white\"], COLORS[\"black\"], 'centered', 100, 800, 700, 100, 8, \"\", True)\n\n # Button 
that takes player back to menu\n menu_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"red\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], COLORS[\"silver\"], COLORS[\"black\"], 'centered', 850, 650, 100, 100, 8, \"BACK TO MENU\", True)\n\n # Runs loop\n run = True\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n run = False\n\n # DISPLAY ##############################################################################################################################################################\n \n # Background\n SCREEN.blit(Assets.background.BACKGROUND_1, (0, 0))\n\n # Draws buttons\n controls_button.update()\n menu_button.update()\n\n # Button Actions\n if menu_button.was_clicked:\n main_menu()\n\n # Controls title\n Sprites.Text.draw_text('CONTROLS', 80 , COLORS[\"white\"], SCREEN, 'centered', 120)\n\n # Shooting control\n Sprites.Text.draw_text('Shooting:', 50 , COLORS[\"silver\"], SCREEN, 600, 210)\n Sprites.Text.draw_text('MouseButton1', 50 , COLORS[\"silver\"], SCREEN, 1050, 210)\n\n # Movement\n Sprites.Text.draw_text('Move Left:', 50 , COLORS[\"silver\"], SCREEN, 600, 270)\n Sprites.Text.draw_text('a', 50 , COLORS[\"silver\"], SCREEN, 1050, 270)\n Sprites.Text.draw_text('Move Right:', 50 , COLORS[\"silver\"], SCREEN, 600, 330)\n Sprites.Text.draw_text('d', 50 , COLORS[\"silver\"], SCREEN, 1050, 330)\n Sprites.Text.draw_text('Move Up:', 50 , COLORS[\"silver\"], SCREEN, 600, 390)\n Sprites.Text.draw_text('w', 50 , COLORS[\"silver\"], SCREEN, 1050, 390)\n Sprites.Text.draw_text('Move Down:', 50 , COLORS[\"silver\"], SCREEN, 600, 450)\n Sprites.Text.draw_text('s', 50 , COLORS[\"silver\"], SCREEN, 1050, 450)\n\n # Development tool\n Sprites.Text.draw_text('Show colision boxes:', 50 , COLORS[\"silver\"], SCREEN, 600, 510)\n Sprites.Text.draw_text('p', 50 , COLORS[\"silver\"], SCREEN, 1050, 510)\n\n # Updates display every frame\n pygame.display.update()\n\n# GAME SCENE ###################################################################################################################################################################\ndef main_game():\n # Clears all lists \n BULLETS.clear()\n ZOMBIES.clear()\n PLAYER.clear()\n BULLETS_FIRED_LIST.clear()\n\n # Resets all counts \n global round_count\n global round_count\n global zombie_count\n global bullets_fired\n round_count = 0 \n old_round_count = 0\n zombie_count = 0\n bullets_fired = 0\n dead_zombies = 0\n\n # Creates ammo and health bar rects\n UI_bar_width = 1115\n UI_bar_hieght = 300\n UI_bar_x = 350\n UI_bar_y = 960\n \n # Creates a player sprite\n PLAYER1 = Sprites.Player(700, 500, 3, SPRITE_STAND)\n PLAYER.append(PLAYER1)\n\n # Scene loop\n run = True\n while run:\n \n # Event handeling\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n pygame.display.quit()\n pygame.quit()\n \n # Time Keeper\n Useful.Game.delta_time = Useful.Game.get_deltatime()\n Useful.Game.ticks_Last_Frame = pygame.time.get_ticks()\n\n # DISPLAY ##############################################################################################################################################################\n\n # Background\n SCREEN.blit(Assets.background.BACKGROUND_1, (0,0))\n\n # Displays hit_boxes of sprites\n Useful.Game.display_hit_box()\n\n # Bullets\n for bullet in BULLETS:\n if bullet.in_bounds == False:\n BULLETS.pop(BULLETS.index(bullet))\n\n # Blitz's bullet each frame\n bullet.update()\n\n # Updates number of bullets fired\n bullets_fired = 
len(BULLETS_FIRED_LIST)\n\n # Zombies to spawn each round\n if zombie_count == 0:\n round_count += 1\n zombie_count = round_count * 3\n\n # Randomly spawns zombies\n if round_count == old_round_count + 1:\n for zombie in range(0, zombie_count):\n random_spawn_point = Useful.zombie_spawn.spawn_point()\n ZOMBIES.append(Sprites.Zombie(random_spawn_point[0], random_spawn_point[1], Useful.zombie_spawn.speed(), Assets.zombie_animations.SPRITE_STAND))\n old_round_count = round_count\n \n # Updates zombies each frame\n for zombie in ZOMBIES:\n zombie.update()\n \n # Deletes zombie if its health is 0\n for zombie in ZOMBIES:\n if zombie.health <= 0:\n pygame.mixer.find_channel().play(pygame.mixer.Sound(r'Sounds\\ping2.wav'))\n ZOMBIE_DROPS.append(Sprites.zombie_drop(zombie.x, zombie.y))\n ZOMBIES.pop(ZOMBIES.index(zombie))\n zombie_count -= 1\n dead_zombies += 1\n\n # Zombie drops\n for drop in ZOMBIE_DROPS:\n drop.drop()\n if drop.finished == True:\n ZOMBIE_DROPS.pop(ZOMBIE_DROPS.index(drop))\n\n # Updates player each frame \n PLAYER1.update()\n\n # Ends game if player dies\n if PLAYER1.alive == False:\n time_dead = 0\n while True:\n Useful.Game.delta_time = Useful.Game.get_deltatime()\n Useful.Game.ticks_Last_Frame = pygame.time.get_ticks()\n Sprites.Text.draw_text(\"You DIED!\", 400, COLORS[\"red\"], SCREEN, 'centered', 400)\n time_dead += Useful.Game.delta_time\n if time_dead >= 2:\n dead_screen(dead_zombies, round_count, bullets_fired)\n pygame.display.update()\n print(time_dead)\n\n # UI ################################################################################################################################################################## \n \n # Ammo and HP\n UI_bar_border = pygame.draw.rect(SCREEN, COLORS['black'], (UI_bar_x - 5, UI_bar_y - 5, UI_bar_width + 10, UI_bar_hieght + 10))\n UI_bar_rect = pygame.draw.rect(SCREEN, COLORS['blackish'], (UI_bar_x, UI_bar_y, UI_bar_width, UI_bar_hieght))\n Sprites.Text.draw_text(\"AMMO: \" + str(PLAYER1.BULLETS) + \"/\" + str(PLAYER1.BULLETS_LEFT), 80, COLORS[\"yellow\"], SCREEN, UI_bar_x + 20, 980)\n \n # Round number \n round_number_bar_border = pygame.draw.rect(SCREEN, COLORS['black'], (0 - 5, UI_bar_y - 5, 350 + 10, 100 + 10))\n round_number_bar_rect = pygame.draw.rect(SCREEN, COLORS['blackish'], (0, UI_bar_y, 350, 100))\n Sprites.Text.draw_text(\"ROUND \" + str(round_count), 80, COLORS[\"white\"], SCREEN, 30, 980)\n\n # Health Bar\n bar_height = 50\n red_bar_width = 450\n health_bar_width = (red_bar_width/100) * PLAYER1.health\n red_bar = pygame.draw.rect(SCREEN, COLORS['red'], (UI_bar_x + 640, 980, red_bar_width, bar_height))\n green_bar = pygame.draw.rect(SCREEN, COLORS['green'], (UI_bar_x + 640, 980, health_bar_width, bar_height))\n Sprites.Text.draw_text(\"HP \", 80, COLORS[\"red\"], SCREEN, UI_bar_x + 530, 980)\n Sprites.Text.draw_text(str(PLAYER1.health), 80, COLORS[\"black\"], SCREEN, UI_bar_x + 670, 980)\n\n # Zombie bar\n zombie_bar_border = pygame.draw.rect(SCREEN, COLORS['black'], (UI_bar_x + UI_bar_width - 5, UI_bar_y - 5, 450 + 10, UI_bar_hieght + 10))\n zombie_bar_rect = pygame.draw.rect(SCREEN, COLORS['blackish'], (UI_bar_x + UI_bar_width, UI_bar_y, 450, UI_bar_hieght))\n zombies_left = Sprites.Text.draw_text(\"Zombies Left: \" + str(zombie_count), 40, COLORS[\"white\"], SCREEN, UI_bar_x + UI_bar_width + 15, 975)\n zombies_dead = Sprites.Text.draw_text(\"Zombies Killed: \" + str(dead_zombies), 40, COLORS[\"silver\"], SCREEN, UI_bar_x + UI_bar_width + 15, 1015)\n \n # Frame rate\n 
Sprites.Text.draw_text(str(round(1/Useful.Game.delta_time)) + \"FPS\", 40, COLORS[\"black\"], SCREEN, 1800, 20)\n\n # Updates display every frame \n pygame.display.update()\n\n# DEAD SCENE ###################################################################################################################################################################\ndef dead_screen(zombies_dead, rounds, bullets_fired):\n # Buttons\n play_again_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"red\"], COLORS[\"black\"], COLORS[\"white\"], COLORS[\"lime\"], COLORS[\"red\"], COLORS[\"black\"], 'centered', 300, 800, 100, 100, 8, \"PLAY AGAIN\", True)\n back_to_menu_button = Sprites.Button(COLORS[\"blackish\"], COLORS[\"red\"], COLORS[\"black\"], COLORS[\"white\"], COLORS[\"lime\"], COLORS[\"red\"], COLORS[\"black\"], 'centered', 450, 800, 100, 100, 8, \"BACK TO MENU\", True)\n stats = Sprites.Button(COLORS[\"blackish\"], COLORS[\"blackish\"], COLORS[\"blackish\"], COLORS[\"white\"], COLORS[\"white\"], COLORS[\"white\"], COLORS[\"black\"], 'centered', 590, 800, 400, 100, 8, \"\", True)\n\n # Clears lists\n BULLETS.clear()\n ZOMBIES.clear()\n PLAYER.clear()\n\n # Runs loop\n run = True\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n # DISPLAY ##############################################################################################################################################################\n\n # Background\n SCREEN.blit(Assets.background.BACKGROUND_1, (0, 0))\n\n # Draws buttons\n play_again_button.update()\n back_to_menu_button.update()\n stats.update()\n\n # Button Actions\n if play_again_button.was_clicked:\n main_game()\n if back_to_menu_button.was_clicked:\n main_menu()\n\n # Draws Title \n Sprites.Text.draw_text('YOU DIED', 300, COLORS[\"red\"], SCREEN, 'centered', 50)\n\n # STATS \n Sprites.Text.draw_text('STATS', 150, COLORS[\"silver\"], SCREEN, 'centered', 600)\n Sprites.Text.draw_text('Rounds Alive: ' + str(rounds), 100, COLORS[\"white\"], SCREEN, 'centered', 700)\n Sprites.Text.draw_text('Zombies Killed: ' + str(zombies_dead), 100, COLORS[\"white\"], SCREEN, 'centered', 800)\n Sprites.Text.draw_text('Bullets Fired: ' + str(bullets_fired), 100, COLORS[\"white\"], SCREEN, 'centered', 900)\n \n # Updates display every frame\n pygame.display.update()\n \n pygame.quit()\n\n# Game Start ########################################################################################################################################################################\n# ##############################################################################################################################################################################\nmain_menu()\n" }, { "alpha_fraction": 0.6605868339538574, "alphanum_fraction": 0.6756542325019836, "avg_line_length": 21.140350341796875, "blob_id": "ed74ef1db44c287d2b3d9827c50b299a06cafdc3", "content_id": "d3205232ed30d6bc37a113881fc264df58ea1eea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1261, "license_type": "no_license", "max_line_length": 59, "num_lines": 57, "path": "/Constants.py", "repo_name": "ai17339/Survivor-Game", "src_encoding": "UTF-8", "text": "# Imports\nimport os, pygame\n\nimport Assets\n\n#######################################################\n# Creates constant variables used over multiple files #\n#######################################################\n\n# initialize the pygame 
module\npygame.init()\npygame.mixer.set_num_channels(30)\n\n# Surface\npygame.display.set_caption(\"Survivor\")\n\n# Screen \nos.environ['SDL_VIDEO_WINDOW_POS'] = '0, 30'\nS_WIDTH = 1920\nS_HEIGHT = 1050\nSCREEN = pygame.display.set_mode((S_WIDTH, S_HEIGHT))\n\n# Lists\nBULLETS = []\nZOMBIES = []\nPLAYER = []\nZOMBIE_DROPS = []\nBULLETS_FIRED_LIST = []\n\n# Round count\nround_count = 0 \nold_round_count = 0\n\n# Zombie count\nzombie_count = 0\n\n# Bullet count\nbullets_fired = 0\n\n# Assets\nCOLORS = Assets.color.COLORS\n\n# Sprite Animations\nSPRITE_STAND = Assets.sprite_animations.SPRITE_STAND\nSPRITE_WALK = Assets.sprite_animations.SPRITE_WALK\nSPRITE_RELOAD = Assets.sprite_animations.SPRITE_RELOAD\nSPRITE_SHOOT = Assets.sprite_animations.SPRITE_SHOOT\n\n# Gun Sounds\nGUNSHOT_SOUND = pygame.mixer.Sound(r'Sounds\\gunshot.wav')\nGUNSHOT_SOUND2 = pygame.mixer.Sound(r'Sounds\\gunshot3.wav')\nRELOAD_SOUND = pygame.mixer.Sound(r'Sounds\\reload.wav')\n\n#pygame\nKEY_PRESSED = pygame.key.get_pressed()\nMOUSE_POS = pygame.mouse.get_pos()\nCLOCK = pygame.time.Clock()" }, { "alpha_fraction": 0.6082921624183655, "alphanum_fraction": 0.6310174465179443, "avg_line_length": 76.90540313720703, "blob_id": "312060e7c7fb384a4648734b178111f4a67befbf", "content_id": "ad97b24d77911a1f67a383ffa826063221dbdd7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11529, "license_type": "no_license", "max_line_length": 109, "num_lines": 148, "path": "/Assets.py", "repo_name": "ai17339/Survivor-Game", "src_encoding": "UTF-8", "text": "# Imports\nimport pygame\n\n#########################################\n# Creates lists of assests used in game #\n#########################################\n\nclass color:\n COLORS = {\n \"white\": (255, 255, 255),\n \"black\": (0, 0, 0),\n \"blackish\": (30, 30, 30),\n \"blue\": (0, 0, 255),\n \"lime\": (0, 255, 0),\n \"red\": (255, 0, 0),\n \"yellow\": (255, 255, 0),\n \"aqua\": (0, 255, 255),\n \"magenta\": (255, 0, 255),\n \"silver\": (192, 192, 192),\n \"gray\": (128, 128, 128),\n \"maroon\": (128, 0, 0),\n \"olive\": (128, 128, 0),\n \"green\": (0, 128, 0),\n \"purple\": (128, 0, 128),\n \"teal\": (0, 128, 128),\n \"navy\": (0, 0, 128),\n }\n\nclass sprite_animations:\n SPRITE_STAND = [pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_0.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_1.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_2.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_3.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_4.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_5.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_6.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_7.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_8.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_9.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_10.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_11.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_12.png'),\n 
pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_13.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_14.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_15.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_16.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_17.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_18.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\idle\\survivor-idle_rifle_19.png')]\n\n SPRITE_WALK = [pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_0.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_1.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_2.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_3.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_4.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_5.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_6.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_7.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_8.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_9.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_10.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_11.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_12.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_13.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_14.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_15.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_16.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_17.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_18.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\move\\survivor-move_rifle_19.png')]\n\n SPRITE_RELOAD = [pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_0.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_1.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_2.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_3.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_4.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_5.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_6.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_7.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_8.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_9.png'),\n 
pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_10.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_11.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_12.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_13.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_14.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_15.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_16.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_17.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_18.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\reload\\survivor-reload_rifle_19.png')]\n\n SPRITE_SHOOT = [pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\shoot\\survivor-shoot_rifle_0.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\shoot\\survivor-shoot_rifle_1.png'),\n pygame.image.load(r'Images\\Top_Down_Survivor\\rifle\\shoot\\survivor-shoot_rifle_2.png')]\n\nclass zombie_animations:\n SPRITE_STAND = [pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_0.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_1.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_2.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_3.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_4.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_5.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_6.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_7.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_8.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_9.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_10.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_11.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_12.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_13.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_14.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_15.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\idle\\skeleton-idle_16.png')]\n\n SPRITE_WALK = [pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_0.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_1.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_2.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_3.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_4.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_5.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_6.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_7.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_8.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_9.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_10.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_11.png'),\n 
pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_12.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_13.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_14.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_15.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\move\\skeleton-move_16.png')]\n\n SPRITE_ATTACK = [pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_0.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_1.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_2.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_3.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_4.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_5.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_6.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_7.png'),\n pygame.image.load(r'Images\\Top_Down_Zombie\\attack\\skeleton-attack_8.png')]\n\nclass bullet:\n SPRITE_BULLET = pygame.image.load(r'Images\\Bullet\\bullet-sprite-png-16.png')\n\nclass background:\n BACKGROUND_1 = pygame.image.load(r'Images\\Backgrounds\\background1.png')" }, { "alpha_fraction": 0.4987112879753113, "alphanum_fraction": 0.5161332488059998, "avg_line_length": 41.21763229370117, "blob_id": "6692133b7e466ee923105cbca580ab5e3e5360a4", "content_id": "89d1fe21fb9917a31800e55722d2730124153810", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30651, "license_type": "no_license", "max_line_length": 182, "num_lines": 726, "path": "/Sprites.py", "repo_name": "ai17339/Survivor-Game", "src_encoding": "UTF-8", "text": "# Imports\nimport math, time, random, pygame\nimport Useful, Assets, Sprites\nfrom Constants import*\nfrom pygame.math import Vector2\n\n# SMALL CLASSES ##############################################################################################################################\n# ############################################################################################################################################\n\n# Text\nclass Text:\n def draw_text(text, size, color, surface, x, y):\n font = pygame.font.SysFont('comicsans', size, bold=True)\n label = font.render(text, 1, color)\n if x == 'centered':\n x = (S_WIDTH / 2) - (label.get_width() / 2)\n else:\n x = x\n surface.blit(label, (x, y))\n\n# Drops valuable items to player when the kill zombies\nclass zombie_drop:\n def __init__(self, zombie_x, zombie_y):\n # Time keeper \n self.time = 0\n self.finished = False\n self.x = zombie_x\n self.y = zombie_y\n\n # Type of drop\n self.health_drop = False\n self.bullet_drop = False \n\n # Generates a random value out of 100 to use as percentage\n self.random_percent = random.randint(1, 100)\n\n # Randomizes player drops\n for player in PLAYER:\n \n # No drop\n if self.random_percent >= 1 and self.random_percent <= 50:\n return None \n\n # Bullet drop\n elif self.random_percent >= 51 and self.random_percent <= 75 and player.BULLETS_LEFT != 400:\n self.bullet_drop = True\n if player.BULLETS_LEFT <= 370:\n player.BULLETS_LEFT += 30\n else:\n player.BULLETS_LEFT = 400\n\n # Health drop\n elif self.random_percent >= 76 and self.random_percent <= 100 and player.health != 100:\n if player.health > 90:\n player.health = 100 \n else:\n player.health += 10 \n self.health_drop = True \n\n def 
drop(self):\n if self.health_drop == True:\n text_displayed = Text.draw_text(\"+10 HP\" , 20, COLORS[\"red\"], SCREEN, self.x, self.y)\n if self.bullet_drop == True:\n text_displayed = Text.draw_text(\"+30 Bullets\" , 20, COLORS[\"yellow\"], SCREEN, self.x, self.y)\n\n # Displays drop on screen for two seconds\n self.time += Useful.Game.delta_time\n if self.time >= 2:\n self.finished = True \n\n# Menu sprite\nclass menu_sprite():\n def __init__(self, x, y):\n # Position\n self.x = x\n self.y = y\n # Rotation\n self.angle = 0\n self.rotation_offset = Vector2(-60, 15)\n # Sprite\n self.current_sprite = SPRITE_STAND[0]\n self.sprite_rect = 0,0\n \n def update(self):\n # Updates angle\n self.angle += 2\n if self.angle >= 360:\n self.angle = 0\n\n # Updates sprite \n self.current_sprite = SPRITE_STAND[0]\n\n # Rotates sprite\n rotation_pivot = (self.x, self.y)\n rotation_offset_rotated = self.rotation_offset.rotate(-self.angle)\n rotated = Useful.Game.rotate_image(self.current_sprite, self.angle, rotation_pivot, rotation_offset_rotated, self.sprite_rect)\n self.current_sprite = rotated[0]\n self.sprite_rect = rotated[1]\n \n # Blits sprite\n SCREEN.blit(self.current_sprite, self.sprite_rect)\n\n\n# Main classes ###############################################################################################################################\n# ############################################################################################################################################\n\n# Button class ###############################################################################################################################\nclass Button(pygame.sprite.DirtySprite):\n def __init__(self, box_color, box_color2, box_color3, text_color, text_color2, text_color3, border_color, x, y,\n width, height, text_size, border_size, text='', border=None):\n pygame.sprite.DirtySprite.__init__(self)\n # Box colors for different states\n self.box_color = box_color\n self.box_color2 = box_color2\n self.box_color3 = box_color3\n # Text colors for different states\n self.text_color = text_color\n self.text_color2 = text_color2\n self.text_color3 = text_color3\n # Border color\n self.border_color = border_color\n # Co-ordinates for button\n self.x = x\n self.y = y\n # Dimensions for button\n self.width = width\n self.height = height\n self.text_size = text_size\n self.text = text\n self.border_size = border_size\n # Border\n self.border = border\n # Determines box and text colors\n self.color_box = COLORS[\"black\"]\n self.color_text = COLORS[\"black\"]\n # Records the state of the button\n self.button_down = False\n self.button_old_down = self.button_down\n self.was_clicked = False\n\n def update(self):\n # Creates ability to centre the bytton\n if self.x == 'centered':\n self.x = (S_WIDTH / 2) - (self.width / 2)\n\n # Checks if the mouse is over\n is_over_check = Useful.Game.is_over(self.x, self.y, self.width, self.height)\n \n # Changes the colors of the button if mouse is over\n if is_over_check:\n self.color_box = self.box_color2\n self.color_text = self.text_color2\n else:\n self.color_box = self.box_color\n self.color_text = self.text_color\n \n # Checks if the mouse is over and clicked and changes colors.\n if pygame.mouse.get_pressed()[0] and is_over_check:\n self.color_box = self.box_color3\n self.color_text = self.text_color3\n self.button_down = True\n else:\n self.button_down = False\n \n # Records if button is clicked \n if self.button_down == False and self.button_down != self.button_old_down and 
 # Records if button is clicked \n if not self.button_down and self.button_down != self.button_old_down and is_over_check:\n self.was_clicked = True\n \n # Records the state of the previous button\n self.button_old_down = self.button_down\n \n # Draws button border\n if self.border:\n pygame.draw.rect(SCREEN, self.border_color, (self.x - (self.border_size/2), self.y - (self.border_size/2), self.width + self.border_size, self.height + self.border_size))\n \n # Draws button\n pygame.draw.rect(SCREEN, self.color_box, (self.x, self.y, self.width, self.height))\n\n # Draws text\n font = pygame.font.SysFont('comicsans', self.text_size, bold=True)\n label = font.render(self.text, 1, self.color_text)\n\n # Text positions\n x = self.x + (self.width / 2) - (label.get_width() / 2)\n y = self.y + (self.height / 2) - (label.get_height() / 2)\n\n # Blits text\n SCREEN.blit(label, (x, y))\n\n# Player class ###############################################################################################################################\nclass Player(pygame.sprite.DirtySprite):\n def __init__(self, x, y, vel, images):\n pygame.sprite.DirtySprite.__init__(self)\n # Co-Ordinates of sprite\n self.x = x\n self.y = y\n # Hit box \n self.hit_box_x = 0\n self.hit_box_y = 0\n self.hit_box_width = 0\n self.hit_box_height = 0 \n self.hit_box_center = (0, 0)\n # Record of sprite state for animation\n self.idle = False\n self.move = False\n self.shoot = False\n self.reload = False \n # Sprite image and current animation\n self.current_images = images\n self.current_sprite = self.current_images[0]\n self.index = 0\n # Rotation of sprite\n self.angle = 0\n self.rotation_offset = Vector2(-35, 37)\n self.gun_offset = Vector2(-100, 2)\n self.hit_box_offset = Vector2(-0, 25)\n # Mask of sprite\n self.mask = pygame.mask.from_surface(self.current_sprite)\n # Sprite rect\n self.player_rect = 0, 0\n # Velocity \n self.vel = vel \n self.vel_left = vel\n self.vel_right = vel\n self.vel_up = vel\n self.vel_down = vel\n # Barrel position (tells bullets where to shoot from)\n self.barrel_pos_x = 0\n self.barrel_pos_y = 0\n # Number of bullets player can shoot\n self.BULLETS = 30\n self.BULLETS_LEFT = 180\n # Player health \n self.alive = True\n self.health = 100\n\n # Creates frame rate variables (class attributes shared by all Player instances)\n frame = 0\n fps = 20\n\n # Updates sprite every frame\n def update(self):\n # Animation ##########################################################################################################################\n\n # Finds the state of keys\n key_pressed = pygame.key.get_pressed()\n\n # Frame number \n Player.frame += Useful.Game.delta_time\n\n # Sprite animation frame rate\n if (Player.frame*1000) >= (1000/Player.fps):\n self.index += 1\n Player.frame = 0\n\n # Changes sprite animation\n if self.index >= len(self.current_images):\n self.index = 0\n self.current_sprite = self.current_images[self.index]\n\n # Shooting Animation\n if pygame.mouse.get_pressed()[0] and self.current_images != SPRITE_RELOAD and self.BULLETS >= 1 and self.current_images != SPRITE_SHOOT:\n self.index = 0\n self.current_images = SPRITE_SHOOT\n self.shoot = True\n Player.fps = 10\n\n # Plays gun sounds\n pygame.mixer.find_channel().play(GUNSHOT_SOUND2)\n pygame.mixer.find_channel().play(GUNSHOT_SOUND)\n\n # Updates number of bullets shot\n BULLETS_FIRED_LIST.append('b')\n\n # Activates a bullet sprite\n BULLETS.append(Projectile(self.x + 30, self.y + 200, self.x, self.y, self.barrel_pos_x, self.barrel_pos_y))\n \n if self.current_sprite == SPRITE_SHOOT[2] and self.shoot:\n self.BULLETS -= 1\n self.shoot = False\n global bullets_fired\n bullets_fired += 1 \n\n
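 # Ammo is decremented only on animation frame SPRITE_SHOOT[2], keeping the bullet count in sync with the muzzle-flash frame.\n\n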
 # Reload Animation\n reload_amount = 30 - self.BULLETS\n if reload_amount > self.BULLETS_LEFT:\n reload_amount = self.BULLETS_LEFT\n \n if key_pressed[pygame.K_r] and not self.shoot and not self.reload and self.BULLETS_LEFT >= 1:\n self.index = 0\n self.current_images = SPRITE_RELOAD\n self.reload = True \n Player.fps = 20\n\n # Plays reload sound\n pygame.mixer.find_channel().play(RELOAD_SOUND)\n\n if self.current_sprite == SPRITE_RELOAD[19]:\n self.reload = False \n\n # Updates ammo count\n self.BULLETS += reload_amount\n self.BULLETS_LEFT -= reload_amount\n\n # Walking Animation\n if self.move and not self.reload and not self.shoot:\n self.current_images = SPRITE_WALK\n Player.fps = 40\n\n # Standing Animation\n if not self.move and not self.reload and not self.shoot:\n self.current_images = SPRITE_STAND\n Player.fps = 20\n\n # Rotation ###########################################################################################################################\n\n # Resizing\n self.current_sprite = pygame.transform.scale(self.current_sprite, (230, 152))\n\n # Finds direction of mouse \n rotation_pivot = [self.x, self.y]\n mouse_x, mouse_y = pygame.mouse.get_pos()\n direction_x = mouse_x - rotation_pivot[0]\n direction_y = mouse_y - rotation_pivot[1]\n \n # Angle\n self.angle = (180 / math.pi) * -math.atan2(direction_y, direction_x)\n\n # Rotated sprite\n rotation_offset_rotated = self.rotation_offset.rotate(-self.angle)\n rotated = Useful.Game.rotate_image(self.current_sprite, self.angle, rotation_pivot, rotation_offset_rotated, self.player_rect)\n \n # Updates sprite to rotated version\n self.current_sprite = rotated[0]\n self.player_rect = rotated[1]\n\n # Gun barrel position\n gun_offset_rotated = self.gun_offset.rotate(-self.angle)\n self.barrel_pos_x = rotation_pivot[0] - gun_offset_rotated[0]\n self.barrel_pos_y = rotation_pivot[1] - gun_offset_rotated[1]\n \n # Hit box center rotation\n hit_box_offset_rotated = self.hit_box_offset.rotate(-self.angle)\n \n # Collide checks #####################################################################################################################\n \n # Hit box parameters \n self.hit_box_width = 90 \n self.hit_box_height = 90\n self.hit_box_x = rotation_pivot[0] - (self.hit_box_width/2) - hit_box_offset_rotated[0]\n self.hit_box_y = rotation_pivot[1] - (self.hit_box_height/2) - hit_box_offset_rotated[1]\n self.hit_box_center = self.hit_box_x + (self.hit_box_width/2), self.hit_box_y + (self.hit_box_height/2)\n \n # Resets velocity every frame\n self.vel_left = self.vel\n self.vel_right = self.vel\n self.vel_up = self.vel\n self.vel_down = self.vel\n\n # Collide check with zombies\n for zombie in ZOMBIES:\n # If collision on left \n if (self.hit_box_x - self.vel) <= (zombie.hit_box_x + zombie.hit_box_width) and self.hit_box_x > zombie.hit_box_x:\n if self.hit_box_y <= (zombie.hit_box_y + zombie.hit_box_height) and (self.hit_box_y + self.hit_box_height) >= zombie.hit_box_y:\n self.vel_left = 0\n self.x += 1 * 100 * Useful.Game.delta_time\n else:\n self.vel_left = self.vel\n # If collision on right\n if (self.hit_box_x + self.hit_box_width + self.vel) >= zombie.hit_box_x and self.hit_box_x < zombie.hit_box_x:\n if self.hit_box_y <= (zombie.hit_box_y + zombie.hit_box_height) and (self.hit_box_y + self.hit_box_height) >= zombie.hit_box_y:\n self.vel_right = 0\n self.x -= 1 * 100 * Useful.Game.delta_time\n else:\n self.vel_right = self.vel\n # If collision on up \n if (self.hit_box_y - self.vel) <= (zombie.hit_box_y + zombie.hit_box_height) and 
self.hit_box_y > zombie.hit_box_y:\n if self.hit_box_x <= (zombie.hit_box_x + zombie.hit_box_width) and (self.hit_box_x + self.hit_box_width) >= zombie.hit_box_x:\n self.vel_up = 0\n self.y += 1 * 100 * Useful.Game.delta_time\n else:\n self.vel_up = self.vel\n # If collision on down\n if (self.hit_box_y + self.hit_box_height + self.vel) >= zombie.hit_box_y and self.hit_box_y < zombie.hit_box_y:\n if self.hit_box_x <= (zombie.hit_box_x + zombie.hit_box_width) and (self.hit_box_x + self.hit_box_width) >= zombie.hit_box_x:\n self.vel_down = 0\n self.y -= 1 * 100 * Useful.Game.delta_time\n else:\n self.vel_down = self.vel\n \n # Movement ##########################################################################################################################\n \n # Moves when WASD is pressed\n if key_pressed[pygame.K_a]:\n self.x -= self.vel_left * 100 * Useful.Game.delta_time\n if key_pressed[pygame.K_d]:\n self.x += self.vel_right * 100 * Useful.Game.delta_time\n if key_pressed[pygame.K_w]:\n self.y -= self.vel_up * 100 * Useful.Game.delta_time\n if key_pressed[pygame.K_s]:\n self.y += self.vel_down * 100 * Useful.Game.delta_time\n if key_pressed[pygame.K_a] or key_pressed[pygame.K_d] or key_pressed[pygame.K_w] or key_pressed[pygame.K_s]:\n self.move = True\n else:\n self.move = False \n\n # Inbounds check\n if self.x >= S_WIDTH - 40:\n self.x = S_WIDTH - 40\n if self.x <= 40:\n self.x = 40\n if self.y >= S_HEIGHT - 120:\n self.y = S_HEIGHT - 120\n if self.y <= 40:\n self.y = 40\n\n # Blits ##############################################################################################################################\n\n # Blits sprite to screen\n SCREEN.blit(self.current_sprite, self.player_rect)\n \n # Hit boxes\n if Useful.Game.draw_hit_box:\n # Shows TRUE OFFSET\n pygame.draw.rect(SCREEN, (255,255,0), (rotation_pivot[0] - rotation_offset_rotated[0], rotation_pivot[1] - rotation_offset_rotated[1], 5,5))\n # Shows gun offset\n pygame.draw.rect(SCREEN, (255,0,255), (rotation_pivot[0] - gun_offset_rotated[0], rotation_pivot[1] - gun_offset_rotated[1], 5,5))\n # Shows pivot point \n pygame.draw.rect(SCREEN, (0, 0,255), (rotation_pivot[0], rotation_pivot[1], 5,5))\n # Shows hit box center\n pygame.draw.rect(SCREEN, (255,0,0), (self.hit_box_x + (self.hit_box_width/2), self.hit_box_y + (self.hit_box_height/2), 5,5))\n # Shows hit box \n pygame.draw.rect(SCREEN, (255,0,0), (self.hit_box_x, self.hit_box_y, self.hit_box_width, self.hit_box_height), 2)\n \n # Health \n if self.health <= 0:\n self.alive = False \n\n# Zombie class ###############################################################################################################################\nclass Zombie(pygame.sprite.DirtySprite):\n def __init__(self, x, y, vel, images):\n pygame.sprite.DirtySprite.__init__(self)\n # Co-Ordinates of sprite\n self.x = x\n self.y = y\n # Hit box\n self.hit_box_x = 0\n self.hit_box_y = 0\n self.hit_box_width = 0\n self.hit_box_height = 0 \n self.hit_box_center = (0, 0)\n # Record of sprite state for animation\n self.idle = False\n self.move = True\n self.attack = False\n # Sprite image and current animation\n self.current_images = images\n self.current_sprite = self.current_images[0]\n self.index = 0\n # Rotation of sprite\n self.angle = 0\n self.rotation_offset = Vector2(-10, 10)\n self.hit_box_offset = Vector2(-0, 0)\n # Mask of sprite\n self.mask = pygame.mask.from_surface(self.current_sprite)\n # Sprite rect\n self.zombie_rect = 0, 0\n # Velocity \n self.vel = vel \n self.old_vel = vel\n # 
Animation frame rate\n self.frame = 0\n self.fps = random.randint(16, 26)\n # Distance from player\n self.distance = 1000\n # Zombie health\n self.health = 100\n\n # updates sprite every frame\n def update(self):\n # Animation ##########################################################################################################################\n\n # Finds the state of keys\n key_pressed = pygame.key.get_pressed()\n\n # Frame number\n self.frame += Useful.Game.delta_time\n \n # Sprite animation frame rate\n if (self.frame*1000) >= (1000/self.fps):\n self.index += 1\n self.frame = 0\n new_frame = True\n else: \n new_frame = False\n\n # changes sprite animation\n if self.index >= len(self.current_images):\n self.index = 0\n self.current_sprite = self.current_images[self.index]\n\n # Starts attack animation if 150 pixels or less from player\n if self.distance < 150:\n self.fps = 20\n self.current_images = Assets.zombie_animations.SPRITE_ATTACK\n self.attack = True\n\n # Declares if the player is not attacking that it is moving\n else: \n self.move = True\n self.attack = False\n\n # Makes the attack animation start on first image\n if self.attack == True and self.move == True:\n self.index = 0\n self.move = False\n\n # Movement animation\n if self.move == True:\n self.fps = 20\n self.current_images = Assets.zombie_animations.SPRITE_WALK\n\n # Damages player if the full attack animation is finished \n if self.current_sprite == Assets.zombie_animations.SPRITE_ATTACK[8]:\n self.attack = False\n if new_frame:\n for player in PLAYER:\n player.health -= 10\n\n # Rotation ###########################################################################################################################\n\n # Resizing\n self.current_sprite = pygame.transform.scale(self.current_sprite, (200, 184))\n self.mask = pygame.mask.from_surface(self.current_sprite)\n\n # Finds direction of player\n for player in PLAYER:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n direction_x = player.hit_box_center[0] - self.x\n direction_y = player.hit_box_center[1] - self.y\n\n # Zombie angle\n self.angle = (180 / math.pi) * -math.atan2(direction_y, direction_x)\n\n # Pivot point that image is rotated around\n rotation_pivot = [self.x, self.y]\n\n # Rotated sprite\n rotation_offset_rotated = self.rotation_offset.rotate(-self.angle)\n rotated = Useful.Game.rotate_image(self.current_sprite, self.angle, rotation_pivot, rotation_offset_rotated, self.zombie_rect)\n\n # Updates sprite to rotated version\n self.current_sprite = rotated[0]\n self.zombie_rect = rotated[1]\n\n # Hit box center rotation\n hit_box_offset_rotated = self.hit_box_offset.rotate(-self.angle)\n\n # Collide checks #####################################################################################################################\n\n # Hit box parameters \n self.hit_box_width = 110\n self.hit_box_height = 110\n self.hit_box_x = rotation_pivot[0] - (self.hit_box_width/2) - hit_box_offset_rotated[0]\n self.hit_box_y = rotation_pivot[1] - (self.hit_box_height/2) - hit_box_offset_rotated[1]\n self.hit_box_center = self.hit_box_x + (self.hit_box_width/2), self.hit_box_y + (self.hit_box_width/2)\n \n # Bullet check\n for bullet in BULLETS:\n if Useful.Game.collide_check(self, self.zombie_rect[0], self.zombie_rect[1], bullet, bullet.x, bullet.y):\n if self.health > 0:\n self.health -= 30\n BULLETS.pop(BULLETS.index(bullet))\n \n # Collide check with player \n for player in PLAYER:\n # If collision on left \n if (self.hit_box_x - self.vel) <= (player.hit_box_x + 
player.hit_box_width) and self.hit_box_x > player.hit_box_x:\n if self.hit_box_y <= (player.hit_box_y + player.hit_box_height) and (self.hit_box_y + self.hit_box_height) >= player.hit_box_y:\n self.x += 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n # If collision on right\n if (self.hit_box_x + self.hit_box_width + self.vel) >= player.hit_box_x and self.hit_box_x < player.hit_box_x:\n if self.hit_box_y <= (player.hit_box_y + player.hit_box_height) and (self.hit_box_y + self.hit_box_height) >= player.hit_box_y:\n self.x -= 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n # If collision on up \n if (self.hit_box_y - self.vel) <= (player.hit_box_y + player.hit_box_height) and self.hit_box_y > player.hit_box_y:\n if self.hit_box_x <= (player.hit_box_x + player.hit_box_width) and (self.hit_box_x + self.hit_box_width) >= player.hit_box_x:\n self.y += 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n # If collision on down\n if (self.hit_box_y + self.hit_box_height + self.vel) >= player.hit_box_y and self.hit_box_y < player.hit_box_y:\n if self.hit_box_x <= (player.hit_box_x + player.hit_box_width) and (self.hit_box_x + self.hit_box_width) >= player.hit_box_x:\n self.y -= 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n\n # Collide check with zombies\n for zombie in ZOMBIES:\n # Skips itself (a zombie always overlaps its own hit box)\n if zombie is self:\n continue\n # If collision on left \n if (self.hit_box_x - self.vel) <= (zombie.hit_box_x + zombie.hit_box_width) and self.hit_box_x > zombie.hit_box_x:\n if self.hit_box_y <= (zombie.hit_box_y + zombie.hit_box_height) and (self.hit_box_y + self.hit_box_height) >= zombie.hit_box_y:\n self.x += 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n # If collision on right\n if (self.hit_box_x + self.hit_box_width + self.vel) >= zombie.hit_box_x and self.hit_box_x < zombie.hit_box_x:\n if self.hit_box_y <= (zombie.hit_box_y + zombie.hit_box_height) and (self.hit_box_y + self.hit_box_height) >= zombie.hit_box_y:\n self.x -= 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n # If collision on up \n if (self.hit_box_y - self.vel) <= (zombie.hit_box_y + zombie.hit_box_height) and self.hit_box_y > zombie.hit_box_y:\n if self.hit_box_x <= (zombie.hit_box_x + zombie.hit_box_width) and (self.hit_box_x + self.hit_box_width) >= zombie.hit_box_x:\n self.y += 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n # If collision on down\n if (self.hit_box_y + self.hit_box_height + self.vel) >= zombie.hit_box_y and self.hit_box_y < zombie.hit_box_y:\n if self.hit_box_x <= (zombie.hit_box_x + zombie.hit_box_width) and (self.hit_box_x + self.hit_box_width) >= zombie.hit_box_x:\n self.y -= 1 * 100 * Useful.Game.delta_time\n self.vel = 0\n else:\n self.vel = self.old_vel\n\n # Movement ##########################################################################################################################\n\n # Finds distance to player\n for player in PLAYER:\n distance_x = self.hit_box_center[0] - player.hit_box_center[0]\n distance_y = self.hit_box_center[1] - player.hit_box_center[1]\n\n # Makes the distance a positive number \n if distance_x <= -1:\n distance_x = distance_x * -1\n if distance_y <= -1:\n distance_y = distance_y * -1\n\n # Uses a^2 + b^2 = c^2 equation to find the distance\n distance_squared = (distance_x**2)+(distance_y**2)\n\n # Square roots c^2 \n self.distance = distance_squared**0.5\n
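\n # Note: squaring already discards the sign, so math.hypot(distance_x, distance_y) would give the same distance in one step without the fix-up above.\n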
 \n # Calculates velocity\n self.vel_x = round(math.cos(-self.angle / (180 / math.pi)) * self.vel)\n self.vel_y = round(math.sin(-self.angle / (180 / math.pi)) * self.vel)\n\n # Moves at set velocity\n self.x += self.vel_x\n self.y += self.vel_y\n\n # Blits ##############################################################################################################################\n\n # Blits sprite to screen \n SCREEN.blit(self.current_sprite, self.zombie_rect)\n\n # Hit boxes \n if Useful.Game.draw_hit_box:\n # Shows TRUE OFFSET\n pygame.draw.rect(SCREEN, (255,255,0), (rotation_pivot[0] - rotation_offset_rotated[0], rotation_pivot[1] - rotation_offset_rotated[1], 5,5))\n # Shows pivot point \n pygame.draw.rect(SCREEN, (0, 0,255), (rotation_pivot[0], rotation_pivot[1], 5,5))\n # Shows hit box center\n pygame.draw.rect(SCREEN, (255,0,0), (self.hit_box_x + (self.hit_box_width/2), self.hit_box_y + (self.hit_box_height/2), 5,5))\n # Shows hit box \n pygame.draw.rect(SCREEN, (255,0,0), (self.hit_box_x, self.hit_box_y, self.hit_box_width, self.hit_box_height), 2)\n\n # Health bar \n bar_height = 10\n health_bar_width = (100/100)*self.health\n red_bar_width = 100\n red_bar = pygame.draw.rect(SCREEN, COLORS['red'], (self.x - 50, self.y - 100, red_bar_width, bar_height))\n if self.health >= 1:\n green_bar = pygame.draw.rect(SCREEN, COLORS['green'], (self.x - 50, self.y - 100, health_bar_width, bar_height))\n\n# Bullet class ###############################################################################################################################\nclass Projectile(pygame.sprite.DirtySprite):\n def __init__(self, x, y, player_x, player_y, player_gun_x, player_gun_y):\n pygame.sprite.DirtySprite.__init__(self)\n # Starting co-ordinates of bullets\n self.x = player_gun_x\n self.y = player_gun_y\n # Bullet image \n self.current_sprite = pygame.transform.scale(Assets.bullet.SPRITE_BULLET, (10, 10))\n # Bullet mask\n self.mask = pygame.mask.from_surface(self.current_sprite)\n # Velocity of bullet\n self.vel = 50 #* 100 * Useful.Game.delta_time\n # Finds direction of mouse\n mouse_x, mouse_y = pygame.mouse.get_pos()\n direction_x = mouse_x - player_x\n direction_y = mouse_y - player_y\n # Bullet angle\n self.angle = math.atan2(direction_y, direction_x)\n self.vel_x = round(math.cos(self.angle) * self.vel)\n self.vel_y = round(math.sin(self.angle) * self.vel)\n # Records whether bullet is on the screen or not\n self.in_bounds = True \n\n # Updates sprite every frame\n def update(self):\n # Moves bullet in the direction it was shot \n self.x += self.vel_x\n self.y += self.vel_y\n\n # Records whether bullet is on the screen\n if 0 < self.x < S_WIDTH and 0 < self.y < S_HEIGHT:\n self.in_bounds = True\n else:\n self.in_bounds = False\n\n # Blits image to screen\n SCREEN.blit(self.current_sprite, (round(self.x), round(self.y)))\n\n" }, { "alpha_fraction": 0.5611296892166138, "alphanum_fraction": 0.5938313007354736, "avg_line_length": 33.94805145263672, "blob_id": "d66ceef455dfcdae5edeebabfea95376f98f4879", "content_id": "acb54dc15f6bd1476b8f862335dc5c05f71d1861", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2691, "license_type": "no_license", "max_line_length": 91, "num_lines": 77, "path": "/Useful.py", "repo_name": "ai17339/Survivor-Game", "src_encoding": "UTF-8", "text": "# Imports\nimport math, time, random, pygame\nimport Useful, Assets, Sprites\n\n# Provides useful functions to the game\nclass Game():\n \n # DELTA TIME \n delta_time = 0\n
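 # Seconds elapsed between the two most recent frames; assumed to be refreshed once per frame from the main loop so sprites can scale movement to real time.\n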
 ticks_Last_Frame = 0\n def get_deltatime():\n total_ticks = pygame.time.get_ticks()\n delta_time = (total_ticks - Game.ticks_Last_Frame) / 1000.0\n Game.ticks_Last_Frame = total_ticks\n return delta_time\n \n # Rotates image\n def rotate_image(sprite, angle, pivot, offset, sprite_rect):\n sprite = pygame.transform.rotate(sprite, int(angle))\n sprite_rect = sprite.get_rect(center=pivot-offset)\n return sprite, sprite_rect \n \n # Checks if mouse is over \n def is_over(x, y, width, height):\n mouse = pygame.mouse.get_pos()\n if x < mouse[0] < (x + width) and y < mouse[1] < (y + height):\n return True\n \n # Checks if two sprites collide\n def collide_check(sprite1, s1_x, s1_y, sprite2, s2_x, s2_y):\n offset_x = int(s2_x) - int(s1_x)\n offset_y = int(s2_y) - int(s1_y)\n collide = sprite1.mask.overlap(sprite2.mask, (int(offset_x), int(offset_y)))\n if collide != None:\n return True\n else:\n return False\n\n # Displays hit boxes of sprites\n draw_hit_box = False\n def display_hit_box():\n key_pressed = pygame.key.get_pressed()\n if key_pressed[pygame.K_p] and Game.draw_hit_box == False:\n Game.draw_hit_box = True\n time.sleep(0.2)\n elif key_pressed[pygame.K_p] and Game.draw_hit_box == True:\n Game.draw_hit_box = False \n time.sleep(0.2)\n\n# Randomizes variables in different zombies\nclass zombie_spawn():\n def spawn_point():\n spawn_point_list = []\n # Top right corner\n spawn_point_list.append((2000, random.randint(-10, 200)))\n # Top left corner\n spawn_point_list.append((-30, random.randint(-10, 200)))\n # Bottom right corner\n spawn_point_list.append((2000, random.randint(900, 1080)))\n # Bottom left corner\n spawn_point_list.append((-30, random.randint(900, 1080)))\n\n random_spawn_point = spawn_point_list[random.randint(0, len(spawn_point_list) - 1)]\n return random_spawn_point[0], random_spawn_point[1]\n \n def speed():\n # Generates a random value out of 100 to use as percentage\n random_percent = random.randint(1, 100)\n\n # Returns a random speed\n if random_percent >= 1 and random_percent <= 50:\n return 4\n elif random_percent >= 51 and random_percent <= 75:\n return 8\n elif random_percent >= 76 and random_percent <= 100:\n return 15\n" } ]
5
esnemal/Python_Practice
https://github.com/esnemal/Python_Practice
06ba35163d287dde548eb770756b7a237cdac48d
d3a975993a16b010e58db8d573f4204187fa1117
7d2a086383f1aef7e00963723723795d89bbaebf
refs/heads/master
2022-07-18T00:55:58.554822
2020-05-14T06:02:19
2020-05-14T06:02:19
263,824,672
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43283581733703613, "alphanum_fraction": 0.5074626803398132, "avg_line_length": 10.166666984558105, "blob_id": "3cade3051027913aef7d60b95c11f5fa4a73c6d8", "content_id": "7b59c73a1f060624e3e6a236134df69a912b76e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 26, "num_lines": 18, "path": "/boolean.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "x=1\n\n\nif x:\n print(\"t\")\n\nlist = [1,2,3,4,5]\n\nfor list in list: #doubt\n print(list)\n\nlist= [1,3,5,7,8]\nprint (list)\n\nfor x in range(10):\n if x % 2 == 1: #doubt\n continue\n print(x)\n" }, { "alpha_fraction": 0.6124401688575745, "alphanum_fraction": 0.6507176756858826, "avg_line_length": 25.0625, "blob_id": "2fcf1c466855fafe333d36abbcbcfef3a159fee9", "content_id": "c7a2541dbd5e8ff6ba83b3cf3caf3a12121bd5d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 57, "num_lines": 16, "path": "/logical_op.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "\nmaths=input('maths=')\nprint ('type of maths=', type(maths))\nmaths=int(maths)\nprint ('type of maths=', type(maths))\nsci=input('sci=') #take input as string\nsci=int(sci) #type casting string to int\n\n\nif maths>=90 and sci>=90:\n print(\"exellent\")\nelif ((maths<90 and maths>=60) and (sci<90 and sci>=60)):\n print(\"average\")\nelif maths<60 and sci<60:\n print(\"need improvment\")\nelse:\n print(\"study more\")\n" }, { "alpha_fraction": 0.2847682237625122, "alphanum_fraction": 0.4900662302970886, "avg_line_length": 17.75, "blob_id": "f84108bb6831ddbf48f7afb76963ca079d5942a2", "content_id": "957f9c9d5a7c4f8c8a54d6b39a50ae16daaf7a83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/loop.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "number = [4,56,43,78,90,43,76,45,98,00,34,56,77,7,8]\n\nfor x in number:\n if x == 77:\n break\n if x % 2 == 1:\n continue\n print(x)\n\n" }, { "alpha_fraction": 0.6483516693115234, "alphanum_fraction": 0.6648351550102234, "avg_line_length": 19.22222137451172, "blob_id": "239e888837e83498482e8118adcf681366a37159", "content_id": "79311e00d215460c2c1470a5d044a1af2ff2ce50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 44, "num_lines": 9, "path": "/args_ex.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "import sys\n\nargs=sys.argv\nprint(\"type of args:\", type(args))\nprint(\"type od arge variable\",type(args[1]))\nfor i in args:\n print(i)\nsum=int(args[1])+int(args[2])\nprint(\"sum=\",sum)\n" }, { "alpha_fraction": 0.4811320900917053, "alphanum_fraction": 0.5707547068595886, "avg_line_length": 12.0625, "blob_id": "4e859e1e4e7cb80d63a936870bedaeefbce740ef", "content_id": "ea30411c723e63bc7bb2a8c552b94380b1f73d66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 23, "num_lines": 16, "path": "/conditional_st.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "\n\nvar1=100\nif var1:\n print(\"true\")\n print 
(\"var1\")\n\nvar2=0\nif var2==0:\n print(\"true\")\n print (\"var2\")\nprint(\"good bye\")\n\nvar3=-10\nif var3:\n print(\"print var3\")\n print(var3)\nprint(\"var3=\",var3)\n\n" }, { "alpha_fraction": 0.4849785268306732, "alphanum_fraction": 0.5321888327598572, "avg_line_length": 12.411765098571777, "blob_id": "5935c70081cd38a90bf7bbb7196a50a6ce1d8cfc", "content_id": "893306f3b1ea48752950e7a2bc9e8f9222b6c23e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 29, "num_lines": 17, "path": "/conditionalop.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "\n\na=3\nb=2\nlist=[3,5,7,8,9]\nif (a in list):\n print(\"a is in list\")\nelse:\n print(\"a is not in list\")\n\nif (b in list):\n print(\"b is in list\")\nelse:\n print(\"b is not in list\")\n\nx=2\nprint(x == 2)\nprint(x == 3)\nprint(x < 3)\n\n\n\n" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6759259104728699, "avg_line_length": 17, "blob_id": "40e3a0eca4a9ae76cc5927df9b2769ff9524f7b5", "content_id": "bc3dd8a1da0f969774ecb8c36437a1b4988c15ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 52, "num_lines": 18, "path": "/class&object.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "class MyClass:\n variable = \"blah\"\n\n def function(self):\n print(\"This is a message inside the class.\")\n\nphonebook = {}\nphonebook[\"John\"] = 938477566\nphonebook[\"Jack\"] = 938377264\nphonebook[\"Jill\"] = 947662781\nprint(phonebook)\n\nfor name,number in phonebook.items():\n print(name,number)\n\nx = sys.path\n\nPrint(x)\n" }, { "alpha_fraction": 0.6677524447441101, "alphanum_fraction": 0.69923996925354, "avg_line_length": 28.677419662475586, "blob_id": "4fa00d2ab27c8bb31dcdbab5eac524d406851c1a", "content_id": "172237f1ead19ed049599420efad70f6efcbf560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "no_license", "max_line_length": 134, "num_lines": 31, "path": "/function.py", "repo_name": "esnemal/Python_Practice", "src_encoding": "UTF-8", "text": "# Modify this function to return a list of strings as defined above\ndef list_benefits():\n return \"More organized code\", \"More readable code\", \"Easier code reuse\", \"Allowing programmers to share and connect code together\"\n\n# Modify this function to concatenate to each benefit - \" is a benefit of functions!\"\ndef build_sentence(benefit):\n return\"Hello, %s , is a benefit of functions!\" %(benefit)\n\ndef name_the_benefits_of_functions():\n list_of_benefits = list_benefits()\n for benefit in list_of_benefits:\n print(build_sentence(benefit))\n\nname_the_benefits_of_functions()\n\nprint(build_sentence(\"sneha,this\"))\n\nphonebook = {}\nphonebook[\"John\"] = 938477566\nphonebook[\"Jack\"] = 938377264\nphonebook[\"Jill\"] = 947662781\nprint(phonebook)\n\nfor name,number in phonebook.items():\n print(name,\":\",number)\n\ndef main():\n print(\"inside main\")\nif __name__ == '__main__' :\n print(\"inside main12\")\n main()\n\n" } ]
8
t-cadet/simil
https://github.com/t-cadet/simil
ca62e0238f3abea93894a140a8ea454a2edd3aa3
713f4a2f37a4c9e7de60c561d5edae1e3d22157a
1fe5373264496a753274ecec0dbd2967b3b84437
refs/heads/master
2023-03-02T10:50:47.840474
2018-12-26T05:25:35
2018-12-26T05:25:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6594885587692261, "alphanum_fraction": 0.6705248951911926, "avg_line_length": 34.067962646484375, "blob_id": "726eb2b59cf5b849ce5f00c07a6dbf064625d966", "content_id": "74487bcfe264bf9d4d63dbaca3b2e4f741988fe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3717, "license_type": "no_license", "max_line_length": 112, "num_lines": 103, "path": "/main.py", "repo_name": "t-cadet/simil", "src_encoding": "UTF-8", "text": "# TODO: construire 3 modèles corrects \r\n# refactoriser pour appeler le model que je veux\r\n# analyser les erreurs\r\n# utiliser le dataset MR, refactoriser, analyser erreurs\r\n# implémenter les embeddings \r\n\r\nimport numpy as np\r\nfrom allennlp.data.dataset_readers import SnliReader\r\nfrom allennlp.data.vocabulary import Vocabulary\r\n\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Dense, Dropout\r\nfrom keras import layers\r\n\r\ndef mnliToList(dataset, vocab):\r\n premises = []\r\n hypothesis = []\r\n labels = []\r\n\r\n for instance in dataset:\r\n premises.append([vocab.get_token_index(token.text) for token in instance.fields['premise'].tokens])\r\n hypothesis.append([vocab.get_token_index(token.text) for token in instance.fields['hypothesis'].tokens])\r\n labels.append(vocab.get_token_index(instance.fields['label'].label, namespace=\"labels\"))\r\n return (premises, hypothesis, labels)\r\n\r\n\r\ndef getBow(dataset, vocab, bow_type='groundBow'):\r\n def countBow(sentence, vocab):\r\n bow = np.zeros(vocab.get_vocab_size())\r\n for token_id in sentence:\r\n bow[token_id]+=1\r\n return bow\r\n def groundBow(sentence, vocab): \r\n return [0 if c==0 else 1 for c in countBow(sentence, vocab)]\r\n def freqBow(sentence, vocab):\r\n return [c/len(sentence) for c in countBow(sentence, vocab)] \r\n\r\n bows = np.zeros((len(dataset), vocab.get_vocab_size()))\r\n i = 0\r\n for sentence in dataset:\r\n bows[i] = locals()[bow_type](sentence, vocab)\r\n i = i+1\r\n return bows\r\n\r\ndef getMnliBow(dataset, vocab, bow_type='groundBow'):\r\n premises, hypothesis, labs = mnliToList(dataset, vocab)\r\n\r\n premises = getBow(premises, vocab, bow_type)\r\n hypothesis = getBow(hypothesis, vocab, bow_type)\r\n\r\n labels = np.zeros((len(dataset), vocab.get_vocab_size(namespace='labels')))\r\n for i in range(len(dataset)):\r\n labels[i, labs[i]] = 1\r\n\r\n return (premises, hypothesis, labels)\r\n\r\nreader = SnliReader()\r\n\r\n# train_dataset = reader.read(cached_path('datasets/multinli_1.0/multinli_1.0_train.jsonl'))\r\ntrain_dataset = reader.read('tests/fixtures/train1000.jsonl') # Fixture\r\nvalidation_dataset = reader.read('tests/fixtures/val1000.jsonl') # Fixture\r\n#validation_dataset = reader.read('datasets/multinli_1.0/multinli_1.0_dev_matched.jsonl')\r\n\r\n# print(train_dataset)\r\n\r\nvocab = Vocabulary.from_instances(train_dataset + validation_dataset)\r\n# vocab.print_statistics()\r\n\r\nt_premises, t_hypothesis, t_labels = getMnliBow(train_dataset, vocab, 'freqBow')\r\nv_premises, v_hypothesis, v_labels = getMnliBow(validation_dataset, vocab, 'freqBow')\r\n\r\n# for i in range(3):\r\n# print(i)\r\n# print(t_premises[i])\r\n# print(t_hypothesis[i])\r\n# print(t_labels[i])\r\n\r\nprem_input = Input(shape=(vocab.get_vocab_size('tokens'),))\r\nprem_out = Dense(32, activation='relu')(prem_input)\r\nprem_out = Dense(16, activation='relu')(prem_out)\r\n# prem_out = Dense(8, activation='hard_sigmoid')(prem_input)\r\n\r\nhyp_input = 
Input(shape=(vocab.get_vocab_size('tokens'),))\r\nhyp_out = Dense(32, activation='relu')(hyp_input)\r\nhyp_out = Dense(16, activation='relu')(hyp_out)\r\n# hyp_out = Dense(8, activation='hard_sigmoid')(hyp_input)\r\n\r\nconcatenated = layers.concatenate([prem_out, hyp_out], axis=-1)\r\noutput = Dense(vocab.get_vocab_size('labels'), activation='softmax')(concatenated)\r\n\r\nmodel = Model([prem_input, hyp_input], output)\r\nmodel.compile(optimizer='rmsprop',\r\n loss='categorical_crossentropy',\r\n metrics=['acc'])\r\n\r\nmodel.fit(\r\n [t_premises, t_hypothesis],\r\n t_labels,\r\n batch_size=32,\r\n epochs=10,\r\n verbose=1,\r\n validation_data=([v_premises, v_hypothesis], v_labels)\r\n)\r\n" }, { "alpha_fraction": 0.6699857711791992, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 38.596153259277344, "blob_id": "5933fac9af29079565f8a188e12ffa73222ed318", "content_id": "75db966afec0de5421a12e8cb6b4f975d95875de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2109, "license_type": "no_license", "max_line_length": 100, "num_lines": 52, "path": "/tests/test_mnli_model.py", "repo_name": "t-cadet/simil", "src_encoding": "UTF-8", "text": "# pylint: disable=no-self-use,invalid-name\r\nfrom flaky import flaky\r\nimport pytest\r\nimport numpy\r\nfrom numpy.testing import assert_almost_equal\r\n\r\nfrom allennlp.common import Params\r\nfrom allennlp.common.checks import ConfigurationError\r\nfrom allennlp.common.testing import ModelTestCase\r\nfrom allennlp.models import Model\r\n\r\nfrom simil.models import BowMNLI\r\n\r\n\r\nclass TestBowMNLI(ModelTestCase):\r\n def setUp(self):\r\n super(TestBowMNLI, self).setUp()\r\n self.set_up_model('tests/experiments/mnli.json',\r\n 'tests/fixtures/multinli_1.0_train.jsonl')\r\n\r\n def test_forward_pass_runs_correctly(self):\r\n training_tensors = self.dataset.as_tensor_dict()\r\n output_dict = self.model(**training_tensors)\r\n assert_almost_equal(numpy.sum(output_dict[\"label_probs\"][0].data.numpy(), -1), 1, decimal=6)\r\n\r\n @flaky\r\n def test_model_can_train_save_and_load(self):\r\n self.ensure_model_can_train_save_and_load(self.param_file)\r\n\r\n @flaky\r\n def test_batch_predictions_are_consistent(self):\r\n self.ensure_batch_predictions_are_consistent()\r\n\r\n def test_model_load(self):\r\n params = Params.from_file('tests/experiments/mnli.json')\r\n model = Model.load(params, serialization_dir='tests/serialization')\r\n\r\n assert isinstance(model, BowMNLI)\r\n\r\n def test_mismatched_dimensions_raise_configuration_errors(self):\r\n params = Params.from_file(self.param_file)\r\n # Make the input_dim to the first feedforward_layer wrong - it should be 2.\r\n params[\"model\"][\"attend_feedforward\"][\"input_dim\"] = 10\r\n with pytest.raises(ConfigurationError):\r\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\r\n\r\n params = Params.from_file(self.param_file)\r\n # Make the projection output_dim of the last layer wrong - it should be\r\n # 3, equal to the number of classes.\r\n params[\"model\"][\"aggregate_feedforward\"][\"output_dim\"] = 10\r\n with pytest.raises(ConfigurationError):\r\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))" }, { "alpha_fraction": 0.6311202645301819, "alphanum_fraction": 0.6429690718650818, "avg_line_length": 41.429786682128906, "blob_id": "2834fbbba88ac23fef1c7b0d86bf7f3fb6a98c58", "content_id": "0b9051df05657f7cfd49c2201635f5407e18da48", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 10212, "license_type": "no_license", "max_line_length": 296, "num_lines": 235, "path": "/RT_polarity.py", "repo_name": "t-cadet/simil", "src_encoding": "UTF-8", "text": "# load_rt_polarity_dataset\r\nimport os\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# ngram_vectorize\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_selection import SelectKBest, f_classif\r\n\r\n# mlp_model\r\nfrom tensorflow.python.keras import models\r\nfrom tensorflow.python.keras.layers import Dense, Dropout\r\n\r\n# checkLabels\r\nimport explore_data\r\n\r\n# train_ngram_model\r\nimport tensorflow as tf\r\n\r\nfrom collections import defaultdict\r\n\r\ndef load_rt_polarity_dataset(data_path = \"datasets\", seed=123, test_split=0.2):\r\n \"\"\"Loads the rt-polarity dataset.\r\n\r\n # Arguments\r\n data_path: string, path to the data directory.\r\n seed: int, seed for randomizer.\r\n test_split: float, proportion of test samples.\r\n\r\n # Returns\r\n A tuple of training and test data.\r\n Number of samples: 10662\r\n Number of categories: 2 (0 - negative, 1 - positive)\r\n\r\n # References\r\n Bo Pang and Lillian Lee, 'Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales.', Proceedings of the ACL, 2005.\r\n\r\n Download and uncompress archive from:\r\n http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polarydata.tar.gz\r\n \"\"\"\r\n rt_path = os.path.join(data_path, 'rt-polaritydata', 'rt-polaritydata')\r\n pos_path = os.path.join(rt_path, 'rt-polarity.pos')\r\n neg_path = os.path.join(rt_path, 'rt-polarity.neg')\r\n\r\n with open(pos_path, encoding=\"latin-1\") as pos_f, open(neg_path, encoding=\"latin-1\") as neg_f:\r\n pos_data = pos_f.readlines()\r\n neg_data = neg_f.readlines()\r\n assert len(pos_data)==len(neg_data)\r\n\r\n data = pos_data + neg_data\r\n labels = [1]*len(pos_data) + [0]*len(neg_data)\r\n\r\n return train_test_split(data, np.array(labels), random_state=seed, test_size=test_split)\r\n\r\ndef ngram_vectorize(train_texts, train_labels, test_texts, ngram_range=(1,2), top_k=20000, token_mode='word', min_doc_freq=2):\r\n \"\"\"Vectorizes texts as n-gram vectors.\r\n\r\n 1 text = 1 tf-idf vector the length of vocabulary of unigrams + bigrams.\r\n\r\n # Arguments\r\n train_texts: list, training text strings.\r\n train_labels: np.ndarray, training labels.\r\n test_texts: list, test text strings.\r\n ngram_range: tuple, range (inclusive) of n-gram sizes for tokenizing text.\r\n top_k: int, limit on the number of features.\r\n token_mode: string, whether text should be split into word or character n-grams. 
One of 'word', 'char'.\r\n min_doc_freq: int, minimum document/corpus frequency below which a token will be discarded.\r\n\r\n # Returns\r\n x_train, x_test: vectorized training and test texts\r\n \"\"\"\r\n # Create keyword arguments to pass to the 'tf-idf' vectorizer.\r\n kwargs = {\r\n 'ngram_range': ngram_range,\r\n 'dtype': 'int32',\r\n 'strip_accents': 'unicode',\r\n 'decode_error': 'replace',\r\n 'analyzer': token_mode,\r\n 'min_df': min_doc_freq,\r\n }\r\n vectorizer = TfidfVectorizer(**kwargs)\r\n\r\n # Learn vocabulary from training texts and vectorize training texts.\r\n x_train = vectorizer.fit_transform(train_texts)\r\n\r\n # Vectorize test texts.\r\n x_test = vectorizer.transform(test_texts)\r\n\r\n # Select top 'k' of the vectorized features.\r\n selector = SelectKBest(f_classif, k=min(top_k, x_train.shape[1]))\r\n selector.fit(x_train, train_labels)\r\n x_train = selector.transform(x_train).astype('float32')\r\n x_test = selector.transform(x_test).astype('float32')\r\n return x_train, x_test, vectorizer, selector\r\n\r\ndef mlp_model(units, input_shape, num_classes = 2, dropout_rate = 0.2, activation='relu', optimizer='rmsprop', regularizer=None):\r\n \"\"\"Creates an instance of a multi-layer perceptron model.\r\n\r\n # Arguments\r\n units: int array, output dimension for each of the layers.\r\n input_shape: tuple, shape of input to the model.\r\n num_classes: int, number of output classes.\r\n dropout_rate: float, percentage of input to drop at Dropout layers.\r\n activation: string, the activation function.\r\n\r\n # Returns\r\n An MLP model instance.\r\n \"\"\"\r\n model = models.Sequential()\r\n model.add(Dropout(rate=dropout_rate, input_shape=input_shape))\r\n\r\n for dim in units:\r\n model.add(Dense(units=dim, activation=activation, kernel_regularizer=regularizer))\r\n model.add(Dropout(rate=dropout_rate))\r\n\r\n op_units, op_activation, loss = (1, 'sigmoid', 'binary_crossentropy') if num_classes==2 else (num_classes, 'softmax', 'sparse_categorical_crossentropy')\r\n\r\n model.add(Dense(units=op_units, activation=op_activation))\r\n model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])\r\n return model\r\n\r\ndef checkLabels(train_labels, test_labels):\r\n # Verify that validation labels are in the same range as training labels.\r\n num_classes = explore_data.get_num_classes(train_labels)\r\n unexpected_labels = [v for v in test_labels if v not in range(num_classes)]\r\n if len(unexpected_labels):\r\n raise ValueError('Unexpected label values found in the validation set:'\r\n ' {unexpected_labels}. 
Please make sure that the '\r\n 'labels in the validation set are in the same range '\r\n 'as training labels.'.format(unexpected_labels=unexpected_labels))\r\n return num_classes\r\n\r\ndef train_model(model, x_train, train_labels, epochs=1000, val_split=0.2, batch_size=32, filename='rt_mlp_model'):\r\n\r\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=1)]\r\n\r\n # Train and validate model.\r\n history = model.fit(\r\n x_train,\r\n train_labels,\r\n epochs=epochs,\r\n callbacks=callbacks,\r\n validation_split=val_split,\r\n verbose=2, # Logs once per epoch.\r\n batch_size=batch_size)\r\n\r\n # Print results.\r\n history = history.history\r\n print('Validation accuracy: {acc}, loss: {loss}'.format(acc=history['val_acc'][-1], loss=history['val_loss'][-1]))\r\n\r\n # Save model.\r\n model.save('serial/'+filename+'.h5')\r\n return history['val_acc'][-1], history['val_loss'][-1]\r\n\r\ndef separateMissRaw(model, data, labels, v, s, save=False):\r\n \"\"\"Returns a list of string containing misclassified samples and another containing well classified samples\"\"\"\r\n wrong, right = [], []\r\n predictions = model.predict(s.transform(v.transform(data)).astype('float32'))\r\n for i in range(len(data)):\r\n if round(predictions[i][0])==labels[i]:\r\n right.append(data[i])\r\n else:\r\n wrong.append(data[i])\r\n if save:\r\n path = \"stats/rt/\"\r\n with open(path+\"right_samples_raw\",\"w\") as f:\r\n f.write(\"\".join(right))\r\n with open(path+\"wrong_samples_raw\",\"w\") as f:\r\n f.write(\"\".join(wrong))\r\n\r\n return wrong, right\r\n\r\ndef separateMiss(model, data, labels):\r\n \"\"\"Returns a list containing misclassified samples and another containing well classified samples\"\"\"\r\n neg, pos = [], []\r\n for sample, truth in list(zip(data, labels)):\r\n if(round(model.predict(sample)[0][0])==truth):\r\n pos.append(sample)\r\n else:\r\n neg.append(sample)\r\n return neg, pos\r\n\r\ndef getErrAnalysisStats(model, data, labels, vectorizer, save=False):\r\n vocab = vectorizer.get_feature_names()\r\n neg, pos = separateMiss(model, data, labels)\r\n neg_count, pos_count, stats = defaultdict(int), defaultdict(int), defaultdict(int)\r\n\r\n for sample in neg:\r\n for token_id in sample.indices:\r\n neg_count[vocab[token_id]]+=1\r\n for sample in pos:\r\n for token_id in sample.indices:\r\n pos_count[vocab[token_id]]+=1\r\n\r\n for key, neg_c in neg_count.items():\r\n stats[key] = [neg_c, pos_count[key], round(neg_c/(neg_c+pos_count[key]),3), neg_c+pos_count[key]]\r\n for key, pos_c in pos_count.items():\r\n if key not in neg_count:\r\n stats[key] = [0, pos_c, 0, pos_c]\r\n if save: \r\n neg_str = [[vocab[token_id] for token_id in sample.indices] for sample in neg]\r\n pos_str = [[vocab[token_id] for token_id in sample.indices] for sample in pos]\r\n saveStats(stats, neg_str, pos_str)\r\n return stats, neg, pos\r\n\r\ndef saveStats(stats, neg=None, pos=None, sep=\";\"):\r\n path = \"stats/rt/\"\r\n with open(path+\"wordErrorCount.csv\",\"w\") as f:\r\n f.write(\"word;wrong_count;right_count;wrong/tot;total\\n\")\r\n for key, value in stats.items():\r\n f.write(key+sep+sep.join([str(v) for v in value])+\"\\n\")\r\n if neg is not None:\r\n with open(path+\"wrong_samples\",\"w\") as f:\r\n f.write(\"\\n\".join([\",\".join(s) for s in neg]))\r\n if pos is not None:\r\n with open(path+\"right_samples\",\"w\") as f:\r\n f.write(\"\\n\".join([\",\".join(s) for s in pos]))\r\n\r\n\r\n# import operator\r\n# x = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}\r\n# sorted_x = sorted(x.items(), 
key=operator.itemgetter(1)) \r\n\r\ntrain_texts, test_texts, train_labels, test_labels = load_rt_polarity_dataset()\r\nnum_classes = checkLabels(train_labels, test_labels)\r\nx_train, x_test, v, s = ngram_vectorize(train_texts, train_labels, test_texts, top_k=20000, ngram_range=(1,3))\r\n\r\nmodel = mlp_model(units=[8], input_shape=x_train.shape[1:], num_classes=num_classes, dropout_rate=0.2, optimizer=tf.keras.optimizers.Adam(lr=1e-3))\r\n#train_model(model, x_train, train_labels)\r\n\r\n# model.predict(s.transform(v.transform([\"Kirito still looks 12 years old even though he's 22 or something. Tea parties still happen quite often\"])).astype('float32'))\r\n# array([[0.93839157]], dtype=float32)\r\n\r\n# Pacing is as bad as always, too slow as seen on the first 6 episodes, or too fast like in episodes 7 and 9;Kirito still looks 12 years old even though he's 22 or something;Tea parties still happen quite often;What the heck is Kirito doing on these 2 years at the academy, he sure isn't training\r\n# Pacing is as bad as always, too slow as seen on the first 6 episodes, or too fast like in episodes 7 and 9;What the heck is Kirito doing on these 2 years at the academy, he sure isn't training\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6649343371391296, "alphanum_fraction": 0.6704047918319702, "avg_line_length": 42.60975646972656, "blob_id": "c8d13a6a53fe17d9a09c19b8b82ae3919ba77a09", "content_id": "272c87be51cc17337c43cd160416c35a82cdfbe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3656, "license_type": "no_license", "max_line_length": 96, "num_lines": 82, "path": "/models/bow_mnli.py", "repo_name": "t-cadet/simil", "src_encoding": "UTF-8", "text": "from typing import Iterator, List, Dict, Optional, Any\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport numpy as np\r\nfrom allennlp.data import Instance\r\nfrom allennlp.data.fields import TextField, SequenceLabelField, Field, LabelField, MetadataField\r\nfrom allennlp.data.dataset_readers import DatasetReader\r\nfrom allennlp.common.file_utils import cached_path\r\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\r\nfrom allennlp.data.tokenizers import Token\r\nfrom allennlp.data.vocabulary import Vocabulary\r\nfrom allennlp.models import Model\r\nfrom allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder\r\nfrom allennlp.modules.token_embedders import Embedding\r\nfrom allennlp.modules.seq2seq_encoders import Seq2SeqEncoder, PytorchSeq2SeqWrapper\r\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\r\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\r\nfrom allennlp.training.metrics import CategoricalAccuracy\r\nfrom allennlp.data.iterators import BucketIterator\r\nfrom allennlp.training.trainer import Trainer\r\nfrom allennlp.predictors import SentenceTaggerPredictor\r\n\r\nimport json\r\nimport logging\r\n\r\nfrom overrides import overrides\r\nfrom allennlp.data.tokenizers import Tokenizer, WordTokenizer\r\n\r\[email protected](\"bow_mnli\")\r\nclass BowMNLI(Model):\r\n def __init__(self, vocab: Vocabulary,\r\n premise_encoder: Optional[Seq2SeqEncoder] = None,\r\n hypothesis_encoder: Optional[Seq2SeqEncoder] = None,\r\n initializer: InitializerApplicator = InitializerApplicator(),\r\n regularizer: Optional[RegularizerApplicator] = None) -> None:\r\n super(BowMNLI, self).__init__(vocab, regularizer)\r\n \r\n
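 # The encoders are assumed to output a fixed-size bag-of-words vector of size vocab.get_vocab_size('tokens'); the hypothesis encoder falls back to the premise encoder when only one is configured.\r\n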
 self._premise_encoder = premise_encoder\r\n self._hypothesis_encoder = hypothesis_encoder or premise_encoder\r\n self.fc = nn.Sequential(\r\n nn.Linear(vocab.get_vocab_size('tokens'), 32),\r\n nn.ReLU(),\r\n nn.Linear(32, 16),\r\n nn.ReLU()\r\n )\r\n # The two 16-dim sentence vectors are concatenated in forward(), so the classifier input is 32-dim\r\n self.aggregate = nn.Sequential(nn.Linear(32, vocab.get_vocab_size('labels')))\r\n\r\n self._num_labels = vocab.get_vocab_size(namespace=\"labels\")\r\n self._accuracy = CategoricalAccuracy()\r\n self._loss = torch.nn.CrossEntropyLoss()\r\n\r\n initializer(self)\r\n\r\n def forward(self, # type: ignore\r\n premise: Dict[str, torch.LongTensor],\r\n hypothesis: Dict[str, torch.LongTensor],\r\n label: torch.IntTensor = None,\r\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\r\n\r\n premise_mask = get_text_field_mask(premise).float()\r\n hypothesis_mask = get_text_field_mask(hypothesis).float()\r\n \r\n out1 = self.fc(self._premise_encoder(premise, premise_mask))\r\n out2 = self.fc(self._hypothesis_encoder(hypothesis, hypothesis_mask))\r\n\r\n label_logits = self.aggregate(torch.cat([out1, out2], dim=-1))\r\n label_probs = torch.nn.functional.softmax(label_logits, dim=-1)\r\n\r\n output_dict = {\"label_logits\": label_logits,\r\n \"label_probs\": label_probs}\r\n\r\n if label is not None:\r\n output_dict[\"loss\"] = self._loss(label_logits, label.long().view(-1))\r\n self._accuracy(label_logits, label)\r\n\r\n if metadata is not None:\r\n output_dict[\"premise_tokens\"] = [x[\"premise_tokens\"] for x in metadata]\r\n output_dict[\"hypothesis_tokens\"] = [x[\"hypothesis_tokens\"] for x in metadata]\r\n\r\n return output_dict\r\n\r\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\r\n return {'accuracy': self._accuracy.get_metric(reset), }" }, { "alpha_fraction": 0.5767799615859985, "alphanum_fraction": 0.5870294570922852, "avg_line_length": 41.24802017211914, "blob_id": "bc5bbfb14b364101c57bb7b745fc03c2af8f12fc", "content_id": "a0aa29bb46fd3eae15b7bc275099928508c6c78d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16391, "license_type": "no_license", "max_line_length": 158, "num_lines": 379, "path": "/multinli.py", "repo_name": "t-cadet/simil", "src_encoding": "UTF-8", "text": "import os\r\n\r\n# ngram_vectorize\r\nfrom sklearn import preprocessing\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_selection import SelectKBest, f_classif\r\n\r\n# model\r\nfrom tensorflow.python.keras.models import Model\r\nfrom tensorflow.python.keras import models, layers, initializers, regularizers\r\nfrom tensorflow.python.keras.layers import Dense, Dropout, Input, Embedding, SeparableConv1D, MaxPooling1D, GlobalAveragePooling1D\r\n\r\n# train_model\r\nimport tensorflow as tf\r\n\r\nimport logging\r\nimport json\r\nimport random\r\n\r\nimport numpy as np\r\n\r\n# tensorboard\r\nfrom tensorflow.python.keras.callbacks import TensorBoard\r\nfrom time import time\r\n\r\n# sequence vectorize\r\nfrom tensorflow.python.keras.preprocessing import sequence\r\nfrom tensorflow.python.keras.preprocessing import text\r\n\r\nlogging.basicConfig(\r\n format='%(asctime)s - %(levelname)s - %(message)s',\r\n datefmt='%m/%d/%Y %I:%M:%S %p',\r\n level=logging.INFO#,\r\n #filename=\"log\"++\".txt\"\r\n )\r\n\r\ndef load_mnli(lim=None, filename=\"multinli_1.0_train.jsonl\", data_path = \"datasets/multinli_1.0\", seed=123):\r\n mnli_path = os.path.join(data_path, filename)\r\n with open(mnli_path) as mnli_file:\r\n logging.info(\"Reading MNLI instances from jsonl dataset at: %s\", mnli_path)\r\n pre, hyp, lab = [], [], []\r\n for line in mnli_file:\r\n sample = json.loads(line)\r\n if 
sample[\"gold_label\"] == '-':\r\n continue\r\n pre.append(sample[\"sentence1\"])\r\n hyp.append(sample[\"sentence2\"])\r\n lab.append(sample[\"gold_label\"])\r\n if lim is not None:\r\n lim -= 1\r\n if lim == 0:\r\n break\r\n\r\n random.seed(seed)\r\n random.shuffle(pre)\r\n random.seed(seed)\r\n random.shuffle(hyp)\r\n random.seed(seed)\r\n random.shuffle(lab)\r\n lab = preprocessing.LabelEncoder().fit_transform(lab)\r\n return pre, hyp, lab\r\n\r\ndef ngram_vectorize(pre, hyp, train_labels, ngram_range=(1,2), top_k=20000, token_mode='word', min_doc_freq=2):\r\n kwargs = {\r\n 'ngram_range': ngram_range,\r\n 'dtype': 'int32',\r\n 'strip_accents': 'unicode',\r\n 'decode_error': 'replace',\r\n 'analyzer': token_mode,\r\n 'min_df': min_doc_freq,\r\n }\r\n vectorizer = TfidfVectorizer(**kwargs)\r\n vectorizer.fit(pre+hyp)\r\n pre_train = vectorizer.transform(pre)\r\n hyp_train = vectorizer.transform(hyp)\r\n\r\n temp = pre_train + hyp_train\r\n selector = SelectKBest(f_classif, k=min(top_k, temp.shape[1]))\r\n selector.fit(temp, train_labels)\r\n\r\n pre_train = selector.transform(pre_train).astype('float32')\r\n hyp_train = selector.transform(hyp_train).astype('float32')\r\n return pre_train, hyp_train, vectorizer, selector\r\n\r\ndef siamese_mlp_model(units, input_shape, num_classes = 3, dropout_rate = 0.2, activation='relu', optimizer='rmsprop'):\r\n\r\n prem_input = Input(shape=input_shape)\r\n hyp_input = Input(shape=input_shape)\r\n prem_out = Dropout(rate=dropout_rate, input_shape=input_shape)(prem_input)\r\n hyp_out = Dropout(rate=dropout_rate, input_shape=input_shape)(hyp_input)\r\n\r\n for dim in units:\r\n prem_out = Dense(dim, activation=activation)(prem_out)\r\n hyp_out = Dense(dim, activation=activation)(hyp_out)\r\n prem_out = Dropout(rate=dropout_rate)(prem_out)\r\n hyp_out = Dropout(rate=dropout_rate)(hyp_out)\r\n\r\n concatenated = layers.concatenate([prem_out, hyp_out], axis=-1)\r\n\r\n op_units, op_activation, loss = (1, 'sigmoid', 'binary_crossentropy') if num_classes==2 else (num_classes, 'softmax', 'sparse_categorical_crossentropy')\r\n output = Dense(op_units, activation=op_activation)(concatenated)\r\n\r\n model = Model([prem_input, hyp_input], output)\r\n model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])\r\n return model\r\n\r\ndef sequence_vectorize(pre, hyp, top_k=20000, max_seq_len=500):\r\n \"\"\"Vectorizes texts as sequence vectors.\"\"\"\r\n\r\n # Create vocabulary with training texts.\r\n tokenizer = text.Tokenizer(num_words=top_k)\r\n tokenizer.fit_on_texts(pre+hyp)\r\n\r\n # Vectorize training texts.\r\n x_hyp = tokenizer.texts_to_sequences(pre)\r\n x_pre = tokenizer.texts_to_sequences(hyp)\r\n\r\n # Get max sequence length.\r\n max_length = len(max(x_hyp+x_pre, key=len))\r\n if max_length > max_seq_len:\r\n max_length = max_seq_len\r\n\r\n # Fix sequence length to max value. 
 # Fix sequence length to max value. Sequences shorter than the length are\r\n # padded in the beginning and sequences longer are truncated\r\n # at the beginning.\r\n x_pre = sequence.pad_sequences(x_pre, maxlen=max_length).astype('float32')\r\n x_hyp = sequence.pad_sequences(x_hyp, maxlen=max_length).astype('float32')\r\n return x_pre, x_hyp, tokenizer\r\n\r\n# def sepcnn_model(input_shape,\r\n# num_features,\r\n# blocks=2,\r\n# filters=64,\r\n# kernel_size=3,\r\n# embedding_dim=200,\r\n# dropout_rate=0.2,\r\n# pool_size=3,\r\n# num_classes=3,\r\n# optimizer='rmsprop',\r\n# use_pretrained_embedding=False,\r\n# is_embedding_trainable=False,\r\n# embedding_matrix=None):\r\n# \"\"\"Creates and compiles an instance of a separable CNN model.\r\n\r\n# # Arguments\r\n# blocks: int, number of pairs of sepCNN and pooling blocks in the model.\r\n# filters: int, output dimension of the layers.\r\n# kernel_size: int, length of the convolution window.\r\n# embedding_dim: int, dimension of the embedding vectors.\r\n# dropout_rate: float, percentage of input to drop at Dropout layers.\r\n# pool_size: int, factor by which to downscale input at MaxPooling layer.\r\n# input_shape: tuple, shape of input to the model.\r\n# num_classes: int, number of output classes.\r\n# num_features: int, number of words (embedding input dimension).\r\n# use_pretrained_embedding: bool, true if pre-trained embedding is on.\r\n# is_embedding_trainable: bool, true if embedding layer is trainable.\r\n# embedding_matrix: dict, dictionary with embedding coefficients.\r\n\r\n# # Returns\r\n# A compiled sepCNN model instance.\r\n# \"\"\"\r\n# def TwoSepC1D(inp, filters=filters, kernel_size=kernel_size):\r\n# out = SeparableConv1D(filters=filters,\r\n# kernel_size=kernel_size,\r\n# activation='relu',\r\n# bias_initializer='random_uniform',\r\n# depthwise_initializer='random_uniform',\r\n# padding='same')(inp)\r\n# out = SeparableConv1D(filters=filters,\r\n# kernel_size=kernel_size,\r\n# activation='relu',\r\n# bias_initializer='random_uniform',\r\n# depthwise_initializer='random_uniform',\r\n# padding='same')(out)\r\n# return out\r\n\r\n# # Add embedding layer. 
If pre-trained embedding is used add weights to the\r\n# # embeddings layer and set trainable to input is_embedding_trainable flag.\r\n# if use_pretrained_embedding:\r\n# embedding_layer = Embedding(input_dim=num_features,\r\n# output_dim=embedding_dim,\r\n# input_length=input_shape[0],\r\n# weights=[embedding_matrix],\r\n# trainable=is_embedding_trainable)\r\n# else:\r\n# embedding_layer = Embedding(input_dim=num_features,\r\n# output_dim=embedding_dim,\r\n# input_length=input_shape[0])\r\n\r\n# pre_input = Input(shape=input_shape)\r\n# hyp_input = Input(shape=input_shape)\r\n\r\n# pre_out = embedding_layer(pre_input)\r\n# hyp_out = embedding_layer(hyp_input)\r\n\r\n# for _ in range(blocks-1):\r\n# pre_out = Dropout(rate=dropout_rate)(pre_out)\r\n# pre_out = TwoSepC1D(pre_out)\r\n# pre_out = MaxPooling1D(pool_size=pool_size)(pre_out)\r\n\r\n# hyp_out = Dropout(rate=dropout_rate)(hyp_out)\r\n# hyp_out = TwoSepC1D(hyp_out)\r\n# hyp_out = MaxPooling1D(pool_size=pool_size)(hyp_out)\r\n\r\n# pre_out = TwoSepC1D(pre_out, filters=filters*2)\r\n# pre_out = GlobalAveragePooling1D()(pre_out)\r\n# pre_out = Dropout(rate=dropout_rate)(pre_out)\r\n\r\n# hyp_out = TwoSepC1D(hyp_out, filters=filters*2)\r\n# hyp_out = GlobalAveragePooling1D()(hyp_out)\r\n# hyp_out = Dropout(rate=dropout_rate)(hyp_out)\r\n\r\n# out = layers.concatenate([pre_out, hyp_out], axis=-1)\r\n# out = Dense(32, 'relu')(out)\r\n# op_units, op_activation, loss = (1, 'sigmoid', 'binary_crossentropy') if num_classes==2 else (num_classes, 'softmax', 'sparse_categorical_crossentropy')\r\n# out = Dense(op_units, activation=op_activation)(out)\r\n\r\n# model = Model([pre_input, hyp_input], out)\r\n# model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])\r\n# return model\r\n\r\ndef sepcnn_model(input_shape,\r\n num_features,\r\n blocks=2,\r\n filters=64,\r\n kernel_size=3,\r\n embedding_dim=200,\r\n dropout_rate=0.2,\r\n pool_size=3,\r\n num_classes=3,\r\n optimizer='rmsprop',\r\n use_pretrained_embedding=False,\r\n is_embedding_trainable=False,\r\n embedding_matrix=None):\r\n \"\"\"Creates and compiles an instance of a separable CNN model.\r\n\r\n # Arguments\r\n blocks: int, number of pairs of sepCNN and pooling blocks in the model.\r\n filters: int, output dimension of the layers.\r\n kernel_size: int, length of the convolution window.\r\n embedding_dim: int, dimension of the embedding vectors.\r\n dropout_rate: float, percentage of input to drop at Dropout layers.\r\n pool_size: int, factor by which to downscale input at MaxPooling layer.\r\n input_shape: tuple, shape of input to the model.\r\n num_classes: int, number of output classes.\r\n num_features: int, number of words (embedding input dimension).\r\n use_pretrained_embedding: bool, true if pre-trained embedding is on.\r\n is_embedding_trainable: bool, true if embedding layer is trainable.\r\n embedding_matrix: dict, dictionary with embedding coefficients.\r\n\r\n # Returns\r\n A compiled sepCNN model instance.\r\n \"\"\"\r\n def TwoSepC1D(inp, filters=filters, kernel_size=kernel_size):\r\n out = SeparableConv1D(filters=filters,\r\n kernel_size=kernel_size,\r\n activation='relu',\r\n bias_initializer='random_uniform',\r\n depthwise_initializer='random_uniform',\r\n padding='same')(inp)\r\n out = SeparableConv1D(filters=filters,\r\n kernel_size=kernel_size,\r\n activation='relu',\r\n bias_initializer='random_uniform',\r\n depthwise_initializer='random_uniform',\r\n padding='same')(out)\r\n return out\r\n\r\n # Add embedding layer. 
If pre-trained embedding is used add weights to the\r\n # embeddings layer and set trainable to input is_embedding_trainable flag.\r\n if use_pretrained_embedding:\r\n embedding_layer = Embedding(input_dim=num_features,\r\n output_dim=embedding_dim,\r\n input_length=input_shape[0],\r\n weights=[embedding_matrix],\r\n trainable=is_embedding_trainable)\r\n else:\r\n embedding_layer = Embedding(input_dim=num_features,\r\n output_dim=embedding_dim,\r\n input_length=input_shape[0])\r\n\r\n inp = Input(shape=input_shape)\r\n x_out = embedding_layer(inp)\r\n for _ in range(blocks-1):\r\n x_out = Dropout(rate=dropout_rate)(x_out)\r\n x_out = TwoSepC1D(x_out)\r\n x_out = MaxPooling1D(pool_size=pool_size)(x_out)\r\n x_out = TwoSepC1D(x_out, filters=filters*2)\r\n x_out = GlobalAveragePooling1D()(x_out)\r\n x_out = Dropout(rate=dropout_rate)(x_out)\r\n\r\n encoder = Model(inp, x_out)\r\n\r\n\r\n pre_input = Input(shape=input_shape)\r\n hyp_input = Input(shape=input_shape)\r\n\r\n pre_out = encoder(pre_input)\r\n hyp_out = encoder(hyp_input)\r\n\r\n out = layers.concatenate([pre_out, hyp_out], axis=-1)\r\n out = Dense(32, 'relu')(out)\r\n op_units, op_activation, loss = (1, 'sigmoid', 'binary_crossentropy') if num_classes==2 else (num_classes, 'softmax', 'sparse_categorical_crossentropy')\r\n out = Dense(op_units, activation=op_activation)(out)\r\n\r\n model = Model([pre_input, hyp_input], out)\r\n model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])\r\n return model\r\n\r\ndef train_model(model, x_train, train_labels, epochs=1000, val_split=0.1, batch_size=32, filename='mnli_mlp_model', tensorboard=False):\r\n\r\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)]\r\n if tensorboard:\r\n callbacks.append(TensorBoard(log_dir=\"tensorboard/mnli/{}\".format(time()), histogram_freq=1, batch_size=32, write_graph=True))\r\n\r\n # Train and validate model.\r\n history = model.fit(\r\n x_train,\r\n train_labels,\r\n epochs=epochs,\r\n callbacks=callbacks,\r\n validation_split=val_split,\r\n verbose=1,\r\n batch_size=batch_size)\r\n\r\n # Print results.\r\n history = history.history\r\n logging.info('Validation accuracy: {acc}, loss: {loss}'.format(acc=history['val_acc'][-1], loss=history['val_loss'][-1]))\r\n\r\n # Save model.\r\n model.save('serial/'+filename+'.h5')\r\n return history['val_acc'][-1], history['val_loss'][-1]\r\n\r\ndef run_test(model, vectorizer, selector, save_stats=False):\r\n test_pre_raw, test_hyp_raw, test_lab = load_mnli(filename=\"multinli_1.0_dev_matched.jsonl\")\r\n test_pre = selector.transform(vectorizer.transform(test_pre_raw))\r\n test_hyp = selector.transform(vectorizer.transform(test_hyp_raw))\r\n logging.info(model.evaluate([test_pre,test_hyp], test_lab))\r\n\r\n if save_stats:\r\n predictions = model.predict([test_pre,test_hyp])\r\n wrong, right = [], []\r\n for i in range(len(test_pre_raw)):\r\n pred = np.argmax(predictions[i])\r\n if pred==test_lab[i]:\r\n right.append([test_pre_raw[i], test_hyp_raw[i], str(pred), str(test_lab[i])])\r\n else:\r\n wrong.append([test_pre_raw[i], test_hyp_raw[i], str(pred), str(test_lab[i])])\r\n path = \"stats/mnli/\"\r\n with open(path+\"right_samples_raw.csv\",\"w\") as f:\r\n f.write(\"premise\\thypothesis\\tprediction\\tlabel\\n\")\r\n f.write(\"\\n\".join([\"\\t\".join(e) for e in right]))\r\n with open(path+\"wrong_samples_raw.csv\",\"w\") as f:\r\n f.write(\"premise\\thypothesis\\tprediction\\tlabel\\n\")\r\n f.write(\"\\n\".join([\"\\t\".join(e) for e in wrong]))\r\n\r\ndef runBow():\r\n t_pre, 
t_hyp, t_lab = load_mnli(lim=15000)\r\n pre_train, hyp_train, v, s = ngram_vectorize(t_pre, t_hyp, t_lab)\r\n\r\n model = siamese_mlp_model(units=[8], input_shape=pre_train.shape[1:], num_classes=3, optimizer=tf.keras.optimizers.Adam(lr=1e-3))\r\n train_model(model, [pre_train, hyp_train], t_lab, epochs=3, tensorboard=True)\r\n\r\nTOP_K = 20000\r\nt_pre, t_hyp, t_lab = load_mnli(lim=10000)\r\npre_train, hyp_train, t = sequence_vectorize(t_pre, t_hyp, TOP_K)\r\n\r\nmodel = sepcnn_model(input_shape=pre_train.shape[1:],\r\n num_features=min(len(t.word_index) + 1, TOP_K),\r\n blocks=2,\r\n filters=64,\r\n kernel_size=5,\r\n embedding_dim=50,\r\n dropout_rate=0.2,\r\n pool_size=3,\r\n num_classes=3,\r\n optimizer=tf.keras.optimizers.Adam(lr=1e-3))\r\n\r\ntrain_model(model, [pre_train, hyp_train], t_lab, epochs=1000, tensorboard=False, batch_size=512, filename='mnli_sepcnn_model')\r\n" }, { "alpha_fraction": 0.5440917015075684, "alphanum_fraction": 0.5493826866149902, "avg_line_length": 62.85714340209961, "blob_id": "c3f053e92eaa6271ffb724668d9c1308d0ec8de1", "content_id": "92dca746583d2b2c851e95d82334153c0250a6c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2268, "license_type": "no_license", "max_line_length": 499, "num_lines": 35, "path": "/tests/test_mnli_reader.py", "repo_name": "t-cadet/simil", "src_encoding": "UTF-8", "text": "# pylint: disable=no-self-use,invalid-name\r\nimport pytest\r\n\r\nfrom allennlp.data.dataset_readers import SnliReader\r\nfrom allennlp.common.util import ensure_list\r\nfrom allennlp.common.testing import AllenNlpTestCase\r\n\r\nclass TestMnliReader():\r\n @pytest.mark.parametrize(\"lazy\", (True, False))\r\n def test_read_from_file(self, lazy):\r\n reader = SnliReader(lazy=lazy)\r\n instances = reader.read('tests/fixtures/multinli_1.0_train.jsonl')\r\n instances = ensure_list(instances)\r\n\r\n instance0 = {\"premise\": [\"Conceptually\", \"cream\", \"skimming\", \"has\", \"two\", \"basic\", \"dimensions\", \"-\", \"product\", \"and\", \"geography\", \".\"],\r\n \t\t\t \"hypothesis\": [\"Product\", \"and\", \"geography\", \"are\", \"what\", \"make\", \"cream\", \"skimming\", \"work\", \".\"],\r\n \t\t\t \"label\": \"neutral\"}\r\n\r\n instance1 = {\"premise\": [\"you\", \"know\", \"during\", \"the\", \"season\", \"and\", \"i\", \"guess\", \"at\", \"at\", \"your\", \"level\", \"uh\", \"you\", \"lose\", \"them\", \"to\", \"the\", \"next\", \"level\", \"if\", \"if\", \"they\", \"decide\", \"to\", \"recall\", \"the\", \"the\", \"parent\", \"team\", \"the\", \"Braves\", \"decide\", \"to\", \"call\", \"to\", \"recall\", \"a\", \"guy\", \"from\", \"triple\", \"A\", \"then\", \"a\", \"double\", \"A\", \"guy\", \"goes\", \"up\", \"to\", \"replace\", \"him\", \"and\", \"a\", \"single\", \"A\", \"guy\", \"goes\", \"up\", \"to\", \"replace\", \"him\"],\r\n \"hypothesis\": [\"You\", \"lose\", \"the\", \"things\", \"to\", \"the\", \"following\", \"level\", \"if\", \"the\", \"people\", \"recall\", \".\"],\r\n \"label\": \"entailment\"}\r\n\r\n instance2 = {\"premise\": [\"One\", \"of\", \"our\", \"number\", \"will\", \"carry\", \"out\", \"your\", \"instructions\", \"minutely\", \".\"],\r\n \"hypothesis\": [\"A\", \"member\", \"of\", \"my\", \"team\", \"will\", \"execute\", \"your\", \"orders\", \"with\", \"immense\", \"precision\", \".\"],\r\n \"label\": \"entailment\"}\r\n\r\n assert len(instances) == 3\r\n def equals(fields, instance): \r\n \tassert [t.text for t in fields[\"premise\"].tokens] == 
instance[\"premise\"]\r\n \tassert [t.text for t in fields[\"hypothesis\"].tokens] == instance[\"hypothesis\"]\r\n \tassert fields[\"label\"].label == instance[\"label\"]\r\n\r\n equals(instances[0].fields, instance0)\r\n equals(instances[1].fields, instance1)\r\n equals(instances[2].fields, instance2)" } ]
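The `ngram_vectorize` function in the simil sources above chains two scikit-learn steps: a `TfidfVectorizer` fitted on premises and hypotheses together, then `SelectKBest` with `f_classif` scoring fitted on the element-wise sum of the two sparse matrices. A minimal self-contained sketch of that pattern follows; the sentences, labels, and `k` value are illustrative placeholders, not data from the repository — only the scikit-learn calls mirror the original code.

# Sketch of the TF-IDF + feature-selection pattern used by ngram_vectorize.
# Toy sentences/labels below are made up for illustration.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, f_classif

pre = ["a man is eating food", "a dog runs in the park",
       "two kids are playing chess", "she reads a long book"]
hyp = ["someone is eating", "an animal is outside",
       "children play a game", "a person is reading"]
labels = [0, 1, 0, 1]  # integer gold labels, as produced by LabelEncoder

vectorizer = TfidfVectorizer(ngram_range=(1, 2))
vectorizer.fit(pre + hyp)            # one shared vocabulary for both roles
x_pre = vectorizer.transform(pre)
x_hyp = vectorizer.transform(hyp)

# Score columns against the labels on the summed sparse matrices,
# then keep only the most discriminative features.
selector = SelectKBest(f_classif, k=min(8, x_pre.shape[1]))
selector.fit(x_pre + x_hyp, labels)
x_pre = selector.transform(x_pre).astype('float32')
x_hyp = selector.transform(x_hyp).astype('float32')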
6
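The active `sepcnn_model` above differs from the commented-out draft in one important way: it wraps the embedding, sepCNN blocks, and pooling into a single `encoder = Model(inp, x_out)` and applies that one model to both inputs, so premise and hypothesis are encoded with shared weights, whereas the draft instantiated a separate convolution stack per input. Below is a stripped-down, runnable sketch of the weight-sharing pattern; the layer sizes are toy values, and a pooling layer stands in for the sepCNN blocks.

# Minimal sketch of a shared-encoder (siamese) model in Keras.
# Dimensions are placeholders; GlobalAveragePooling1D stands in
# for the separable-convolution blocks of the original model.
import tensorflow as tf
from tensorflow.keras import layers

seq_len, vocab_size, embed_dim = 20, 1000, 16

# Shared tower: built once, called on both inputs below.
token_in = layers.Input(shape=(seq_len,))
x = layers.Embedding(vocab_size, embed_dim)(token_in)
x = layers.GlobalAveragePooling1D()(x)
encoder = tf.keras.Model(token_in, x)

pre_in = layers.Input(shape=(seq_len,))
hyp_in = layers.Input(shape=(seq_len,))
merged = layers.concatenate([encoder(pre_in), encoder(hyp_in)])
out = layers.Dense(3, activation='softmax')(merged)

model = tf.keras.Model([pre_in, hyp_in], out)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])

Sharing the tower halves the encoder parameters and guarantees that both sentences land in the same representation space before the concatenation.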
exhuma/pickup
https://github.com/exhuma/pickup
05f8d271de95d76b337a6994dcd21799fe0e4b34
688b05d0ae1276dcc386b45c8ddb1cea71b15cb1
7da6ecf172b3e9354d93ddfe06f87b930fad90b3
refs/heads/master
2016-09-06T01:21:08.343607
2011-07-15T15:09:10
2011-07-15T15:09:10
1,059,260
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.6974790096282959, "alphanum_fraction": 0.7037814855575562, "avg_line_length": 26.985294342041016, "blob_id": "6a216a6c97ee1f589e811029275ec47776c610ba", "content_id": "03d227e1f0e32bece808cf4bd187e2237c1ac2fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 79, "num_lines": 68, "path": "/docs/source/index.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. pickup documentation master file, created by\n sphinx-quickstart on Sun Nov 7 19:27:05 2010.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nPickup\n==================================\n\nContents\n--------\n\n.. toctree::\n :maxdepth: 2\n\n installation\n configuration\n available_plugins\n writing_plugins\n logging\n how_and_why\n glossary\n\n\nIntroducing Pickup\n------------------\n\nPickup is a **modular backup script** written completely in Python.\n\nThe source code is available on `the github project page\n<https://github.com/exhuma/pickup>`_\n\nThe core of the application is the executable ``pickup.py`` and a python script\nused as config file. This core does not include *any* code related as to *how*\na backup from a given source should be created. This logic is stashed away in\nmodules. This has the advantage that it's very easy to add support for a new\n\"data source\" or to change the behaviour of an existing component.\n\nThe backup target is created in the exact same way. For the exact same reason.\nThe only drawback, is that backups need to be created in a \"staging area\" first\nbefore they are deployed to a target. This is done because some targets (like\nrsync) work best if you can feed them one folder containing everything. It\nwould be a waste to run rsync on each file separately.\n\nExample Configuration\n---------------------\n\n.. include:: config_examples/basic.rst\n\nSee :ref:`configuration` for more details and examples.\n\nExample Execution\n-----------------\n\nTake the above configuration and save it anywhere you like. You can execute it\nby running::\n\n python pickup.py -c /path/to/config_file.py\n\nOr, if you installed it into you system (see :ref:`installation`)::\n\n /path/to/pickup -c /path/to/config_file.py\n\nIndices and Tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n" }, { "alpha_fraction": 0.6212091445922852, "alphanum_fraction": 0.6241823434829712, "avg_line_length": 30.329193115234375, "blob_id": "7a6fcd97dde6f649ae0717ee0cc944f928c2970a", "content_id": "2c6b2795b29514da4321ab3e49cb1f9622fcc72d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5045, "license_type": "no_license", "max_line_length": 88, "num_lines": 161, "path": "/pickup/generator_profile/remote_tar.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"\nThe remote_tar plugin runs a tar command on a remote host and retrieves the\ngenerated tar file. In this initial version, the details are specified in the\nconfig variable \"tar_params\".\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **username** (string)\n The username to connect as to the remote host\n\n **hostname** (string)\n The hostname to connect to.\n\n **tar_params** (string)\n The parameters passed to the tar command (f.ex.: ``-cz /path/to/files``)\n\n .. 
warning:: Do not specify a target filename with the ``-f`` parameter!\n                 This plugin uses ``mktemp`` to create a file as securely and\n                 uniquely as possible! On the other hand, when using the\n                 ``-f`` parameter, this plugin will not know which file to\n                 transfer over! Use the config option \"target_filename\"\n                 instead!\n\n    **target_filename** (string)\n        The **local** filename (that is: the filename that is created inside the\n        staging area)\n\n    **port** (int) *optional*\n        The port to connect to (default=22)\n\n    **password** (string) *optional*\n        The password for the user. (default=None)\n\n        .. note:: Leave this empty if you want to use private/public key\n                  authentication (see: \"key_filename\")\n\n    **key_filename** (string|list of strings) *optional*\n        A filename (or list of filenames) which is/are used for private/public\n        key authentication.\n\n        .. note:: Leave this empty if you want to use password authentication\n                  (see: \"password\")\n\n    **tmpfolder** (string) *optional*\n        A folder on the **remote** machine! If this is left empty, the default\n        system location is used (usually ``/tmp``).\n\n        .. warning:: Leaving this empty *may* be a potential security risk as\n                     everybody has read access to ``/tmp``! This value is used\n                     with ``mktemp``. ``mktemp`` usually creates files with mode\n                     ``0600`` so this is less of a concern. It may be useful\n                     where this is not ensured.\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. code-block:: python\n\n    dict(\n        name = 'My home folder',\n        profile = 'remote_tar',\n        config = dict(\n            username = 'ninjamonkey',\n            hostname = 'batcave',\n            port = 64222,\n            key_filename = [\"/home/ninjamonkey/ssh_keys/batcave.rsa\"],\n            tmpfolder = \"\",\n            tar_params = \"-cz /home/ninjamonkey\",\n            target_filename = \"home_ninjamonkey.tar.gz\"\n        )\n    ),\n\"\"\"\n\nimport paramiko\nimport logging\nfrom os.path import join\n\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\nSOURCE = {}\n\ndef init(source):\n    CONFIG.update(source['config'])\n    SOURCE.update(source)\n    LOG.debug(\"Initialised '%s' with %r\" % ( __name__, CONFIG))\n\ndef connect():\n    client = paramiko.SSHClient()\n    client.load_system_host_keys()\n    client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n    LOG.info(\"Connecting to remote host %r\" % CONFIG['hostname'])\n    client.connect(\n        hostname = CONFIG['hostname'],\n        port = CONFIG.get('port', 22),\n        username = CONFIG['username'],\n        password = CONFIG.get('password', None),\n        key_filename = CONFIG.get('key_filename', None)\n    )\n    return client\n\ndef exec_ssh(client, command):\n    LOG.info(\"Executing remote command %r\" % command)\n    _, stdout, stderr = client.exec_command(command)\n    stdout = stdout.read().strip()\n    stderr = stderr.read().strip()\n    LOG.debug(\"Remote STDOUT\")\n    LOG.debug(stdout)\n\n    if stderr:\n        LOG.error(\"Remote STDERR\")\n        LOG.error(stderr)\n\n    return stdout, stderr\n\ndef cleanup(client, tar_name):\n    LOG.debug(\"Removing %r on remote site\" % tar_name)\n    exec_ssh(client, \"rm -v %s\" % tar_name)\n    client.close()\n\ndef create_tar(client):\n    # create a temporary file\n    stdout, stderr = exec_ssh(client, \"mktemp --tmpdir=%s\" % CONFIG.get(\"tmpfolder\", \"\"))\n    tmpfile = stdout\n    LOG.debug(\"Remote temp file: %r\" % tmpfile)\n\n    if not tmpfile:\n        raise ValueError(\"No tempfile name received. 
Cannot continue!\")\n\n # create the tar file\n exec_ssh(client, \"tar %s > %s\" % (CONFIG['tar_params'], tmpfile))\n return tmpfile\n\ndef download_tar(client, tar_name, target_folder):\n LOG.info(\"Downloading %r into %r\" % (tar_name, target_folder))\n sftp = client.open_sftp()\n sftp.get( tar_name, join(target_folder, CONFIG[\"target_filename\"]) )\n sftp.close()\n\ndef run(staging_area):\n if not \"target_filename\" in CONFIG:\n LOG.error(\"Config key 'target_filename' is required!\")\n return\n\n if not \"tar_params\" in CONFIG:\n LOG.error(\"Config key 'tar_params' is required!\")\n\n client = connect()\n tar_name = create_tar(client)\n download_tar(client, tar_name, staging_area)\n cleanup(client, tar_name)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n logging.getLogger(\"paramiko.transport.sftp\").setLevel(logging.DEBUG)\n run(\".\")\n\n" }, { "alpha_fraction": 0.7035278081893921, "alphanum_fraction": 0.7035278081893921, "avg_line_length": 33.27906799316406, "blob_id": "bb0f2c1e2c929d0a21b5e80082d1bf690da0a0db", "content_id": "455657fa37f2e78b2a8283f833c0b5fb2257400c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1474, "license_type": "no_license", "max_line_length": 79, "num_lines": 43, "path": "/docs/source/glossary.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _glossary:\n\nGlossary\n========\n\n.. glossary::\n :sorted:\n\n generator\n A generator is a plugin responsible to create a backup file.\n\n target\n A target is a location where the created backup files are stored. Each\n target is provided by a :term:`target profile <target_profile>`\n\n target_profile\n A target profile is a simple python script which will take files out of\n the :term:`staging area` and put it into a :term:`target`\n\n generator_profile\n A generator profile is a simple python script which will create files\n inside the :term:`staging area`. The primary use of a generator profile\n is to create files containing the backup data. But they could also create\n other files.\n\n staging area\n The staging area is nothing more than a temporary location. The files\n created by generators will be stored in this folder. Once all generators\n have finished, the target profiles will take over and :term:`publish`\n these files to their destinations.\n\n publish\n Publishing files does not mean that the files will be visible to the\n grand public. But rather it describes the process of putting (uploading,\n copying, ...) the files to a location defined as backup destination.\n\n module\n A general synonym for either :term:`generator_profile` or\n :term:`target_profile`\n\n profile\n A general synonym for either :term:`generator_profile` or\n :term:`target_profile`\n" }, { "alpha_fraction": 0.5417536497116089, "alphanum_fraction": 0.5456680655479431, "avg_line_length": 26.95620346069336, "blob_id": "3b67129d5180cc2af1f6c6cb9d631b88e8010f00", "content_id": "bf6e9b19b5b13acedc9355558d558e6dff12886d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3832, "license_type": "no_license", "max_line_length": 77, "num_lines": 137, "path": "/docs/source/config_examples/advanced.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _advanced_config:\n\nAs the config file is a python script, you can do pretty much everything you\nwant with it. 
What this will show you:\n\n - Use of comments (Lines starting with ``#``)\n\n - Usage of python modules from the standard library (in this case:\n   ``timedelta``, ``os.path``, ``os``)\n\n - using the ``dict()`` notation instead of ``{}`` literals. I personally\n   find this a lot more comfortable to write and read. But other than that,\n   both options are identical.\n\n - programmatically adding entries to the ``GENERATORS`` list.\n\n   This script will, at each execution, look for folders containing a file\n   ``do_backup`` under an example \"projects\" folder. If it finds one, that\n   folder is added to the generators.\n\n   As you can see, the config files can\n   become arbitrarily complex.\n\nAdvanced config file::\n\n    import os\n    import os.path\n\n    # Config version (major, minor)\n    CONFIG_VERSION = (2,1)\n\n    # Use the first target profile as staging area\n    FIRST_TARGET_IS_STAGING = True\n\n    # A custom variable. Not used by the application itself, but used here, in\n    # the config script!\n    THE_BACKUP_DIR = \"/var/backups/data\"\n\n    # All backups will be created in this folder before being deployed to the\n    # targets\n    STAGING_AREA = \"staging\"\n\n    # Backup Sources. They will be processed in order\n    #\n    # Details on the config values should be documented in the source modules\n    GENERATORS = [\n        dict(\n            name = 'MySQL',\n            profile = 'mysql',\n            config = dict(\n                # user should have full privileges on everything\n                database = '*',\n                port = \"3306\",\n                host = \"localhost\",\n                user = \"root\",\n                password = \"mysecretpassword\"\n            ),\n        ),\n        dict(\n            name = 'PostgreSQL 8.4',\n            profile = 'postgres',\n            config = dict(\n                host = 'localhost',\n                database = 'killerdb',\n                port = 5432\n            ),\n        ),\n        dict(\n            name = '/var/www',\n            profile = 'folder',\n            config = dict(\n                path = '/var/www',\n                split = True,\n            )\n        ),\n        dict(\n            name = '/var/mail',\n            profile = 'folder',\n            config = dict(\n                path = '/var/mail',\n            )\n        ),\n    ]\n\n    #\n    # Append each folder inside \"/path/with/projects\" which also contains a\n    # \"special\" file \"do_backup\"\n    #\n    projects_root = \"/path/with/projects\"\n    for entry in os.listdir(projects_root):\n        trigger_filename = \"do_backup\"\n        entrypath = os.path.abspath(\n            os.path.join(projects_root, entry))\n\n        if not os.path.isdir(entrypath):\n            # This entry is not a folder. So we'll skip it\n            continue\n\n        if not os.path.exists(os.path.join(entrypath, trigger_filename)):\n            # This folder does not contain a file named \"do_backup\"\n            # We'll skip this too.\n            continue\n\n        # Everything remaining is a directory containing \"do_backup\"\n        # Let's add it to the GENERATORS list.\n        GENERATORS.append(dict(\n            name = 'Project folder: %s' % entrypath,\n            profile = 'folder',\n            config = dict(\n                path = entrypath,\n            )\n        ))\n\n    #\n    # Backup targets. They will be processed in order.\n    #\n    TARGETS = [\n        dict(\n            name = \"local\",\n            profile = \"dailyfolder\",\n            config = dict(\n                retention = dict( days=7 ),\n                path = THE_BACKUP_DIR,\n            ),\n        ),\n        dict(\n            name = \"ftp\",\n            profile = \"ftp\",\n            config = dict(\n                host=\"my.ftp.host\",\n                username=\"ftpuser\",\n                password=\"asis! 
Light!\",\n remote_folder=\"backups\",\n retention = dict(weeks=52),\n )\n ),\n ]\n\n\n" }, { "alpha_fraction": 0.6991906762123108, "alphanum_fraction": 0.7018885016441345, "avg_line_length": 36.06666564941406, "blob_id": "cadd80178f988c44b602f027cda57d8c38c4b755", "content_id": "33572a0c2b2a55690c4ffc001e0a5800be0568cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2224, "license_type": "no_license", "max_line_length": 79, "num_lines": 60, "path": "/docs/source/config_examples/walkthrough.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "Let's construct a configuration file statement-by-statement.\n\n.. code-block:: python\n\n CONFIG_VERSION = (2,1)\n\nThis tells pickup that this configuration script conforms to the version \"2.1\".\nThis is used to detect if pickup can \"understand\" the directives in this config\nfile. If the minor number differs, it usually means, that pickup is still able\nto work with this config, but new optional features have been added. Pickup\nwill still continue to work, but issue a warning to make you aware of possible\nnew features.\n\nIf, on the other hand the major number differs, pickup will be unable to\nunderstand this config script and will abort with a critical error message.\n\n.. code-block:: python\n\n STAGING_AREA = \"staging\"\n\nThis is the temporary folder name for the backups. If, like in this case the\nfolder is relative, then it will be relative to the folder from which pickup\nwas executed. Not the folder where pickup is installed. If you want to be safe,\nuse an absolute foldername.\n\n.. code-block:: python\n\n GENERATORS = [{\n 'name': 'local home folders',\n 'profile': 'folder',\n 'config': {\n 'path': '/home',\n 'split': True,\n }}]\n\nThe ``GENERATORS`` list contains all the data \"sources\". In other words, it\ncontains all the modules which will generate backup data. These \"profiles\" will\nbe executed in the order as they appear in the list.\n\nIn this case, we will back-up data contained inside ``/home`` and for each\nfolder contained therein, we will create a separate tarball. For more\ninformation, see :ref:`available_plugins`\n\n.. code-block:: python\n\n TARGETS = [{\n 'name': \"local\",\n 'profile': \"dailyfolder\",\n 'config': {\n 'path': \"/var/backups/daily\",\n 'retention': { 'days': 7 }\n }}]\n\nThe ``TARGETS`` list contains all the destinations in which the created backup\ndata should be distributed to. Again, the entries are process in the same order\nas they appear in the list.\n\nIn this case, this will store the backups in a folder with the current date\ninside a \"container\" folder (``/var/backups/daily``). Folders older than 7 days\nwill be deleted. See :ref:`available_plugins` for more information.\n" }, { "alpha_fraction": 0.5919308066368103, "alphanum_fraction": 0.5946205854415894, "avg_line_length": 25.545917510986328, "blob_id": "46758023c99d43e4b9e7e095be26e990a9a9ac45", "content_id": "24eae4c05cab549ff2ea18342228a0d13787af9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5205, "license_type": "no_license", "max_line_length": 77, "num_lines": 196, "path": "/pickup/target_profile/ftp.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "\"\"\"\nUploads the staging folder to an FTP host. On the remote host a new subfolder\nwith the current date will be created (f.ex.: '2010-11-01'). 
The staging area\nwill be stored in that folder.\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **host** (string)\n The FTP hostname\n\n **username** (string)\n The username\n\n **password** (string)\n The password for the user\n\n **remote_folder** (string) *optional*\n If specified, the backups will be rooted in this folder. If not\n specified, the backups will be created on the default folder.\n\n **retention** (dict) *optional*\n How long the data should be kept. Everything older than this will be\n deleted. The dictionary values will be passed as keyword arguments to\n `datetime.timedelta\n <http://docs.python.org/library/datetime.html#datetime.timedelta>`_. If\n set to ``None``, the data will be kept indefinitely!\n\n **Default:** ``None``\n\n .. note:: This script uses the folder name to determine the date! All\n folders that have a name not expected by this script, will\n issue a warning.\n\n **dry_run** (boolean) *optional*\n If set to ``True`` no files will be uploaded or deleted. Instead, the\n operations will only be reported to stdout.\n\n .. note:: Folders will still be created on the remote host to have an\n accurate simulation.\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. code-block:: python\n\n dict(\n name = \"my ftp host\",\n profile = \"ftp\",\n config = dict(\n host=\"ftp.myhost.net\",\n username=\"itsame\",\n password=\"maario\",\n dry_run=True,\n remote_folder=\"tube/coins\",\n retention=dict(\n weeks=52\n ),\n )\n ),\n\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport logging\nimport os\nimport os.path\nfrom ftplib import FTP, error_perm\n\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\nFOLDER_FORMAT = \"%Y-%m-%d\"\n\ndef init(target):\n CONFIG.update(target['config'])\n LOG.debug(\"Initialised '%s' with %r\" % ( __name__, CONFIG))\n\ndef try_mkd( conn, foldername ):\n try:\n conn.mkd(foldername)\n except Exception, e:\n if \"File exists\" in str(e):\n pass\n else:\n raise\n\ndef folder():\n return\n\ndef rmrf(conn, path):\n LOG.debug('Recursively deleting %s' % path)\n conn.cwd(path)\n for entry in conn.nlst():\n if entry in (\".\", \"..\"):\n continue\n\n try:\n conn.delete(entry)\n except error_perm, exc:\n # Permission Denied (most likely a directory)\n try:\n rmrf(conn, entry)\n except Exception, exc2:\n # Probably not a directory. 
Skip this entry\n LOG.warning(str(exc2))\n pass\n conn.cwd(\"..\")\n conn.rmd(path)\n\ndef remove_old_files(conn, timedelta_params):\n delta = timedelta(**timedelta_params)\n threshold_date = datetime.now() - delta\n LOG.info(\"Removing files created before %s\" % threshold_date)\n for entry in conn.nlst():\n if entry in ('.', '..'):\n continue\n\n try:\n entry_date = datetime.strptime(entry, FOLDER_FORMAT)\n LOG.debug(\"Inspecting %s (threshold=%s, todelete=%s)\" % (\n entry, threshold_date, entry_date<threshold_date ))\n if entry_date < threshold_date:\n LOG.info(\"Deleting %s\" % entry)\n if not CONFIG.get(\"dry_run\", False):\n rmrf(conn, entry)\n except ValueError, e:\n LOG.warning( str(e) )\n else:\n LOG.info(\"All obsolete files successfully removed.\")\n\ndef run_ftp(staging_area):\n \"\"\"\n Run the ftp profile\n\n I put this in a separate method to make error-handling and work-dir\n restoration easier to read in the \"run\" method.\n \"\"\"\n os.chdir(staging_area)\n current_date_folder = datetime.now().strftime(FOLDER_FORMAT)\n\n ftp = FTP(CONFIG['host'],\n user=CONFIG['username'],\n passwd=CONFIG['password']\n )\n\n if CONFIG.get('remote_folder', None):\n try_mkd( ftp, CONFIG['remote_folder'] )\n ftp.cwd(CONFIG['remote_folder'])\n\n # delete old files\n timedelta_params = CONFIG.get('retention', None)\n if timedelta_params:\n remove_old_files(ftp, timedelta_params)\n\n try_mkd( ftp, current_date_folder )\n ftp.cwd( current_date_folder )\n\n backup_root = ftp.pwd()\n LOG.info(\"Current FTP folder: %r\" % backup_root)\n\n for root, dirs, files in os.walk(\".\"):\n ftp.cwd(backup_root)\n\n if root == '.':\n continue\n\n # create required folder structure\n for node in os.path.split(root):\n if node == '.':\n continue\n\n try_mkd(ftp, node)\n ftp.cwd(node)\n\n # upload files\n for filename in files:\n LOG.info( \"Uploading %s to %s\" % (\n filename, ftp.pwd()))\n if not CONFIG.get(\"dry_run\", False):\n ftp.storbinary( \"STOR %s\" % filename,\n open(os.path.join(root,filename), \"rb\") )\n\n ftp.quit()\n\ndef run(staging_area):\n workdir_bak = os.getcwd()\n\n try:\n run_ftp(staging_area)\n except Exception, e:\n LOG.exception(e)\n\n os.chdir(workdir_bak)\n\n\n" }, { "alpha_fraction": 0.5019011497497559, "alphanum_fraction": 0.5057034492492676, "avg_line_length": 26.63157844543457, "blob_id": "95bdef10c7290355ffff67b333a528f6a37429cd", "content_id": "9b91368af388858a0b6a06096c56a4e7b349b7bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 526, "license_type": "no_license", "max_line_length": 74, "num_lines": 19, "path": "/docs/source/config_examples/basic.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "The following configuration script will create a backup of each local home\nfolder. 
Each folder will be stored as separate tarball::\n\n CONFIG_VERSION = (2,1)\n STAGING_AREA = \"staging\"\n GENERATORS = [{\n 'name': 'local home folders',\n 'profile': 'folder',\n 'config': {\n 'path': '/home',\n 'split': True,\n }}]\n\n TARGETS = [{\n 'name': \"local\",\n 'profile': \"dailyfolder\",\n 'config': {\n 'path': \"/var/backups/daily\",\n }}]\n\n" }, { "alpha_fraction": 0.585106372833252, "alphanum_fraction": 0.5914893746376038, "avg_line_length": 25.11111068725586, "blob_id": "8156f4de73c598061c619e820d2481c3a4b7c409", "content_id": "c56017334c9c7355be284570add943af122209df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "no_license", "max_line_length": 73, "num_lines": 18, "path": "/setup.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\nsetup(\n name = \"pickup\",\n version = \"1.4\",\n packages = find_packages(),\n entry_points = { 'console_scripts': ['pickup = pickup.pickup:main'] },\n install_requires = [\n 'paramiko',\n 'mysql-python',\n 'psycopg2',\n ],\n author = \"Michel Albert\",\n author_email = \"[email protected]\",\n description = \"Modular backup script\",\n license = \"BSD\",\n keywords = \"backup\",\n url = \"http://exhuma.github.com/pickup\",\n)\n" }, { "alpha_fraction": 0.6517857313156128, "alphanum_fraction": 0.6517857313156128, "avg_line_length": 13.710526466369629, "blob_id": "90506470a94527e5d497ece7443837eacd17dee0", "content_id": "4464f51f4fe57e56fe740e74405c828c9ca706fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 560, "license_type": "no_license", "max_line_length": 51, "num_lines": 38, "path": "/docs/source/available_plugins.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _available_plugins:\n\nAvailable Plugins\n=================\n\nGenerators\n----------\n\nfolder\n~~~~~~\n.. automodule:: pickup.generator_profile.folder\n\npostgres\n~~~~~~~~\n.. automodule:: pickup.generator_profile.postgres\n\nmysql\n~~~~~\n.. automodule:: pickup.generator_profile.mysql\n\ncommand\n~~~~~~~\n.. automodule:: pickup.generator_profile.command\n\nremote_tar\n~~~~~~~~~~\n.. automodule:: pickup.generator_profile.remote_tar\n\nTargets\n-------\n\ndailyfolder\n~~~~~~~~~~~\n.. automodule:: pickup.target_profile.dailyfolder\n\nftp\n~~~\n.. automodule:: pickup.target_profile.ftp\n\n" }, { "alpha_fraction": 0.6794384121894836, "alphanum_fraction": 0.6811933517456055, "avg_line_length": 28.991228103637695, "blob_id": "a879d68d847be83da98f889345fb12270e696557", "content_id": "11db91b3ae4ede9d265625ee082056f87e1d463b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3419, "license_type": "no_license", "max_line_length": 79, "num_lines": 114, "path": "/docs/source/configuration.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _configuration:\n\nConfiguration\n=============\n\nThe configuration file is a python file itself and can be placed wherever you\nsee fit.\n\nThis page explains the general configuration structure. Each :term:`module` may\nprovide additional configuration options. They can be defined in each module's\n``config`` dictionary. The details for each of these module-level configs can\nbe found in :ref:`available_plugins`.\n\nBasic example\n-------------\n\n.. 
include:: config_examples/basic.rst\n\nA configuration walkthrough\n---------------------------\n\n.. include:: config_examples/walkthrough.rst\n\nRequired values\n---------------\n\nThe following values must be specified:\n\n**CONFIG_VERSION**\n\n This is used by the core application to determine if it knows how to read\n the config file. If this value is incorrect, the core will issue\n errors/warnings.\n\n The value is a tuple representing a major and minor number.\n\n It follows the following rule:\n\n - If an application change *requires* a change in the config, the major\n number will increase.\n - If a change is made in the application which will still be able to\n function with an old config version, but may benefit from new fields,\n then the minor number will increase.\n\n**FIRST_TARGET_IS_STAGING** (optional)\n\n .. versionadded:: 1.3\n\n If this is set to \"True\", then the first target profile will be used as\n staging area. Using this, you can avoid storing the data more than once on a\n local machine during the backup.\n\n .. note:: This will override the value of ``STAGING_AREA``!\n\n Restrictions apply though:\n\n - The profile must me a local folder (currently, only ``dailyfolder`` is\n supported)\n\n - The profile must return it's target path using the ``folder()`` method.\n\n**STAGING_AREA**\n A *temporary* folder. All backup files will be created in that folder before\n pushed into the targets.\n\n .. note:: If ``FIRST_TARGET_IS_STAGING`` is used, this value is ignored.\n\n**GENERATORS**\n\n .. versionadded:: 1.1\n\n A list of generators. The generators will be processed in the same order as\n they appear in the config file. Each generator must have the following\n fields:\n\n ``name``\n The name of the generator. This is used to generate folder and\n filenames for the backup files.\n\n ``profile``\n The name of the :term:`module` used for this generator. See\n :ref:`available_plugins` for a list of available profiles.\n\n ``config``\n Config values for the generator profile. These fields depend on the\n underlying plugin. The values should be documented in\n :ref:`available_plugins`\n\n**TARGETS**\n A list of backup targets. The targets will be processed in the same order as\n they appear in the config file. Each target must have the following fields:\n\n ``name``\n The name of the target (Mainly used to display it in the logs)\n\n ``profile``\n The name of the :term:`module` used for this target. See\n :ref:`available_plugins` for a list of available profiles.\n\n ``config``\n Config values for the target profile. These fields depend on the\n underlying plugin. The values should be documented in\n :ref:`available_plugins`\n\n**SOURCES**\n\n .. deprecated:: 1.1\n\n Use ``GENERATORS`` instead\n\nAdvanced Example\n----------------\n\n.. 
include:: config_examples/advanced.rst\n" }, { "alpha_fraction": 0.6513761281967163, "alphanum_fraction": 0.6513761281967163, "avg_line_length": 23.22222137451172, "blob_id": "28dd0435046979b696d1f3983efe03d04f61025c", "content_id": "5438e17b657f6c32a6a97aa393213ca360acb776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/pickup/target_profile/__init__.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "def create(plugname):\n\n if plugname in globals():\n module = reload(globals()[plugname])\n return module\n\n module = 'pickup.target_profile.%s' % plugname\n __import__(module)\n return globals()[plugname]\n" }, { "alpha_fraction": 0.6210969090461731, "alphanum_fraction": 0.6253045201301575, "avg_line_length": 33.29620361328125, "blob_id": "04bcd04bbd04a86f9f1b837fc1f8c4d408015842", "content_id": "297eafd03f53f777616d2d0d7c7c8da5284d7376", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13547, "license_type": "no_license", "max_line_length": 116, "num_lines": 395, "path": "/pickup/pickup.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# This is a backup utility, archiving mysql, postgres, mail and web-data\n# Copyright (C) 2010 Michel Albert\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 59 Temple\n# Place, Suite 330, Boston, MA 02111-1307 USA\n#\n#-----------------------------------------------------------------------------\n\nfrom datetime import datetime\nfrom logging.handlers import RotatingFileHandler\nfrom optparse import OptionParser\nfrom os.path import exists, abspath, join, dirname, expanduser\nfrom shutil import rmtree\nimport logging\nimport os\nimport sys\nimport re\n\nimport generator_profile\nimport target_profile\nimport config\nfrom lib.term import TerminalController\n\nLOG = logging.getLogger(__name__)\nOPTIONS = {}\nARGS = []\nconfig_instance = None\n\n#-----------------------------------------------------------------------------\n\nEXPECTED_CONFIG_VERSION = (2,2)\nTERM = TerminalController()\n\nclass ReverseLevelFilter(logging.Filter):\n \"\"\"\n Filter out messages *above* a specific level. (In other words: log only\n messages *below* maxlevel)\n \"\"\"\n\n def __init__(self, maxlevel, *args, **kwargs):\n logging.Filter.__init__(self, *args, **kwargs)\n self.maxlevel = maxlevel\n\n def filter( self, record ):\n if record.levelno <= self.maxlevel:\n return True\n else:\n return False\n\ndef check_config():\n \"\"\"\n Makes some sanity checks on the config file. 
And gives warnings/errors if\n important conditions are not met (config version too old, ...)\n \"\"\"\n\n if not config_instance:\n LOG.error(\"Failed to load the config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"CONFIG_VERSION\"):\n LOG.warning( \"The config file does not specify CONFIG_VERSION! I will \"\n \"try to continue anyway, but this field is recommended to allow \"\n \"some internal tests to work. I will assume the value '(1,0)'!\" )\n config_instance.CONFIG_VERSION = (1, 0)\n\n major, minor = config_instance.CONFIG_VERSION\n expected_major, expected_minor = EXPECTED_CONFIG_VERSION\n\n if major < expected_major:\n LOG.critical(\"The config system has undergone a major change! \"\n \"I cannot continue without an upgrade!\")\n sys.exit(9)\n\n if minor < expected_minor:\n LOG.warning(\"The config system has undergone a minor change! \"\n \"It should work, but you still should review the docs!\")\n\n if major == expected_major and minor == expected_minor:\n LOG.debug( \"Config version OK!\" )\n\n if not hasattr(config_instance, \"GENERATORS\"):\n LOG.critical(\"Variable 'GENERATORS' not found in config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"TARGETS\"):\n LOG.critical(\"Variable 'TARGETS' not found in config!\")\n sys.exit(9)\n\ndef setup_logging():\n \"\"\"\n Log everything below warning to stdout, and\n everything above warning to stderr (including warning)\n \"\"\"\n\n # make sure all messages are propagated to the top-level logger\n LOG.setLevel(logging.DEBUG)\n err_format = logging.Formatter(TERM.RED + \"%(asctime)s | %(name)s | %(levelname)s | %(message)s\" + TERM.NORMAL)\n out_format = logging.Formatter(\"%(asctime)s | %(name)s | %(levelname)s | %(message)s\")\n\n gen_log = logging.getLogger(\"pickup.generator_profile\")\n tgt_log = logging.getLogger(\"pickup.target_profile\")\n\n if not OPTIONS.quiet:\n stdout_handler = logging.StreamHandler(sys.stdout)\n\n if OPTIONS.debug:\n stdout_handler.setLevel(logging.DEBUG)\n else:\n stdout_handler.setLevel(logging.INFO)\n\n stdout_handler.addFilter( ReverseLevelFilter(logging.INFO) )\n stdout_handler.setFormatter(out_format)\n LOG.addHandler(stdout_handler)\n gen_log.addHandler(stdout_handler)\n tgt_log.addHandler(stdout_handler)\n\n stderr_handler = logging.StreamHandler(sys.stderr)\n stderr_handler.setLevel(logging.WARNING)\n stderr_handler.setFormatter(err_format)\n\n if not exists(\"logs\"):\n os.makedirs(\"logs\")\n os.chmod(\"logs\", 0700)\n\n LOG_FILE = join(\"logs\", \"pickup.log\")\n debug_handler = RotatingFileHandler(LOG_FILE,\n maxBytes=100000, backupCount=5)\n\n if exists(LOG_FILE):\n os.chmod(LOG_FILE, 0600)\n\n debug_handler.setLevel(logging.DEBUG)\n debug_handler.setFormatter(out_format)\n\n LOG.addHandler(stderr_handler)\n LOG.addHandler(debug_handler)\n\n # plugin loggers\n gen_log.setLevel(logging.DEBUG)\n gen_log.addHandler(stderr_handler)\n gen_log.addHandler(debug_handler)\n\n tgt_log.setLevel(logging.DEBUG)\n tgt_log.addHandler(stderr_handler)\n tgt_log.addHandler(debug_handler)\n\ndef api_is_compatible(module, api_version):\n \"\"\"\n Check if a plugin module is compatible with this version of the application.\n\n @param module: The module\n \"\"\"\n\n if not hasattr(module, \"API_VERSION\"):\n LOG.error(\"Module '%s' does not specify an API version! 
Skipping!\" %\n module.__name__)\n return False\n\n major, minor = module.API_VERSION\n expected_major, expected_minor = api_version\n if major != expected_major:\n LOG.error(\"Module '%s' is out of date (major API version \"\n \"number is %d, but it should be %s). Skipping!\" %\n ( module.__name__, major, expected_major))\n return False\n\n if minor < expected_minor:\n LOG.warning(\"Module '%s' is out of date (minor API version \"\n \"number is %d, but it should be %s). Will continue anyway...\" %\n ( module.__name__, minor, expected_minor))\n return True\n\ndef get_profile_folder(container, profile_config):\n \"\"\"\n Create a unique foldername for a profile based on it's name\n \"\"\"\n\n # replace non-ascii characters with underscores\n profile_folder = re.sub( r'[^a-zA-Z0-9_-]', '_', profile_config['name'] )\n\n # now remove all leading/trainling underscores\n profile_folder = profile_folder.strip(\"_\")\n\n # prepend the container\n profile_folder = join(container, profile_folder)\n\n # prevent accidental overwrites\n counter = 0\n while exists(profile_folder):\n counter += 1\n LOG.debug( \"File %s exists. Adding a counter.\" % profile_folder )\n profile_folder = \"%s-%d\" % (profile_folder, counter)\n return profile_folder\n\ndef load_profile(package, profile_config):\n LOG.debug(\"Loading profile '%(name)s' [%(profile)s]\" % profile_config )\n\n profile = None\n try:\n profile = package.create(profile_config[\"profile\"])\n if not api_is_compatible(profile, (2,0)):\n return\n profile.init(profile_config)\n except ImportError, exc:\n LOG.error( \"Unable to instantiate target profile %s. \"\n \"Error message was: %s\" % (profile_config[\"profile\"], exc) )\n\n return profile\n\ndef run_profile(package, profile_config):\n \"\"\"\n Run the generator/target profile\n\n @param package: The profile package\n @param profile_config: The profile settings (from the config)\n \"\"\"\n\n LOG.info(\"Running '%(name)s' [%(profile)s]\" % profile_config )\n\n profile = load_profile(package, profile_config)\n if not profile:\n return\n\n # create a subfolder for generator profiles\n if package.__name__ == \"pickup.generator_profile\":\n\n # first folder level is the module name. Append this to the staging area\n module_folder = profile.__name__.split(\".\")[-1]\n module_folder = join(config_instance.STAGING_AREA, module_folder)\n\n # into the module folder we put a folder based on the profile's name\n staging_folder = get_profile_folder(module_folder, profile_config)\n\n # just in case it does not exist, we'll create all required folders\n if not exists( staging_folder ):\n os.makedirs( staging_folder )\n LOG.debug( \"Created directory %r\" % staging_folder )\n else:\n staging_folder = config_instance.STAGING_AREA\n\n try:\n profile.run(staging_folder)\n except Exception, exc:\n LOG.error(\"Error staging '%s'. 
Error message: %s\" %\n (profile_config['name'], exc))\n LOG.exception(exc)\n\ndef get_lock_file():\n \"\"\"\n Returns a lock file.\n \"\"\"\n if OPTIONS.pidfile:\n return expanduser(OPTIONS.pidfile)\n\n if os.name == 'posix':\n return '/var/run/pickup.pid'\n elif os.name == 'nt':\n lock_file = join(os.environ['APPDATA'], 'pickup', 'pickup.pid')\n os.makedirs(dirname(lock_file))\n return lock_file\n else:\n LOG.error('Unable to create the lock file on this OS (%r)' % os.name)\n sys.exit(9)\n\ndef acquire_lock():\n \"\"\"\n This method is used to prevent multiple instances running at the same time.\n It creates a lock file containing the current PID (if available).\n\n If the file exists, the application will exit with an error.\n \"\"\"\n\n lock_file = get_lock_file()\n if exists(lock_file):\n LOG.critical('Lock file %r exists already. Is the process still running? Exiting with error...' % lock_file)\n sys.exit(9)\n\n LOG.info('Creating lock file: %r' % lock_file)\n with open(lock_file, 'w') as fptr:\n fptr.write(\"%d\" % os.getpid())\n\ndef release_lock():\n \"\"\"\n Releases the process lock acquired via `acquire_lock`.\n \"\"\"\n lock_file = get_lock_file()\n if exists(lock_file):\n LOG.info('Removing lock file %r' % lock_file)\n os.unlink(lock_file)\n else:\n LOG.warning('Lock file %r did not exist.' % lock_file)\n\ndef init():\n global OPTIONS, ARGS, config_instance\n\n OPTIONS, ARGS = parse_cmd_args()\n setup_logging()\n\n LOG.info(\"Backup session starting...\")\n\n try:\n config_instance = config.create(OPTIONS.config)\n except ImportError, exc:\n LOG.critical( \"Error loading the config module %r! \"\n \"This file is required. If you just made a clean checkout, have a \"\n \"look at config/config.py.dist for an example.\" % OPTIONS.config )\n LOG.exception(exc)\n sys.exit(9)\n\n check_config()\n\n first_target = None\n if (hasattr(config_instance, \"FIRST_TARGET_IS_STAGING\") and\n config_instance.FIRST_TARGET_IS_STAGING):\n first_target = config_instance.TARGETS.pop(0)\n if first_target.get(\"profile\") not in ('dailyfolder',):\n LOG.error(\"When using the first target as staging, it must be a local folder!\")\n sys.exit(9)\n\n # retrieve the folder where the module will put the files\n profile = load_profile(target_profile, first_target)\n if not profile.folder():\n LOG.error(\"The target %s cannot be used as staging area (it's not\"\n \" returning a local folder path )\" % profile.__name__)\n config_instance.STAGING_AREA = profile.folder()\n\n if not exists(config_instance.STAGING_AREA):\n os.makedirs(config_instance.STAGING_AREA)\n LOG.info(\"Staging folder '%s' created\" % abspath(config_instance.STAGING_AREA))\n if not os.path.isdir(config_instance.STAGING_AREA):\n LOG.critical(\"Staging folder '%s' is not a folder!\" % abspath(config_instance.STAGING_AREA))\n sys.exit(9)\n LOG.info(\"Staging area is: %s\" % abspath(config_instance.STAGING_AREA))\n\ndef main():\n\n init()\n\n acquire_lock()\n\n now = datetime.now()\n LOG.info(\"Fetching from generators\")\n for generator in config_instance.GENERATORS:\n run_profile(generator_profile, generator)\n\n LOG.info(\"Pushing to targets\")\n for target in config_instance.TARGETS:\n run_profile(target_profile, target)\n\n if (not hasattr(config_instance, \"FIRST_TARGET_IS_STAGING\") or\n not config_instance.FIRST_TARGET_IS_STAGING):\n LOG.info(\"Deleting staging area\")\n rmtree(config_instance.STAGING_AREA)\n\n release_lock()\n\n LOG.info(\"Backup session finished.\")\n\ndef parse_cmd_args():\n parser = OptionParser()\n 
parser.add_option(\"-p\", \"--pid-file\", dest=\"pidfile\",\n help=(\"Store the PID of the process in FILE. \"\n \"Defaults to /var/run/pickup.pid (Posix) or \"\n \"%APPDATA%/pickup/pickup.pid (Windows)\"),\n action=\"store\", default=None,\n metavar = \"FILE\")\n parser.add_option(\"-c\", \"--config\", dest=\"config\",\n help=\"The config file to use\",\n action=\"store\", default=\"config\")\n parser.add_option(\"-d\", \"--debug\", dest=\"debug\",\n help=\"enable debug messages on stdout\",\n action=\"store_true\", default=False)\n parser.add_option(\"-q\", \"--quiet\",\n action=\"store_true\", dest=\"quiet\",\n default=False,\n help=\"Suppress stdout (stderr will still enabled)\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6392778158187866, "alphanum_fraction": 0.6414885520935059, "avg_line_length": 29.460674285888672, "blob_id": "9369af6c06a06ab45258dc648690337f880c8841", "content_id": "d03bc601b05989f9673467a7cd74087f6c0b87f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2714, "license_type": "no_license", "max_line_length": 140, "num_lines": 89, "path": "/pickup/generator_profile/command.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "\"\"\"\nThe command plugin executes a shell command. It will capture stdout and stderr\nto stdout.txt and stderr.txt respectively.\n\n.. note::\n Piping multiple commands together does not work due to the simple usage of\n Popen! If you want to do this it is recommended to write an intermediary\n shell script.\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **command** (string)\n The command\n\n **returncodes_ok** (string) *optional*\n A list of expected return codes. All return codes in this list are\n considered to indicate successful process termination. If a different\n return code is received, the plugin will issue an error message including\n the capture stderr text.\n Default: [0]\n\n **popen_params** (dict) *optional*\n This dictionary is passed directly as keyword parameters to `Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`_\n The most interesting parameters may be ``cwd`` and ``env``.\n\n .. warning::\n If you specify ``stderr`` or ``stdout`` in this parameter, it will\n override the default redirection. Which means, that the default files\n in the staging area will not be created!\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. 
code-block:: python\n\n dict(\n name = 'test command',\n profile = 'command',\n config = dict(\n command = \"find .\",\n returncodes_ok = 1,\n popen_params = { 'cwd': '/tmp' }\n ),\n ),\n\n\"\"\"\nimport logging\nfrom os.path import join\nfrom subprocess import Popen, PIPE\nimport shlex\n\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\nSOURCE = {}\n\ndef init(source):\n \"\"\"\n Initialise the plugin\n \"\"\"\n CONFIG.update(source['config'])\n SOURCE.update(source)\n LOG.debug(\"Initialised '%s' with %r\" % ( __name__, CONFIG))\n\ndef run(staging_area):\n\n LOG.info( \"Capturing output of command %r\" % CONFIG['command'] )\n LOG.debug( \" shlex.split result: %r\" % shlex.split(CONFIG['command']) )\n stdout = open( join(staging_area, \"stdout.txt\"), \"w+\" )\n stderr = open( join(staging_area, \"stderr.txt\"), \"w+\" )\n popen_params = CONFIG.get( 'popen_params', {} )\n process = Popen( shlex.split( CONFIG['command']),\n stdout=stdout,\n stderr=stderr,\n **popen_params)\n retcode = process.wait()\n expected_codes = CONFIG.get(\"returncodes_ok\", [0])\n if isinstance(expected_codes, int):\n expected_codes = [expected_codes]\n if retcode not in expected_codes:\n LOG.error( \"Process terminated with non-expected return code: %r\"\n % retcode )\n stderr.seek(0)\n LOG.error( \"STDERR data:\\n%s\" % stderr.read() )\n stdout.close()\n stderr.close()\n\n\n\n" }, { "alpha_fraction": 0.6821345686912537, "alphanum_fraction": 0.6867749691009521, "avg_line_length": 29.714284896850586, "blob_id": "ded235022f65c4433223f4bf6e41c4d31d423867", "content_id": "f9a02db37bb4f2227cc6b66a1f041d5502244826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 57, "num_lines": 14, "path": "/pickup/config/__init__.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "from os.path import expanduser, exists, dirname, basename\nimport sys\n\ndef create(filename):\n user_config = expanduser(filename)\n user_config_folder = dirname(filename)\n module_name = basename(filename).rsplit(\".\", 1)[0]\n\n if exists(filename):\n sys.path.append(user_config_folder)\n the_instance = __import__(module_name)\n return the_instance\n else:\n raise ImportError(\"File %r not found!\" % filename)\n\n" }, { "alpha_fraction": 0.5862292051315308, "alphanum_fraction": 0.5939995646476746, "avg_line_length": 30.944828033447266, "blob_id": "8405ae051fb7f1eb7a8f7510ed1d0884997a12d1", "content_id": "49d32496e4b0cbcad0fe23fce773aad17ea67961", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4633, "license_type": "no_license", "max_line_length": 79, "num_lines": 145, "path": "/pickup/generator_profile/mysql.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module will dump MySQL databases running ``mysqldump``. The output will be\nrun through ``bzip2``.\n\nPrerequisites\n~~~~~~~~~~~~~\n\n - ``mysql-python`` must be installed (``apt-get install python-mysqldb``)\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **database** (string)\n The database to backup. This can be ``'*'`` to backup all databases\n (excluding ``information_schema``)\n\n .. 
note:: In order for the wildcard ``\"*\"`` to work in the config file,\n the user must be able to connect to \"mysql\" and must have\n read access to the table \"db\".\n\n **host** (string) *optional* (default=\"localhost\")\n The host on which the database is running\n\n **user** (string) *optional* (default=\"root\")\n The user used to connect to the DB\n\n **password** (string) *optional* (default=\"\")\n The password used to connect to the DB\n\n **port** (string/int) *optional* (default=3306)\n The port on which the database is running\n\n **mysqldump_params** (string) *optional*\n These parameters are passed directly to ``mysqldump``.\n\n .. warning:: The parameters for host, user, port and password (``-h``,\n ``-u``, ``-p`` and ``-P`` respectively) should be\n **avoided**! The plugin uses the settings ``host``, ``user``\n and ``port`` to set these automatically.\n\n The plugin uses two types of connection: a programmatic\n connection using ``libmysql`` and an indirect connection using\n the ``mysqldump`` executable. The params specified in this\n config variable will **only** be passed to ``mysqldump``. So\n specifying host/user/password/port values here that differ\n from the dedicated config variables may have\n unexpected results.\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. code-block:: python\n\n dict(\n name = 'MySQL',\n profile = 'mysql',\n config = dict(\n database = \"*\",\n port = \"3306\",\n host = \"localhost\",\n user = \"backupuser\",\n password = \"foobar\",\n mysqldump_params = \"\",\n connection_params = dict(\n charset='utf8',\n compress=True\n )\n ),\n ),\n\n\"\"\"\nimport logging\nimport shlex\nimport MySQLdb\nfrom subprocess import Popen, PIPE\nfrom os.path import join\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\n\ndef init(source_dict):\n LOG.debug(\"Hello, I was initialised with %s\" % source_dict)\n CONFIG.update(source_dict[\"config\"])\n\ndef dump_all_dbs(conn, staging_area):\n # get a list of all available dbs\n cur = conn.cursor()\n cur.execute(\"SHOW databases\")\n for row in cur.fetchall():\n # Database \"mysql\" is *always* included in the backup. It contains\n # critical data like usernames and passwords. Without it a backup is\n # worthless. So we ignore it here, and create it separately in the main\n # \"run\" method.\n if row[0] not in [\"information_schema\", \"mysql\"]:\n dump_one_db(conn, row[0], staging_area)\n cur.close()\n\ndef dump_one_db(conn, db, staging_area):\n LOG.info(\"Dumping %s\" % db)\n\n command = [ 'mysqldump',\n \"-P\", str(CONFIG.get('port', 3306)),\n \"-h\", CONFIG.get('host', \"localhost\"),\n \"-u\", CONFIG.get('user', \"root\"),\n \"-p%s\" % CONFIG.get('password', \"\") ]\n\n if \"mysqldump_params\" in CONFIG and CONFIG[\"mysqldump_params\"]:\n command.extend( shlex.split(CONFIG[\"mysqldump_params\"]) )\n command.append( db )\n LOG.debug(\"Running command %r\" % command)\n\n p1 = Popen( command, stdout=PIPE, stderr=PIPE )\n p2 = Popen( \"bzip2\", stdin=p1.stdout, stdout=open(\n join(staging_area, \"%s.bz2\" % db), \"wb\"), stderr=PIPE )\n\n p1.wait()\n p2.wait()\n\n if p1.returncode != 0:\n LOG.error(\"Error while running mysqldump: %s\" % p1.stderr.read())\n\n if p2.returncode != 0:\n LOG.error(\"Error while running bzip2: %s\" % p2.stderr.read())\n\ndef run(staging_area):\n\n # so far so good. 
connect...\n conn = MySQLdb.connect( db=\"mysql\",\n user = CONFIG.get(\"user\", \"root\"),\n passwd = CONFIG.get('password', \"\"),\n host = CONFIG.get('host', \"localhost\"),\n port = CONFIG.get('port', 3306),\n **CONFIG.get('connection_params', {})\n )\n\n # always create a backup of \"mysql\" if possible\n dump_one_db(conn, \"mysql\", staging_area)\n if CONFIG['database'] == '*':\n dump_all_dbs(conn, staging_area)\n else:\n dump_one_db(conn, CONFIG['database'], staging_area)\n\n conn.close()\n\n" }, { "alpha_fraction": 0.6700000166893005, "alphanum_fraction": 0.6700000166893005, "avg_line_length": 25.302631378173828, "blob_id": "69e5fcf2ccccf756e1604e76fadd73b4a8d73500", "content_id": "54dd7c6e29934dc6a89c72195fa65891d770fb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 79, "num_lines": 76, "path": "/docs/source/installation.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _installation:\n\nInstallation\n============\n\n.. note:: I **highly** recommend using virtualenv, but nothing prevents you\n from installing it into the root system\n\nRequirements\n------------\n\nWhen installing this package using easy_install, it will build the MySQL and\nPostgres clients. So you'll need the necessary headers, plus gcc on your\nmachine.\n\nFor Ubuntu, run the following::\n\n sudo apt-get install libmysqlclient-dev libpq-dev python-dev \\\n build-essential\n\nInstallation procedure\n----------------------\n\n- Download the latest package from http://www.github.com/exhuma/pickup\n I recommend using the latest tagged version, but if you want bleeding\n edge, you may also download the \"master\" branch.\n\n- untar the package::\n\n tar xzf exhuma-pickup-<version number+hash>.tar.gz\n\n- enter the folder::\n\n cd exhuma-pickup-<version number+hash>\n\nWhen not using virtualenv, you may skip this section\n----------------------------------------------------\n\n.. note:: If you don't have virtualenv,\n run the following::\n\n apt-get install python-setuptools && easy_install virtualenv\n\n- create a virtualenv::\n\n virtualenv --no-site-packages /path/to/your/env\n\n- activate the environment::\n\n source /path/to/your/env/bin/activate\n\nWithout virtualenv\n------------------\n\n- run the installer::\n\n python setup.py install\n\nFinished & Trying things out\n----------------------------\n\nThe script is now installed in your system's binary path as \"pickup\". When using\nvirtualenv, this will be ``/path/to/your/env/bin``, otherwise it will most\nlikely be ``/usr/local/bin``.\n\nYou may now deactivate the virtualenv by entering ``deactivate``. In the future\nit will no longer be necessary to activate the environment manually. 
The\nexecutable script will run automatically in the proper environment.\n\nTo see if everything worked as expected, you may run::\n\n /path/to/your/env/pickup --help\n\nor simply (if it's on your ``$PATH``)::\n\n pickup --help\n\n" }, { "alpha_fraction": 0.7086455821990967, "alphanum_fraction": 0.7097422480583191, "avg_line_length": 31.176469802856445, "blob_id": "7b621cd89f39314a0e192557d0b2e13ad01f5413", "content_id": "287d6007355cf144c28eb255eb11d1532745d353", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5471, "license_type": "no_license", "max_line_length": 79, "num_lines": 170, "path": "/docs/source/writing_plugins.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _writing_plugins:\n\nUsing Plugins\n=============\n\nThe core uses the plugin filename as profile name. So if you create a new\nplugin named ``foobar.py``, then you can use it in the config file as ``profile\n= \"foobar\"``. It's as simple as that.\n\nWriting Plugins\n===============\n\nYou can do pretty much anything with these plugins. As you can chain multiple\nsources or targets, nothing holds you back from writing a plugin that won't\ncreate real backups. This might sound useless, but you could for example write\na source plugin which lists all files in a folder, and save that list in the\nstaging area. This won't be a valid backup of the data, but may prove useful\nnonetheless.\n\nBoth source and target plugins follow the same standard. In a nutshell:\n\n - An \"init\" function initialises the plugin. This is called for each\n source/target\n\n - A \"run\" function performs the actual job of creating/publishing the backup\n\n - A generator plugin is responsible for creating backup files inside the\n \"staging area\".\n\n - A target plugin is responsible for publishing/pushing the files inside the\n staging area to another location.\n\nGeneral housekeeping\n--------------------\n\nFile Names\n~~~~~~~~~~\n\nWhen creating filenames, it is recommended to use `os.path.join\n<http://docs.python.org/library/os.path.html#os.path.join>`_, which will ensure\nplatform independent filenames.\n\nThe minimal example below makes use of this module.\n\nDocumentation\n~~~~~~~~~~~~~\n\nThe config file used by the end-users strongly depends on the config values\nused in the plugins. As such, it would be very nice if these values are well\ndocumented. The project uses the module level docstrings in the auto-generated\ndocumentation (the one you are reading just now). So everything needed to set up\nthe plugin should be documented there.\n\nIt is recommended to add a code-block depicting an example configuration\nsection for the module. This will make the generated documentation easier to\nread and will provide simple copy/paste templates to users.\n\nLook at the available modules' source code for examples.\n\nLogging\n~~~~~~~\n\nAvoid ``print`` statements at all costs. *Especially* for error messages. The\nproper way to print output is to use the logging module. Everything related to\nlogging is configured for you in the core. So all you need to do is::\n\n import logging\n LOG = logging.getLogger(__name__)\n\nand then make the appropriate calls to::\n\n LOG.debug(message)\n LOG.info(message)\n LOG.warning(message)\n LOG.error(message)\n LOG.critical(message)\n LOG.exception(the_exception_instance)\n\n\"Debug\" and \"info\" messages will be sent to ``stdout``, whereas everything else\nis sent to ``stderr``. 
Additionally, everything is logged into a log-file.\n\nAPI version\n~~~~~~~~~~~\n\nBefore executing a plugin, the core checks the API version against which it was\ndeveloped. If the core changes, and if, as a consequence, changes are necessary\nor recommended in the plugin, the application will let you know:\n\n - Plugin execution is aborted for major changes\n - Warnings are issued for minor changes\n\nThe version information needs to be defined in a field named ``API_VERSION``\nand must be a tuple of (major_number, minor_number).\n\nExample minimal setup\n~~~~~~~~~~~~~~~~~~~~~\n\n.. note:: This example also makes use of ``os.path.join``. It is recommended to\n use this method to create filename strings, which will ensure platform\n independent file names. See `os.path.join\n <http://docs.python.org/library/os.path.html#os.path.join>`_ for more info!\n\n.. code-block:: python\n\n \"\"\"\n This is the module-level docstring\n \"\"\"\n import logging\n from os.path import join\n LOG = logging.getLogger(__name__)\n API_VERSION = (1,0)\n\n def init(source_dict):\n LOG.debug(\"Hello, I was initialised with %s\" % source_dict)\n\n def run(staging_area):\n LOG.info(\"Running on %s\" % staging_area)\n file_handle = open(join(staging_area, \"helloworld.txt\"), \"w\")\n file_handle.write( \"Hello World!\\n\" )\n file_handle.close()\n\nConfiguration Values\n--------------------\n\ninit\n~~~~\n\nThe core will pass a configuration dictionary from the config-file to the\n``init`` method before executing ``run``.\nSo, if a generator (or target) is configured as follows::\n\n [ ...,\n dict(\n name = \"mysource\",\n profile = \"my_plugin_module\",\n config = dict(\n a = 1,\n b = 2\n )\n ),\n ... ]\n\nThen the dictionary passed to the ``init`` method will be::\n\n { 'a': 1, 'b': 2 }\n\nrun\n~~~\n\nWhen executing ``run``, the core will pass a folder name inside the \"staging\narea\". This folder is based on the module name, and the generator/target name\ngiven by the user. So for the above example, this would be\n``/path/to/staging/my_plugin_module/mysource``. This ensures that each\nexecution of this profile will have its own \"private\" storage space to avoid\naccidental file overwrites.\n\nWriting Source Plugins\n----------------------\n\nA source plugin represents a type of data that needs to be included in the\nbackup. After calling ``run``, the plugin must have created a file inside the\nstaging area. Otherwise the backup will be lost.\n\nWriting Target Plugins\n----------------------\n\nA target plugin represents a destination into which the backups will be\n\"published\" (or \"pushed\"). The ``run`` method receives the root \"staging area\"\nas a parameter. At the end of the run, all files inside that folder should be in\nthe target location (as specified in the config).\n\n" }, { "alpha_fraction": 0.5968701243400574, "alphanum_fraction": 0.6032342314720154, "avg_line_length": 31.157718658447266, "blob_id": "0c1eb155858f26ee4b6006ef47f41a2305fc3e88", "content_id": "50e67b22c881d9742aa4c76e73c5cc99fbd7480f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9585, "license_type": "no_license", "max_line_length": 79, "num_lines": 298, "path": "/pickup/generator_profile/postgres.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module will dump the databases by running ``pg_dump``. 
The output will be run\nthrough ``gzip``.\n\nPrerequisites\n~~~~~~~~~~~~~\n\n - ``psycopg2`` must be installed\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **host**\n The host on which the database is running\n\n **user**\n The username to connect as.\n\n **port**\n The port on which the database is running\n\n **database**\n This can be either a list of database names to backup, or simply one\n database name to backup. This can also be ``'*'`` to backup all databases\n (excluding ``template0``, ``template1`` and ``postgres``)\n\n .. note:: In order for the wildcard ``\"*\"`` to work in the config file,\n the user must be able to connect to \"template1\" and must have\n read access to the system table \"pg_database\".\n\n **compress_command**\n If specified and non-empty, this command is used to compress the data.\n The command will receive the data as standard input via a pipe. So it\n must support this (gzip and bzip2 come to mind...).\n\n As the command is used as the first parameter to Popen, it must be specified\n as a list!\n\n Examples:\n\n * ``['gzip']``\n * ``['gzip', '-5']``\n\n **ignore_dbs**\n A list of databases to ignore (mostly useful when using ``'*'`` as\n database source).\n\n **pg_dump_params** (string) *optional*\n These parameters are passed directly to ``pg_dump``.\n\n .. warning:: The parameters for host, user and port (``-h``, ``-U``,\n ``-p`` respectively) should be **avoided**! The plugin uses\n the settings ``host``, ``user`` and ``port`` to set these\n automatically.\n\n The plugin uses two\n types of connection: a programmatic connection using\n ``libpq`` and an indirect connection using the ``pg_dump`` and\n ``pg_dumpall`` executables. The params specified in this\n config variable will **only** be passed to ``pg_dump`` and\n ``pg_dumpall``. So specifying host/user/port values here\n that differ from the dedicated config variables\n may have unexpected results.\n\n Additionally, the parameter ``-w`` (never prompt for\n password) is automatically added. See the section\n :ref:`postgres_passwords` for more info.\n\n **pg_dumpall_params** (string) *optional*\n Same as ``pg_dump_params``, but for the command ``pg_dumpall``\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. code-block:: python\n\n dict(\n name = 'PostgreSQL 8.4',\n profile = 'postgres',\n config = dict(\n host = 'localhost',\n user = 'backup',\n database = '*', # using '*' will dump all dbs\n compress_command = ['gzip'],\n ignore_dbs = ['my_test_db'],\n port = 5432,\n pg_dump_params = \"-Ft -c\",\n ),\n ),\n\n.. _postgres_passwords:\n\nA note on passwords\n~~~~~~~~~~~~~~~~~~~\n\nAs a security precaution login credentials should be stored in \"~/.pgpass\".\nSetting up \"trust\" connections works as well, but is far less secure!\n\n.. warning:: It's not recommended to use \"trust\" connections. For example,\n assume the following conditions are met:\n\n - A user has shell access\n - The ``pg_hba.conf`` file allows trust connections for the user\n postgres on local connections (a common setup).\n\n Then all the user needs to do is run the following command::\n\n $ psql -U postgres <dbname>\n\n to get access to the system! Using a ~/.pgpass file allows for\n convenient passwordless connections (as used by this script), while\n being a lot more secure than trust connections. 
Just keep in mind to\n set a chmod 600 on that file!\n\nHere's a copy of the relevant docs:\n\n The file ``.pgpass`` in a user's home directory is a file that can contain\n passwords to be used if the connection requires a password (and no password\n has been specified otherwise). This file should have lines of the following\n format::\n\n hostname:port:database:username:password\n\n Each of the first four fields may be a literal value, or ``*``, which\n matches anything. The password field from the first line that matches the\n current connection parameters will be used. (Therefore, put more-specific\n entries first when you are using wildcards.) If an entry needs to contain\n ``:`` or ``\\``, escape this character with ``\\``.\n\"\"\"\n\nimport logging\nimport psycopg2\nimport shlex\nfrom subprocess import Popen, PIPE\nfrom os.path import join\n\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\nSOURCE = {}\n\nFORMAT_PLAIN = 0\nFORMAT_TAR = 1\nFORMAT_CUSTOM = 2\n\ndef init(source):\n CONFIG.update(source['config'])\n SOURCE.update(source)\n CONFIG.setdefault('ignore_dbs', [])\n CONFIG.setdefault('compress_command', [])\n\n LOG.debug(\"Initialised '%s' with %r\" % ( __name__, CONFIG))\n\ndef list_dbs():\n conn = psycopg2.connect(\n database = 'template1',\n user = CONFIG['user'],\n host = CONFIG['host'],\n port = CONFIG['port'],\n )\n cursor = conn.cursor()\n cursor.execute(\"SELECT datname FROM pg_database WHERE datname NOT IN \"\n \"('template0', 'template1', 'postgres')\")\n output = [row[0] for row in cursor.fetchall()]\n cursor.close()\n conn.close()\n return output\n\ndef get_params(command):\n \"\"\"\n Construct a list of command-line params and return it.\n To be used in ``pg_dump`` and ``pg_dumpall``\n\n @param command: The command name (either \"pg_dumpall\" or \"pg_dump\")\n \"\"\"\n key = \"%s_params\" % command\n out = []\n if \"port\" in CONFIG and CONFIG['port']:\n out.extend([ \"-p\", str(CONFIG['port']) ])\n if \"host\" in CONFIG and CONFIG['host']:\n out.extend([ \"-h\", CONFIG['host'] ])\n if \"user\" in CONFIG and CONFIG['user']:\n out.extend([ \"-U\", CONFIG['user'] ])\n if key in CONFIG and CONFIG[key]:\n out.extend( shlex.split(CONFIG[key]) )\n return out\n\ndef get_format_type(command):\n \"\"\"\n Try to guess the dump format by inspecting the command elements\n \"\"\"\n format_string = \"\"\n\n for i,element in enumerate(command):\n\n # the format was specified as a separate element\n if element == '-F' or element == '--format':\n format_string = command[i+1]\n break\n\n # the format was specified using the abbreviated form (one element)\n if element.startswith('-F') and len(element) == 3:\n format_string = element[-1]\n break\n\n if format_string in ('c', 'custom'):\n return FORMAT_CUSTOM\n elif format_string in ('t', 'tar'):\n return FORMAT_TAR\n else:\n return FORMAT_PLAIN\n\ndef dump_one_db(staging_area, dbname):\n LOG.info(\"Dumping %s\" % dbname)\n command = [ 'pg_dump', '-w' ]\n command.extend( get_params(\"pg_dump\") )\n command.append( dbname )\n\n # change dump file suffix depending on dump type\n dump_format = get_format_type(command)\n if dump_format == FORMAT_TAR:\n file_suffix = 'tar'\n elif dump_format == FORMAT_CUSTOM:\n file_suffix = 'c'\n else:\n file_suffix = 'sql'\n\n filename = \"%s.%s\" % (dbname, file_suffix)\n\n if CONFIG['compress_command']:\n\n if CONFIG['compress_command'][0] == 'gzip':\n compress_suffix = 'gz'\n elif CONFIG['compress_command'][0] == 'bzip2':\n compress_suffix = 'bz2'\n elif 
CONFIG['compress_command'][0] == 'compress':\n compress_suffix = 'z'\n else:\n compress_suffix = CONFIG['compress_command'][0]\n\n p1 = Popen( command, stdout=PIPE, stderr=PIPE )\n p2 = Popen( CONFIG['compress_command'], stdin=p1.stdout, stdout=open(\n join(staging_area, \"%s.%s\" % (filename, compress_suffix)), \"wb\"),\n stderr=PIPE )\n\n p1.wait()\n p2.wait()\n\n if p1.returncode != 0:\n LOG.error(\"Error while running pg_dump: %s\" % p1.stderr.read())\n\n if p2.returncode != 0:\n LOG.error(\"Error while running the compress command: %s\" % p2.stderr.read())\n\n else:\n target_file = join(staging_area, \"%s\" % filename)\n p1 = Popen( command + ['-f', target_file] )\n stdout, stderr = p1.communicate()\n if p1.returncode != 0:\n LOG.error(\"Error while running pg_dump: %s\" % stderr)\n\ndef dump_globals(staging_area):\n LOG.info(\"Dumping postgres globals\")\n command = [ 'pg_dumpall', '-g' ]\n command.extend( get_params(\"pg_dumpall\") )\n\n p1 = Popen( command, stdout=PIPE, stderr=PIPE )\n p2 = Popen( \"gzip\", stdin=p1.stdout, stdout=open(\n join(staging_area, \"globals.gz\" ), \"wb\"), stderr=PIPE )\n\n p1.wait()\n p2.wait()\n\n if p1.returncode != 0:\n LOG.error(\"Error while running pg_dump: %s\" % p1.stderr.read())\n\n if p2.returncode != 0:\n LOG.error(\"Error while running gzip: %s\" % p2.stderr.read())\n\ndef run(staging_area):\n\n dump_globals(staging_area)\n\n if isinstance(CONFIG['database'], basestring):\n if CONFIG['database'] == '*':\n for dbname in list_dbs():\n if CONFIG['ignore_dbs'] and dbname in CONFIG['ignore_dbs']:\n LOG.info(\"Database %r has been explicitly ignored \"\n \"via the config file\" % dbname)\n continue\n dump_one_db(staging_area, dbname)\n else:\n dump_one_db(staging_area, CONFIG['database'])\n elif isinstance(CONFIG['database'], list):\n for dbname in CONFIG['database']:\n dump_one_db(staging_area, dbname)\n\n\n" }, { "alpha_fraction": 0.730611264705658, "alphanum_fraction": 0.730611264705658, "avg_line_length": 33.483333587646484, "blob_id": "eb16247f03a16cac23a9a63b27d68eda866be1c6", "content_id": "d9a91d47be684fdca115c53f37e126f473cc5b90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4139, "license_type": "no_license", "max_line_length": 84, "num_lines": 120, "path": "/docs/source/how_and_why.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _how_and_why:\n\nWhy (History)\n=============\n\n Why am I writing this script and why would you want to use it?\n\nI needed a simple way to create backups of a couple of folders, a few MySQL and\nPostgreSQL databases and store the created files on a remote FTP site. I\nimplemented this using a small bash script using ``ncftp`` to store them on the\nFTP site.\n\n`zmanda backup <http://www.zmanda.com/>`_ seemed to be a bit too much at the\ntime.\n\nOver time the script became more and more generalised. And as a result more and\nmore unreadable. After it started using bash arrays quite heavily the resulting\nsyntax really got on my nerves. Recently I needed to create backups in a\nsimilar fashion on another server. And there the fun started. A couple of\nproblems:\n\n - One server used a standard debian PostgreSQL installation, the other\n didn't. 
But the script used a ``psql`` parameter (``--cluster``) which is\n only available on the default debian installation.\n\n - A couple of minor things did not work exactly the same way on both\n servers.\n\nAdditionally, the script had one major annoyance from the beginning:\n\n``ncftp`` writes all server messages to ``stdout``. So, to see errors, I had to\nrefrain from redirecting ``ncftp``'s output to ``/dev/null``. But that in turn\nmeant that I received a cron-mail on each execution. Eventually, I redirected\nthe FTP output to ``/dev/null``. That way I only got cron-mails when something\nelse went wrong. But I had to check the FTP regularly to see if the backups\narrived correctly.\n\nI always wanted to fix these issues, and add a couple more functionalities to\nthe script. But given the complexity it already had, I thought it was time to\nrewrite it using a different scripting language. I've chosen Python as the\nlanguage of choice. A couple of reasons:\n\n - It's the language I am currently most proficient in\n\n - It's available on pretty much any standard Linux installation (another\n choice could have been perl.)\n\n - It is very dynamic and lets me write a modular application easily.\n\n - The syntax is very concise!\n\nExecution Concept\n=================\n\nThe application works in two main steps:\n\n - Generate the backup data (DB dumps, archive generation, ...) inside a\n temporary \"staging area\"\n\n See :term:`generator`\n\n - \"publish\" the generated files to one or more targets.\n\n See :term:`target`\n\nUsage\n=====\n\nThe config file specifies what to backup (``GENERATORS``), and where to store\nit (``TARGETS``). See :ref:`configuration` for more information on the general\nconfiguration.\n\nThe generators and targets are defined in python modules inside generator_profile and\ntarget_profile. For a list of available generators and targets see\n:ref:`available_plugins`\n\nIf something is missing from the list of plugins, you can easily write a new plugin\nwith only minimal python knowledge. See :ref:`writing_plugins` for more\ninformation.\n\nApplication output uses the default python logging module. All informational\nmessages are routed to stdout, and all errors/warnings are routed to stderr.\nThis is useful for cron jobs: redirecting stdout to /dev/null still lets\nimportant messages through, so cron can take the appropriate steps (send\nmails?)\n\nDebugging messages are logged to an auto-rotating file inside the \"logs\"\ndirectory. This provides some semi-persistent storage. If something went wrong\nand you redirected stdout (or deleted the cron-mails), you may still find some\nuseful info in that file.\n\nRough Roadmap\n=============\n\nThe main goal is to have a script which has all the features that I had in my\nprevious bash script. 
These features are:\n\n - **[DONE]** Folder backups (split and simple)\n\n - **[DONE]** Push to folder\n\n - **[DONE]** Backup PostgreSQL\n\n - **[DONE]** Docs\n\n - **[DONE]** Backup MySQL\n\n - **[DONE]** Push to FTP\n\n - **[DONE]** Remove old files in target \"dailyfolder\" (using the retention\n value)\n\n - **[DONE]** Remove old files in target \"ftp\" (using the retention value)\n\nAdditional items on the todo list\n---------------------------------\n\n - Push to a remote host via SSH\n\n - Move logging configuration out of the code\n\n" }, { "alpha_fraction": 0.6244909167289734, "alphanum_fraction": 0.6280206441879272, "avg_line_length": 26.28148078918457, "blob_id": "929a14467b93c3bfa737187c8f5848ea31b6a74d", "content_id": "28a27b4d9192891831fe48f03f001c068b347177", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2626, "license_type": "no_license", "max_line_length": 79, "num_lines": 92, "path": "/pickup/target_profile/dailyfolder.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "\"\"\"\nCreates a subfolder with the current date (YYYY-MM-DD) in the target folder and\ncopies everything inside the staging area into that folder\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **path** (string)\n The target folder\n\n **retention** (dict) *optional*\n How long the data should be kept. Everything older than this will be\n deleted. The dictionary values will be passed as keyword arguments to\n `datetime.timedelta\n <http://docs.python.org/library/datetime.html#datetime.timedelta>`_. If\n set to ``None``, the data will be kept indefinitely!\n\n **Default:** ``None``\n\n .. note:: This script uses the OS's ``mtime`` value to determine the\n folder's date. Refer to your OS reference to see if this is what\n you want!\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. 
code-block:: python\n\n dict(\n name = \"local\",\n profile = \"folder\",\n config = dict(\n path = \"/var/backups\",\n retention = dict(\n days=30,\n hours=12\n )\n ),\n ),\n\n\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom os.path import exists, join\nfrom os import listdir, stat\nfrom shutil import copytree, rmtree\nimport stat as stat_info\nimport logging\nimport os\n\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\nTARGET = {}\n\ndef init(target):\n CONFIG.update(target['config'])\n TARGET.update(target)\n LOG.debug(\"Initialised '%s' with %r\" % ( __name__, CONFIG))\n\ndef folder():\n return join(CONFIG['path'], datetime.now().strftime('%Y-%m-%d'))\n\ndef remove_old_files(root, timedelta_params):\n delta = timedelta(**timedelta_params)\n threshold_date = datetime.now() - delta\n LOG.info(\"Removing files created before %s\" % threshold_date)\n for entry in listdir(root):\n file_meta = stat(join(root, entry))\n mtime = datetime.fromtimestamp(file_meta[stat_info.ST_MTIME])\n LOG.debug(\"Inspecting %s (mtime=%s, threshold=%s, todelete=%s)\" % (\n entry, mtime, threshold_date, mtime<threshold_date ))\n if mtime < threshold_date:\n LOG.info(\"Deleting %s\" % entry)\n rmtree(join(root, entry))\n # Log the summary once after the loop, not once per kept entry.\n LOG.info(\"All obsolete files successfully removed.\")\n\ndef run(staging_area):\n if not exists(CONFIG['path']):\n os.makedirs(CONFIG['path'])\n LOG.info(\"Path '%s' created.\" % CONFIG['path'])\n\n # delete old files\n timedelta_params = CONFIG.get('retention', None)\n if timedelta_params:\n remove_old_files(CONFIG['path'], timedelta_params)\n\n # store new files\n copytree(staging_area, folder())\n\n" }, { "alpha_fraction": 0.6888453960418701, "alphanum_fraction": 0.6927592754364014, "avg_line_length": 30.9375, "blob_id": "75572513eb30414e5bdfedc2af0c83da9e7c922c", "content_id": "06b438a9dc3089419e9694b9f49a8dd601d00d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 79, "num_lines": 32, "path": "/docs/source/logging.rst", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": ".. _logging:\n\nLogging\n=======\n\nPickup uses `the standard python logging module\n<http://docs.python.org/library/logging.html>`_. While it is not *yet* possible, you\nwill eventually be able to take control over how and where messages are logged.\n\nCurrent Configuration\n---------------------\n\nCurrently, the configuration is hardcoded inside ``pickup.py`` with the\nfollowing settings.\n\nTwo handlers:\n\n - Console Handler\n\n Always writes messages *above* severity **INFO** to ``stderr``.\n **DEBUG** and **INFO** messages will be sent to ``stdout`` unless the\n command line flag ``-q`` is specified.\n\n - An auto-rotating file handler\n\n This handler always writes **everything** to a file ``logs/pickup.log``.\n The path is relative to **the current working folder**. 
This means, if you\n run the script from a cronjob, the current working folder will be the home\n folder of the user which executes the script.\n\n For security reasons, the file is set to mode 0600 (only accessible by\n its owner).\n" }, { "alpha_fraction": 0.6298552751541138, "alphanum_fraction": 0.6321401596069336, "avg_line_length": 27.28148078918457, "blob_id": "602534b2b5640835f91753fe88773c67f8116f05", "content_id": "8b6f79f8d7e9a8b25b0989cacbb483ad3f55c10e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3683, "license_type": "no_license", "max_line_length": 89, "num_lines": 135, "path": "/pickup/generator_profile/folder.py", "repo_name": "exhuma/pickup", "src_encoding": "UTF-8", "text": "\"\"\"\nThe folder plugin creates a bzipped tar file for a specific folder. It is also\npossible to specify a parent folder and create individual tarballs for each\nfolder and one for files beneath that folder.\n\nConfiguration\n~~~~~~~~~~~~~\n\nThe following fields are used by this plugin:\n\n **path** (string)\n The folder\n\n **split** (boolean) *optional*\n If set to \"True\", this module will create individual tarballs (Default =\n False).\n\nConfiguration Example\n~~~~~~~~~~~~~~~~~~~~~\n\n.. code-block:: python\n\n dict(\n name = 'My home folder',\n profile = 'folder',\n config = dict(\n path = '/home/me',\n split = True,\n )\n ),\n\"\"\"\nimport logging\nimport tarfile\nimport re\nfrom os.path import exists, join, abspath, isdir\nimport os\n\nLOG = logging.getLogger(__name__)\nAPI_VERSION = (2,0)\nCONFIG = {}\nSOURCE = {}\n\ndef init(source):\n \"\"\"\n If split is set, this strategy will create one tarball per subfolder in the\n given path.\n \"\"\"\n CONFIG.update(source['config'])\n SOURCE.update(source)\n LOG.debug(\"Initialised '%s' with %r\" % ( __name__, CONFIG))\n\ndef run(staging_area):\n if not exists(CONFIG['path']):\n LOG.error(\"Path '%s' does not exist! Skipping!\" % CONFIG['path'])\n return\n\n if CONFIG.get(\"split\", False):\n create_split_tar(staging_area)\n else:\n create_simple_tar(staging_area)\n\ndef create_split_tar(staging_area):\n \"\"\"\n Creates one tar file for each folder found in CONFIG['path']. If normal\n files reside in that folder, they will be collected into a special tarfile\n named \"__PICKUP_FILES__.tar.bz2\"\n\n @param staging_area: The target folder\n \"\"\"\n\n if not isdir(CONFIG['path']):\n LOG.error(\"Impossible to create a split tar! %s is not a folder!\" % CONFIG['path'])\n return\n\n LOG.info(\"Creating tarball for each folder inside %s\" % CONFIG['path'])\n if not exists(staging_area):\n os.makedirs( staging_area )\n elif not isdir(staging_area):\n LOG.error(\"'%s' exists and is not a folder! 
Skipping\" % staging_area)\n return\n\n files = []\n for entry in os.listdir(CONFIG['path']):\n entrypath = join(CONFIG['path'], entry)\n\n # Add directories directly, and add normal files into a special filename\n if not isdir(entrypath):\n files.append(entrypath)\n continue\n\n tarname = join(staging_area, \"%s.tar.bz2\" % entry)\n LOG.info(\"Writing to '%s'\" % abspath(tarname))\n tar = tarfile.open(abspath(tarname), \"w:bz2\")\n tar.add(entrypath)\n tar.close()\n\n if files:\n tarname = join(staging_area, \"__PICKUP_FILES__.tar.bz2\")\n LOG.info(\"Writing remaining files to '%s'\" % abspath(tarname))\n tar = tarfile.open(abspath(tarname), \"w:bz2\")\n for file in files:\n LOG.info(\" Adding %s\" % file)\n tar.add(file)\n tar.close()\n\ndef get_basename():\n \"\"\"\n Create a 'clean' filename\n \"\"\"\n\n # replace non-ascii characters with underscores\n basename = re.sub( r'[^a-zA-Z0-9]', \"_\", SOURCE['name'] )\n\n # now remove all leading/trainling underscores\n basename = basename.strip(\"_\")\n\n # prevent accidental overwrites\n counter = 0\n while exists(basename):\n counter += 1\n LOG.debug( \"File %s exists. Adding a counter.\" % basename )\n basename = \"%s-%d\" % (basename, counter)\n return basename\n\ndef create_simple_tar(staging_area):\n LOG.info(\"Creating tarball for path %s\" % CONFIG['path'])\n tarname = \"%s.tar.bz2\" % get_basename()\n\n # put it into the staging area\n tarname = join(staging_area, tarname)\n\n LOG.info(\"Writing to '%s'\" % abspath(tarname))\n tar = tarfile.open(abspath(tarname), \"w:bz2\")\n tar.add( CONFIG['path'] )\n tar.close()\n" } ]
22
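The pickup plugin records above all share one contract: a module-level ``API_VERSION`` tuple, an ``init(source)`` hook that receives the plugin's ``config`` section, and a ``run(staging_area)`` hook that must leave its output inside the staging folder. As a quick orientation — this is a minimal sketch, not part of any archived repo; the module name ``noop.py`` and the ``message`` config key are hypothetical — a generator plugin following that contract could look like this:

    """
    Hypothetical minimal generator plugin (noop.py), sketched only to
    illustrate the init/run contract used by the pickup records above.
    """
    import logging
    from os.path import join

    LOG = logging.getLogger(__name__)
    API_VERSION = (2, 0)  # (major, minor) pair checked by the core
    CONFIG = {}

    def init(source):
        # The core passes the plugin's 'config' dict from the config file.
        CONFIG.update(source['config'])

    def run(staging_area):
        # A generator must create at least one file inside its staging
        # folder, otherwise there is nothing for the targets to publish.
        with open(join(staging_area, "marker.txt"), "w") as handle:
            handle.write(CONFIG.get("message", "backup marker") + "\n")

Such a module would be wired up like any other profile, e.g. ``dict(name='noop test', profile='noop', config=dict(message='hello'))``.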
cuongpianna/PIML__CV
https://github.com/cuongpianna/PIML__CV
2dfa770a74d83921d243de9ad48c2e913660e65a
20b5d18a0b82bbef61e8bb45ce884df25d319d7e
385e98ae5be93c613b0185f6adfab254791fa150
refs/heads/master
2020-04-13T10:13:31.219584
2019-01-04T10:12:48
2019-01-04T10:12:48
163,133,844
0
1
null
2018-12-26T03:53:07
2018-12-26T03:53:10
2019-01-04T10:12:48
null
[ { "alpha_fraction": 0.663551390171051, "alphanum_fraction": 0.663551390171051, "avg_line_length": 20.399999618530273, "blob_id": "e826223be3a0356cffeaaf86cb6361c02c833968", "content_id": "2301721d701e1e058b6f25f0f27b3b6a13fcd98d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/backend/app/hello_world/views.py", "repo_name": "cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "from app.hello_world import bp\n\[email protected]('/hello', methods=['GET'])\ndef hello():\n return 'Hello world'\n" }, { "alpha_fraction": 0.6660808324813843, "alphanum_fraction": 0.6871704459190369, "avg_line_length": 18.620689392089844, "blob_id": "3e28ef2c53ed861cd863c26f94351b5ed8ce83e9", "content_id": "ed6a7418dfecc54342fe867372e4ff9ab3107f37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/backend/config.py", "repo_name": "cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "\"\"\"\nConfig for this project\n\"\"\"\n\nclass Config:\n \"\"\"\n Base config for the project\n \"\"\"\n CRSF_ENABLED = True\n SECRET_KEY = 'djkqu893u189dklasdklaj'\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\nclass DevelopementConfig(Config):\n\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:123456@localhost/pyml'\n\n\nclass ProductionConfig(Config):\n DEBUG = False\n SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:123456@localhost/pyml'\n\n\nconfig = {\n 'development': DevelopementConfig,\n 'default': DevelopementConfig,\n 'production': ProductionConfig\n}\n" }, { "alpha_fraction": 0.737864077091217, "alphanum_fraction": 0.737864077091217, "avg_line_length": 13.571428298950195, "blob_id": "7e7e69f6a6a204ba35ee781671b16424e2d7c4f9", "content_id": "8a24d6cf933dcb526ef6da2d336a2af933af0cab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/backend/app/helpers/extensions.py", "repo_name": "cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "\"\"\"\nRegister extensions for flask app\n\"\"\"\n\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\n" }, { "alpha_fraction": 0.7120000123977661, "alphanum_fraction": 0.7120000123977661, "avg_line_length": 12.777777671813965, "blob_id": "b7bd7a36c2926f527e73bc822995897542d081e8", "content_id": "8eb8c28d66764d0578d62a7bc71812501c318909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/backend/app/hello_world/models.py", "repo_name": "cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "\n\"\"\"\nRegister models of hello_world module here\n\"\"\"\n\nfrom app.helpers.extensions import db\n\n\nclass Hello(db.Model):\n pass\n" }, { "alpha_fraction": 0.6816092133522034, "alphanum_fraction": 0.6816092133522034, "avg_line_length": 22.513513565063477, "blob_id": "c005021ec1dad8261da43a16143808c486a860a3", "content_id": "42563fbd7ebe9e9a4296898ec869e884e01b70c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/backend/app/__init__.py", "repo_name": 
"cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "\"\"\"\nManage extensions and return the flask app (followed factory pattern)\n\"\"\"\nfrom flask import Flask\nfrom flask_cors import CORS\n\n\nfrom config import config\nfrom app.helpers.extensions import db\n\n\ndef create_app(config_name):\n \"\"\"\n Manage flask app object and return this\n :param config_name: The name of config. Ex: development, production, default\n :return: Flask app\n \"\"\"\n\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n register_extensions(app)\n\n \"Register views fucntion\"\n from app.hello_world import bp as hello_world_blueprint\n app.register_blueprint(hello_world_blueprint)\n return app\n\n\ndef register_extensions(app):\n \"\"\"\n Register extensions for flask app\n :param app: The flask app\n :return: app\n \"\"\"\n db.init_app(app)\n CORS(app, resources={r\"*\": {\"origins\": \"*\"}})\n return app\n" }, { "alpha_fraction": 0.6984127163887024, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 13, "blob_id": "f41337c9c52dca5587b5901ae005a5e2a7df360e", "content_id": "792d4dd4d2561b57cd271a58ba95a7dbe9e79683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/backend/app/hello_world/__init__.py", "repo_name": "cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "\"\"\"\nModule hello_world\n\"\"\"\n\nfrom flask import Blueprint\n\n\nbp = Blueprint('posts', __name__)\nfrom app.hello_world import views\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 9.666666984558105, "blob_id": "3a38013f5edd321e2274b7731927f2db9850f05b", "content_id": "9cb5629dc8f08e891486555751d6d7f1674426f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/backend/app/hello_world/serializers.py", "repo_name": "cuongpianna/PIML__CV", "src_encoding": "UTF-8", "text": "\"\"\"\nValidate models\n\"\"\"\n\nfrom app.hello_world.models import Hello\n\n\nclass HelloSchema:\n pass\n" } ]
7
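The PIML__CV record above follows the Flask application-factory pattern: ``create_app(config_name)`` in ``app/__init__.py`` builds the app, registers the extensions (SQLAlchemy, CORS) and the ``hello_world`` blueprint, and ``config.py`` maps names such as ``'development'`` to config classes. The repo's entry-point script is not part of the record; as a sketch of how such a factory is typically driven — the file name ``run.py`` is an assumption, only ``create_app`` and the ``'development'`` key come from the archived files — it could be:

    # Hypothetical run.py (not in the record above) driving the factory.
    from app import create_app

    # 'development' selects DevelopementConfig via the config dict in config.py.
    app = create_app('development')

    if __name__ == '__main__':
        # Flask's built-in development server; not suitable for production.
        app.run()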
jikhanjung/spiralia
https://github.com/jikhanjung/spiralia
014cee2511c16517d3c1533c280c0d680c6b00a2
1d7c4f23f2ad60e85d5af12ac144fd89039bda40
fb4ead288d91bd1e03e36c18f12a83d07e72b313
refs/heads/master
2020-03-25T22:32:08.312771
2018-09-03T14:22:46
2018-09-03T14:22:46
144,227,735
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.39575088024139404, "alphanum_fraction": 0.49993929266929626, "avg_line_length": 33.844329833984375, "blob_id": "f92d8a8c1a467333c359a0e4894b69d46ef4c143", "content_id": "be17dc0dd91620ce9c5d6971c840c118485dcfd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41185, "license_type": "no_license", "max_line_length": 256, "num_lines": 1182, "path": "/contour_simplifier.py", "repo_name": "jikhanjung/spiralia", "src_encoding": "UTF-8", "text": "# import numpy as np\nimport cv2\n# import contour_pair_list\n\n\nSCALE_FACTOR = 100.0\nCONTOUR_POINT_COUNT = 16\nret_str = \"\"\n\ncoord_correction = []\nfor i in range(18):\n coord_correction.append([-1028, -774])\n\ncoord_correction[4][1] += -2\ncoord_correction[5][1] += 0\ncoord_correction[6][1] += 0\ncoord_correction[7][1] += 5\ncoord_correction[8][1] += 10\ncoord_correction[9][1] += 0\n\nall_contours = []\nall_centroids = []\nfor i in range(18):\n num = '0' + str(i+1)\n filename = 'images/Spiriferella-spiralia-' + num[-2:]\n print(filename)\n im = cv2.imread(filename + '.png')\n\n imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(imgray, 127, 255, 0)\n image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n j = 0\n centroids_in_a_section = []\n contours_in_a_section = []\n x_correction = coord_correction[i][0]\n y_correction = coord_correction[i][1]\n\n for c in contours:\n max_x = [0, 0]\n min_x = [9999, 9999]\n idx = 0\n max_idx = -1\n min_idx = -1\n num_point = CONTOUR_POINT_COUNT\n idx_list = []\n for pt in c:\n if pt[0][0] > max_x[0]:\n max_x = pt[0]\n max_idx = idx\n if pt[0][0] < min_x[0]:\n min_x = pt[0]\n min_idx = idx\n idx += 1\n idx_diff1 = int((max_idx - min_idx) / (num_point / 2))\n idx_diff2 = int((min_idx + (len(c) - max_idx)) / (num_point / 2))\n for k in range(int(num_point / 2)):\n idx_list.append(int(min_idx + idx_diff1 * k))\n for k in range(int(num_point / 2)):\n idx_list.append(int(max_idx + idx_diff2 * k) % len(c))\n # print(max_x, min_x, max_idx, min_idx, idx_list, len(c))\n simp_cont = []\n for k in range(num_point):\n simp_cont.append([c[idx_list[k]][0][0] + x_correction, -1 * (c[idx_list[k]][0][1] + y_correction), (i - 9) * 40.3])\n # print(simp_cont)\n\n contours_in_a_section.append(simp_cont)\n # for idx in range(len(simp_cont)):\n # cv2.putText(im,str(idx) ,(simp_cont[idx][0],simp_cont[idx][1]),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,255)\n\n M = cv2.moments(c)\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n centroids_in_a_section.append([cx + x_correction, -1 * (cy + y_correction), (i - 9) * 40.3])\n verts = []\n for v in simp_cont:\n verts.append(\"(\" + \",\".join([str(coord) for coord in v]) + \")\")\n # contour_list.append( \"[\" + \",\\n\".join( verts ) + \"]\" )\n j += 1\n\n all_centroids.append(centroids_in_a_section)\n all_contours.append(contours_in_a_section)\n\n# convert contours into vertices\nall_contours_str_list = []\nfor contours_in_a_section in all_contours:\n contour_str_list = []\n for contour in contours_in_a_section:\n v_str_list = []\n for v in contour:\n # print(\"v:\",v)\n v_str = \"(\" + \",\".join([str(x/SCALE_FACTOR) for x in v]) + \")\"\n v_str_list.append(v_str)\n contour_str = \"[\" + \",\".join(v_str_list) + \"]\"\n # print( \"contour_str:\", contour_str )\n contour_str_list.append(contour_str)\n contours_in_a_section_str = \"[\" + \",\\n\".join(contour_str_list) + \"]\"\n 
all_contours_str_list.append(contours_in_a_section_str)\nret_str += \"contour_list=[\" + \",\\n\".join(all_contours_str_list) + \"]\\n\"\n# ret_str += \"centroids=[\" + \",\\n\".join(centroids)+\"]\\n\"\n\n# convert centroids into vertices\nall_centroids_str_list = []\n\nfor centroids_in_a_section in all_centroids:\n centroids_str_list = []\n centroids_in_a_section_str = \"\"\n for centroid in centroids_in_a_section:\n centroid_str = \"(\" + \",\".join([str(x/SCALE_FACTOR) for x in centroid]) + \")\"\n # print(centroid_str)\n centroids_str_list.append(centroid_str)\n centroids_in_a_section_str = \"[\" + \",\\n\".join(centroids_str_list) + \"]\"\n all_centroids_str_list.append(centroids_in_a_section_str)\nret_str += \"centroid_list=[\" + \",\".join(all_centroids_str_list) + \"]\\n\"\n\n\nret_str += '''\nimport bpy\nimport bmesh\nimport math\n#import contour_pair_list\n\nSCALE_FACTOR = 100.0\nSPIRALIA_RADIUS = 500 / SCALE_FACTOR\n\nmesh = bpy.data.meshes.new(\"mesh\") # add a new mesh\nobj = bpy.data.objects.new(\"MyObject\", mesh) # add a new object using the mesh\n\nscene = bpy.context.scene\nscene.objects.link(obj) # put the object into the scene (link)\nscene.objects.active = obj # set as the active object in the scene\nobj.select = True # select object\n\nmesh = bpy.context.object.data\nbm = bmesh.new()\n\ncontour_pair_list = [\n[[1,0],[2,3]],\n[[2,0],[3,3]],\n[[2,1],[3,4]],\n[[2,2],[3,6]],\n[[2,2],[3,5]],\n[[2,3],[3,8]],\n[[2,3],[3,9]],\n[[2,5],[3,10]],\n[[2,5],[3,12]],\n[[2,4],[3,13]],\n[[2,6],[3,14]],\n[[2,7],[3,15]],\n[[3,0],[4,0]],\n[[3,1],[4,1]],\n[[3,2],[4,2]],\n[[3,3],[4,5]],\n[[3,3],[4,3]],\n[[3,4],[4,7]],\n[[3,6],[4,10]],\n[[3,8],[4,11]],\n[[3,10],[4,13]],\n[[3,13],[4,16]],\n[[3,14],[4,19]],\n[[3,16],[4,22]],\n[[3,17],[4,24]],\n[[3,4],[4,6]],\n[[3,5],[4,8]],\n[[3,9],[4,12]],\n[[3,12],[4,17]],\n[[3,15],[4,23]],\n[[3,20],[4,27]],\n[[3,22],[4,29]],\n[[4,0 ],[5,2 ]],\n[[4,0 ],[5,0 ]],\n[[4,1 ],[5,6 ]],\n[[4,1 ],[5,1 ]],\n[[4,2 ],[5,8 ]],\n[[4,2 ],[5,4 ]],\n[[4,5 ],[5,10]],\n[[4,7 ],[5,14]],\n[[4,10],[5,18]],\n[[4,11],[5,20]],\n[[4,13],[5,22]],\n[[4,16],[5,25]],\n[[4,19],[5,27]],\n[[4,22],[5,30]],\n[[4,24],[5,33]],\n[[4,26],[5,34]],\n[[4,3 ],[5,7 ]],\n[[4,6 ],[5,11]],\n[[4,8 ],[5,17]],\n[[4,12],[5,21]],\n[[4,17],[5,29]],\n[[4,23],[5,32]],\n[[4,27],[5,35]],\n[[4,29],[5,37]],\n[[4,30],[5,38]],\n[[5,2 ],[6,3 ]],\n[[5,6 ],[6,6 ]],\n[[5,8 ],[6,11]],\n[[5,10],[6,14]],\n[[5,14],[6,17]],\n[[5,18],[6,20]],\n[[5,20],[6,23]],\n[[5,22],[6,24]],\n[[5,25],[6,28]],\n[[5,27],[6,29]],\n[[5,30],[6,33]],\n[[5,33],[6,36]],\n[[5,34],[6,37]],\n[[5,0 ],[6,0 ]],\n[[5,1 ],[6,1 ]],\n[[5,4 ],[6,5 ]],\n[[5,7 ],[6,10]],\n[[5,11],[6,15]],\n[[5,17],[6,22]],\n[[5,21],[6,26]],\n[[5,29],[6,32]],\n[[5,32],[6,35]],\n[[5,35],[6,38]],\n[[5,37],[6,40]],\n[[5,38],[6,41]],\n[[6,0 ],[7,1 ]],\n[[6,1 ],[7,4 ]],\n[[6,5 ],[7,11]],\n[[6,10],[7,17]],\n[[6,15],[7,23]],\n[[6,22],[7,27]],\n[[6,26],[7,31]],\n[[6,32],[7,36]],\n[[6,35],[7,39]],\n[[6,38],[7,42]],\n[[6,40],[7,43]],\n[[6,41],[7,44]],\n[[6,42],[7,45]],\n[[6,3 ],[7,6 ]],\n[[6,6 ],[7,10]],\n[[6,11],[7,13]],\n[[6,14],[7,16]],\n[[6,17],[7,20]],\n[[6,20],[7,24]],\n[[6,23],[7,25]],\n[[6,24],[7,29]],\n[[6,28],[7,30]],\n[[6,29],[7,34]],\n[[6,33],[7,35]],\n[[6,36],[7,38]],\n[[6,37],[7,40]],\n[[7,1 ],[8,1]],\n[[7,4 ],[8,5]],\n[[7,11],[8,11]],\n[[7,17],[8,17]],\n[[7,23],[8,22]],\n[[7,27],[8,26]],\n[[7,31],[8,29]],\n[[7,36],[8,34]],\n[[7,39],[8,38]],\n[[7,42],[8,42]],\n[[7,43],[8,43]],\n[[7,44],[8,44]],\n[[7,45],[8,45]],\n[[7,6 
],[8,6]],\n[[7,10],[8,9]],\n[[7,13],[8,13]],\n[[7,16],[8,16]],\n[[7,20],[8,19]],\n[[7,24],[8,23]],\n[[7,25],[8,25]],\n[[7,29],[8,27]],\n[[7,30],[8,31]],\n[[7,34],[8,33]],\n[[7,35],[8,35]],\n[[7,38],[8,39]],\n[[7,40],[8,40]],\n[[8,6 ],[9,5 ]],\n[[8,9 ],[9,9 ]],\n[[8,13],[9,13]],\n[[8,16],[9,15]],\n[[8,19],[9,19]],\n[[8,23],[9,22]],\n[[8,25],[9,25]],\n[[8,27],[9,28]],\n[[8,31],[9,29]],\n[[8,33],[9,33]],\n[[8,35],[9,36]],\n[[8,39],[9,37]],\n[[8,40],[9,40]],\n[[8,1 ],[9,1 ]],\n[[8,5 ],[9,6 ]],\n[[8,11],[9,12]],\n[[8,17],[9,18]],\n[[8,22],[9,23]],\n[[8,26],[9,26]],\n[[8,29],[9,31]],\n[[8,34],[9,35]],\n[[8,38],[9,39]],\n[[8,42],[9,41]],\n[[8,43],[9,42]],\n[[8,44],[9,43]],\n[[8,45],[9,44]],\n[[9,5 ],[10,6 ]],\n[[9,9 ],[10,8 ]],\n[[9,13],[10,12]],\n[[9,15],[10,15]],\n[[9,19],[10,18]],\n[[9,22],[10,22]],\n[[9,25],[10,24]],\n[[9,28],[10,27]],\n[[9,29],[10,30]],\n[[9,33],[10,32]],\n[[9,36],[10,35]],\n[[9,37],[10,38]],\n[[9,40],[10,41]],\n[[9,1 ],[10,1 ]],\n[[9,6 ],[10,3 ]],\n[[9,12],[10,10]],\n[[9,18],[10,16]],\n[[9,23],[10,21]],\n[[9,26],[10,25]],\n[[9,31],[10,29]],\n[[9,35],[10,33]],\n[[9,39],[10,37]],\n[[9,41],[10,40]],\n[[9,42],[10,42]],\n[[9,43],[10,43]],\n[[9,44],[10,44]],\n[[10,1 ],[11,1 ]],\n[[10,3 ],[11,5 ]],\n[[10,10],[11,13]],\n[[10,16],[11,19]],\n[[10,21],[11,23]],\n[[10,25],[11,27]],\n[[10,29],[11,32]],\n[[10,33],[11,35]],\n[[10,37],[11,39]],\n[[10,40],[11,42]],\n[[10,42],[11,43]],\n[[10,43],[11,44]],\n[[10,6 ],[11,3 ]],\n[[10,8 ],[11,6 ]],\n[[10,12],[11,9 ]],\n[[10,15],[11,12]],\n[[10,18],[11,15]],\n[[10,22],[11,18]],\n[[10,24],[11,22]],\n[[10,27],[11,26]],\n[[10,30],[11,28]],\n[[10,32],[11,31]],\n[[10,35],[11,33]],\n[[10,38],[11,36]],\n[[10,41],[11,40]],\n[[11,1 ],[12,1 ]],\n[[11,5 ],[12,5 ]],\n[[11,13],[12,12]],\n[[11,19],[12,19]],\n[[11,23],[12,23]],\n[[11,27],[12,26]],\n[[11,32],[12,30]],\n[[11,35],[12,35]],\n[[11,39],[12,39]],\n[[11,42],[12,41]],\n[[11,43],[12,43]],\n[[11,44],[12,44]],\n[[11,3 ],[12,3 ]],\n[[11,6 ],[12,6 ]],\n[[11,9 ],[12,9 ]],\n[[11,12],[12,11]],\n[[11,15],[12,15]],\n[[11,18],[12,18]],\n[[11,22],[12,22]],\n[[11,26],[12,25]],\n[[11,28],[12,28]],\n[[11,31],[12,31]],\n[[11,33],[12,34]],\n[[11,36],[12,37]],\n[[11,40],[12,40]],\n[[11,41],[12,42]],\n[[12,3 ],[13,4]],\n[[12,6 ],[13,7]],\n[[12,9 ],[13,10]],\n[[12,11],[13,13]],\n[[12,15],[13,17]],\n[[12,18],[13,21]],\n[[12,22],[13,24]],\n[[12,25],[13,28]],\n[[12,28],[13,30]],\n[[12,31],[13,34]],\n[[12,34],[13,38]],\n[[12,37],[13,39]],\n[[12,40],[13,40]],\n[[12,1 ],[13,1 ]],\n[[12,5 ],[13,6 ]],\n[[12,12],[13,11]],\n[[12,19],[13,16]],\n[[12,23],[13,20]],\n[[12,26],[13,25]],\n[[12,30],[13,29]],\n[[12,35],[13,33]],\n[[12,39],[13,36]],\n[[12,41],[13,38]],\n[[12,43],[13,39]],\n[[12,44],[13,40]],\n[[13,1 ],[14,4 ]],\n[[13,6 ],[14,6 ]],\n[[13,11],[14,9 ]],\n[[13,16],[14,12]],\n[[13,20],[14,15]],\n[[13,25],[14,19]],\n[[13,29],[14,21]],\n[[13,33],[14,24]],\n[[13,36],[14,27]],\n[[13,38],[14,28]],\n[[13,39],[14,29]],\n[[13,7 ],[14,4 ]],\n[[13,10],[14,6 ]],\n[[13,13],[14,9 
]],\n[[13,17],[14,12]],\n[[13,21],[14,15]],\n[[13,24],[14,19]],\n[[13,28],[14,21]],\n[[13,30],[14,24]],\n[[13,34],[14,27]],\n[[3,7],[4,9]],\n[[3,11],[4,15]],\n[[3,18],[4,14]],\n[[3,18],[4,20]],\n[[3,19],[4,18]],\n[[3,21],[4,25]],\n[[4,4],[5,12]],\n[[4,9],[5,13]],\n[[4,9],[5,16]],\n[[4,15],[5,15]],\n[[4,15],[5,24]],\n[[4,14],[5,19]],\n[[4,20],[5,28]],\n[[4,18],[5,23]],\n[[4,25],[5,31]],\n[[4,21],[5,26]],\n[[4,28],[5,36]],\n[[5,3],[6,4]],\n[[5,5],[6,8]],\n[[5,9],[6,7]],\n[[5,9],[6,12]],\n[[5,12],[6,9]],\n[[5,12],[6,16]],\n[[5,13],[6,13]],\n[[5,16],[6,19]],\n[[5,15],[6,18]],\n[[5,24],[6,27]],\n[[5,19],[6,21]],\n[[5,28],[6,31]],\n[[5,23],[6,25]],\n[[5,31],[6,34]],\n[[5,26],[6,30]],\n[[5,36],[6,39]],\n[[6,2],[7,0]],\n[[6,2],[7,3]],\n[[6,4],[7,2]],\n[[6,4],[7,8]],\n[[6,8],[7,5]],\n[[6,8],[7,12]],\n[[6,7],[7,7]],\n[[6,12],[7,15]],\n[[6,9],[7,9]],\n[[6,16],[7,18]],\n[[6,13],[7,14]],\n[[6,19],[7,22]],\n[[6,18],[7,19]],\n[[6,27],[7,28]],\n[[6,21],[7,21]],\n[[6,31],[7,33]],\n[[6,25],[7,26]],\n[[6,34],[7,37]],\n[[6,30],[7,32]],\n[[6,39],[7,41]],\n[[7, 0],[8, 0]],\n[[7, 3],[8, 4]],\n[[7, 2],[8, 2]],\n[[7, 8],[8, 8]],\n[[7, 5],[8, 3]],\n[[7,12],[8,12]],\n[[7, 7],[8,7 ]],\n[[7,15],[8,14]],\n[[7, 9],[8,10]],\n[[7,18],[8,18]],\n[[7,14],[8,15]],\n[[7,22],[8,21]],\n[[7,19],[8,20]],\n[[7,28],[8,28]],\n[[7,21],[8,24]],\n[[7,33],[8,32]],\n[[7,26],[8,30]],\n[[7,37],[8,37]],\n[[7,32],[8,36]],\n[[7,41],[8,41]],\n[[8, 0],[9, 0]],\n[[8, 4],[9, 4]],\n[[8, 2],[9, 2]],\n[[8, 8],[9, 8]],\n[[8, 3],[9, 3]],\n[[8,12],[9,11]],\n[[8,7 ],[9, 7]],\n[[8,14],[9,14]],\n[[8,10],[9,10]],\n[[8,18],[9,17]],\n[[8,15],[9,16]],\n[[8,21],[9,21]],\n[[8,20],[9,20]],\n[[8,28],[9,27]],\n[[8,24],[9,24]],\n[[8,32],[9,30]],\n[[8,30],[9,32]],\n[[8,37],[9,34]],\n[[8,41],[9,38]],\n[[9, 0],[10, 0]],\n[[9, 2],[10, 2]],\n[[9, 3],[10, 5]],\n[[9, 7],[10, 9]],\n[[9,10],[10,13]],\n[[9,16],[10,19]],\n[[9,20],[10,23]],\n[[9,24],[10,28]],\n[[9,32],[10,36]],\n[[9, 4],[10, 4]],\n[[9, 8],[10, 7]],\n[[9,11],[10,11]],\n[[9,14],[10,14]],\n[[9,17],[10,17]],\n[[9,21],[10,20]],\n[[9,27],[10,26]],\n[[9,30],[10,31]],\n[[9,34],[10,34]],\n[[9,38],[10,39]],\n[[10, 0],[11, 0]],\n[[10, 2],[11, 2]],\n[[10, 5],[11, 7]],\n[[10, 9],[11,10]],\n[[10,13],[11,16]],\n[[10,19],[11,21]],\n[[10,23],[11,24]],\n[[10,28],[11,30]],\n[[10,36],[11,37]],\n[[10, 4],[11, 4]],\n[[10, 7],[11, 8]],\n[[10,11],[11,11]],\n[[10,14],[11,14]],\n[[10,17],[11,17]],\n[[10,20],[11,20]],\n[[10,26],[11,25]],\n[[10,31],[11,29]],\n[[10,34],[11,34]],\n[[10,39],[11,38]],\n[[11, 0],[12, 0]],\n[[11, 2],[12, 2]],\n[[11, 7],[12, 8]],\n[[11,10],[12,13]],\n[[11,16],[12,16]],\n[[11,21],[12,21]],\n[[11,24],[12,27]],\n[[11,30],[12,32]],\n[[11,37],[12,38]],\n[[11, 4],[12, 4]],\n[[11, 8],[12, 7]],\n[[11,11],[12,10]],\n[[11,14],[12,14]],\n[[11,17],[12,17]],\n[[11,20],[12,20]],\n[[11,25],[12,24]],\n[[11,29],[12,29]],\n[[11,34],[12,33]],\n[[11,38],[12,36]],\n[[12, 0],[13, 0]],\n[[12, 2],[13, 2]],\n[[12, 8],[13, 9]],\n[[12,13],[13,14]],\n[[12,16],[13,18]],\n[[12,21],[13,22]],\n[[12,27],[13,27]],\n[[12,32],[13,32]],\n[[12,38],[13,37]],\n[[12, 4],[13, 3]],\n[[12, 7],[13, 5]],\n[[12,10],[13, 8]],\n[[12,14],[13,12]],\n[[12,17],[13,15]],\n[[12,20],[13,19]],\n[[12,24],[13,23]],\n[[12,29],[13,26]],\n[[12,33],[13,31]],\n[[12,36],[13,35]],\n[[13, 0],[14, 0]],\n[[13, 2],[14, 3]],\n[[13, 9],[14, 7]],\n[[13,14],[14,10]],\n[[13,18],[14,13]],\n[[13,22],[14,17]],\n[[13,27],[14,20]],\n[[13,32],[14,23]],\n[[13,37],[14,26]],\n[[13, 3],[14, 1]],\n[[13, 5],[14, 2]],\n[[13, 8],[14, 5]],\n[[13,12],[14, 
8]],\n[[13,15],[14,11]],\n[[13,19],[14,14]],\n[[13,23],[14,16]],\n[[13,26],[14,18]],\n[[13,31],[14,22]],\n[[13,35],[14,25]],\n[[14, 0],[15, 0]],\n[[14, 3],[15, 2]],\n[[14, 7],[15, 5]],\n[[14,10],[15, 6]],\n[[14,13],[15, 8]],\n[[14,17],[15,11]],\n[[14,20],[15,13]],\n[[14,23],[15,15]],\n[[14,26],[15,16]],\n[[14, 2],[15, 1]],\n[[14, 5],[15, 3]],\n[[14, 8],[15, 4]],\n[[14,11],[15, 7]],\n[[14,14],[15, 9]],\n[[14,16],[15,10]],\n[[14,18],[15,12]],\n[[14,22],[15,14]],\n[[14,25],[15,16]],\n[[15, 0],[16, 0]],\n[[15, 2],[16, 3]],\n[[15, 5],[16, 5]],\n[[15, 6],[16, 7]],\n[[15, 8],[16, 9]],\n[[15,11],[16,11]],\n[[15,13],[16,12]],\n[[15,15],[16,13]],\n[[15, 1],[16, 1]],\n[[15, 3],[16, 2]],\n[[15, 4],[16, 4]],\n[[15, 7],[16, 6]],\n[[15, 9],[16, 8]],\n[[15,10],[16,10]],\n[[15,12],[16,12]],\n[[15,14],[16,13]],\n[[16, 0],[17,0]],\n[[16, 3],[17,2]],\n[[16, 5],[17,5]],\n[[16, 7],[17,6]],\n[[16, 9],[17,7]],\n[[16,11],[17,8]],\n[[16, 1],[17,1]],\n[[16, 2],[17,3]],\n[[16, 4],[17,4]],\n[[16, 6],[17,6]],\n[[16, 8],[17,7]],\n[[16,10],[17,8]],\n[[17,0],[18,0]],\n[[17,2],[18,2]],\n[[17,5],[18,3]],\n[[17,6],[18,4]],\n[[17,1],[18,1]],\n[[17,3],[18,2]],\n[[17,4],[18,3]]\n]\n\n#centroid links\n#centoid_vertex_list = []\n#for cs in centroids:\n# centroids_in_a_layer = []\n# for v in cs:\n# centroids_in_a_layer.append(bm.verts.new(v))\n# centoid_vertex_list.append(centroids_in_a_layer)\n\n\n# contour \nall_contour_model = []\nsection_index = 0\nfor contours_in_a_section in contour_list:\n contour_model_in_a_section = []\n\n contour_index = 0\n for contour in contours_in_a_section:\n contour_model = { 'index':[section_index+1,contour_index],'v':[], 'e':[], 'bottom':True, \n 'top':True, 'above':[], 'below':[], 'coords':[], 'branching_processed': False, 'centroid':[], \n 'contour_length':-1, 'fused_top':False, 'fused_bottom': False }\n contour_model['contour_length'] = len( contour )\n for v in contour:\n contour_model['v'].append( bm.verts.new(v) )\n contour_model['coords'].append( v )\n for i in range( len( contour_model['v'] ) ):\n contour_model['e'].append( bm.edges.new( ( contour_model['v'][i], \n contour_model['v'][(i+1)%len(contour_model['v'])] ) ) )\n contour_model['centroid'] = centroid_list[section_index][contour_index]\n contour_model_in_a_section.append( contour_model )\n contour_index += 1\n all_contour_model.append( contour_model_in_a_section )\n section_index += 1\n\n# contour model connectivity processing\nfor contour_pair in contour_pair_list:\n contouridx1 = contour_pair[0]\n contouridx2 = contour_pair[1]\n \n contour1 = all_contour_model[contouridx1[0]-1][contouridx1[1]]\n contour2 = all_contour_model[contouridx2[0]-1][contouridx2[1]]\n contour1['above'].append( contour2 )\n contour2['below'].append( contour1 )\n # print( contour1['index'], contour2['index'] )\n\n# check if fused\nfor contours_in_a_section in all_contour_model:\n for contour in contours_in_a_section:\n if len( contour['below'] ) == 2: \n contour['fused_top'] = True\n # print( \"fused when checked from below 1\", contour['index'], [ c['index'] for c in contour['below'] ] )\n if len( contour['below'] ) == 1 and contour['below'][0]['fused_top'] == True: \n contour['fused_top'] = True\n # print( \"fused when checked from below 2\", contour['index'], [ c['index'] for c in contour['below'] ] )\n if len( contour['above'] ) == 2:\n contour['fused_bottom'] = True\n # print( \"fused when checked from above 1\", contour['index'], [ c['index'] for c in contour['above'] ] )\n temp = contour\n while len( temp['below'] ) == 1:\n temp = temp['below'][0]\n 
temp['fused_bottom'] = True\n # print( \"fused when checked from above 2\", temp['index'], [ c['index'] for c in contour['above'] ] )\n \n #print( contour1['index'], contour2['index'] )\n\ndef get_distance( coord1, coord2 = [0,0,0]):\n x_diff = coord1[0] - coord2[0]\n y_diff = coord1[1] - coord2[1]\n z_diff = coord1[2] - coord2[2]\n sum_diff_squared = x_diff*x_diff + y_diff*y_diff + z_diff*z_diff\n distance = math.sqrt(sum_diff_squared) \n return distance\n \n# close top and bottom\nfor contour_model_in_a_section in all_contour_model:\n for contour_model in contour_model_in_a_section:\n if len( contour_model['below'] ) == 0:\n if contour_model['fused_bottom'] == False:\n face1 = bm.faces.new( tuple( reversed( contour_model['v'] ) ) )\n print( \"bottom not fused\", contour_model['index'], [ x['index'] for x in contour_model['above'] ] )\n else:\n # print( \"fused bottom\", contour_model['index'], [ x['index'] for x in contour_model['above'] ] )\n above_centroid = [0,0,0]\n above_left = [0,0,0]\n above_right = [0,0,0]\n #print( contour_model['index'] )\n left_idx = 0\n right_idx = int(contour_model['contour_length']/2)\n #print( left_idx, right_idx )\n \n if len( contour_model['above'] ) == 2:\n above1 = contour_model['above'][0]\n above2 = contour_model['above'][1]\n if above1['coords'][0][0] > above2['coords'][0][0] :\n temp = above2\n above2 = above1\n above1 = temp\n above_centroid = list( above1['centroid'] ).copy()\n for i in range(3):\n above_centroid[i] = ( above1['centroid'][i] + above2['centroid'][i] ) / 2\n above_left = above1['coords'][0]\n above_right = above2['coords'][int(above2['contour_length']/2)]\n else:\n above_centroid = contour_model['above'][0]['centroid']\n #print( \"top cover\", contour_model['index'] )\n #print( \"current centroid, below centroid\", contour_model['centroid'], below_centroid )\n \n curr_left = contour_model['coords'][0]\n curr_centroid = contour_model['centroid']\n curr_right = contour_model['coords'][int(contour_model['contour_length']/2)]\n \n left_dist = get_distance( curr_left, curr_centroid )\n right_dist = get_distance( curr_right, curr_centroid )\n mean_dist = ( left_dist + right_dist ) / 2\n #print( \"left, right, mean dist\", left_dist, right_dist, mean_dist )\n sin_theta = mean_dist / SPIRALIA_RADIUS\n theta = math.asin( sin_theta )\n z_diff = SPIRALIA_RADIUS - SPIRALIA_RADIUS * math.cos( theta )\n #print( \"sin theta, theta, z_diff\", sin_theta, theta, z_diff )\n \n delta_centroid = []\n for k in range(3):\n delta_centroid.append( ( contour_model['centroid'][k] - above_centroid[k] ) * (z_diff / (40.3/SCALE_FACTOR)) )\n #delta_centroid = [ contour_model['centroid'][x] - above_centroid[x] for x in range(3) ]\n #delta_centroid[2] = z_diff\n \n #delta_left = [ contour_model['coords'][0][x] - below_left[x] for x in range(3) ]\n #delta_right = [ contour_model['coords'][int(contour_model['contour_length']/2)][x] - below_right[x] for x in range(3) ]\n \n new_centroid = [ contour_model['centroid'][x] + delta_centroid[x] for x in range(3) ] \n #new_left = [ contour_model['coords'][0][x] + delta_left[x] for x in range(3) ] \n #new_right = [ contour_model['coords'][int(contour_model['contour_length']/2)][x] + delta_right[x] for x in range(3) ]\n \n #print( below_centroid, below_left, below_right )\n #print( contour_model['centroid'], contour_model['coords'][0], contour_model['coords'][int(contour_model['contour_length']/2)] )\n #print( delta_centroid, delta_left, delta_right )\n #print( new_centroid, new_left, new_right )\n \n v_centroid = 
bm.verts.new(new_centroid)\n #v_left = bm.verts.new(new_left) \n #v_right = bm.verts.new(new_right) \n #contour_model['e'].append( bm.edges.new( ( v_left, v_centroid ) ) )\n #contour_model['e'].append( bm.edges.new( ( v_right, v_centroid ) ) )\n for i in range( contour_model['contour_length'] ):\n face1 = bm.faces.new( ( contour_model['v'][i], v_centroid, contour_model['v'][(i+1)%contour_model['contour_length']] ) )\n\n elif len( contour_model['above'] ) == 0:\n if contour_model['fused_top'] == False:\n face1 = bm.faces.new( tuple( contour_model['v'] ) )\n print( \"top not fused\", contour_model['index'], [ x['index'] for x in contour_model['below'] ] )\n else:\n #print( \"fused\", contour_model['index'], [ x['index'] for x in contour_model['below'] ] )\n below_centroid = [0,0,0]\n below_left = [0,0,0]\n below_right = [0,0,0]\n #print( contour_model['index'] )\n left_idx = 0\n right_idx = int(contour_model['contour_length']/2)\n #print( left_idx, right_idx )\n \n if len( contour_model['below'] ) == 2:\n below1 = contour_model['below'][0]\n below2 = contour_model['below'][1]\n if below1['coords'][0][0] > below2['coords'][0][0] :\n temp = below2\n below2 = below1\n below1 = temp\n below_centroid = list( below1['centroid'] ).copy()\n for i in range(3):\n below_centroid[i] = ( below1['centroid'][i] + below2['centroid'][i] ) / 2\n below_left = below1['coords'][0]\n below_right = below2['coords'][int(below2['contour_length']/2)]\n else:\n below_centroid = contour_model['below'][0]['centroid']\n #print( \"top cover\", contour_model['index'] )\n #print( \"current centroid, below centroid\", contour_model['centroid'], below_centroid )\n \n curr_left = contour_model['coords'][0]\n curr_centroid = contour_model['centroid']\n curr_right = contour_model['coords'][int(contour_model['contour_length']/2)]\n \n left_dist = get_distance( curr_left, curr_centroid )\n right_dist = get_distance( curr_right, curr_centroid )\n mean_dist = ( left_dist + right_dist ) / 2\n # print( \"left, right, mean dist\", left_dist, right_dist, mean_dist )\n \n sin_theta = mean_dist / SPIRALIA_RADIUS\n theta = math.asin( sin_theta )\n z_diff = SPIRALIA_RADIUS - SPIRALIA_RADIUS * math.cos( theta )\n #print( \"sin theta, theta, z_diff\", sin_theta, theta, z_diff )\n \n delta_centroid = []\n for k in range(3):\n delta_centroid.append( ( contour_model['centroid'][k] - below_centroid[k] ) * (z_diff / (40.3/SCALE_FACTOR)) )\n #delta_centroid = [ contour_model['centroid'][x] - below_centroid[x] for x in range(3) ]\n #delta_centroid[2] = z_diff\n \n #delta_left = [ contour_model['coords'][0][x] - below_left[x] for x in range(3) ]\n #delta_right = [ contour_model['coords'][int(contour_model['contour_length']/2)][x] - below_right[x] for x in range(3) ]\n \n new_centroid = [ contour_model['centroid'][x] + delta_centroid[x] for x in range(3) ] \n #new_left = [ contour_model['coords'][0][x] + delta_left[x] for x in range(3) ] \n #new_right = [ contour_model['coords'][int(contour_model['contour_length']/2)][x] + delta_right[x] for x in range(3) ]\n \n #print( below_centroid, below_left, below_right )\n #print( contour_model['centroid'], contour_model['coords'][0], contour_model['coords'][int(contour_model['contour_length']/2)] )\n #print( delta_centroid, delta_left, delta_right )\n #print( new_centroid, new_left, new_right )\n \n v_centroid = bm.verts.new(new_centroid)\n #v_left = bm.verts.new(new_left) \n #v_right = bm.verts.new(new_right) \n #contour_model['e'].append( bm.edges.new( ( v_left, v_centroid ) ) )\n #contour_model['e'].append( 
bm.edges.new( ( v_right, v_centroid ) ) )\n for i in range( contour_model['contour_length'] ):\n face1 = bm.faces.new( ( contour_model['v'][i], contour_model['v'][(i+1)%contour_model['contour_length']], v_centroid ) )\n \n\n# create faces\nfor contour_pair in contour_pair_list:\n contouridx1 = contour_pair[0]\n contouridx2 = contour_pair[1]\n \n contour1 = all_contour_model[contouridx1[0]-1][contouridx1[1]]\n contour2 = all_contour_model[contouridx2[0]-1][contouridx2[1]]\n\n if len( contour1['above'] ) == 1 and len( contour2['below'] ) == 1:\n for i in range( len( contour1['v'] ) ):\n idx1 = i \n idx2 = ( idx1 + 1 ) % len( contour1['v'] )\n \n #face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], contour2['v'][idx1] ) )\n #face2 = bm.faces.new( ( contour1['v'][idx2], contour2['v'][idx2], contour2['v'][idx1] ) )\n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], contour2['v'][idx2], contour2['v'][idx1] ) )\n elif len( contour1['above'] ) == 2 and len( contour2['below'] ) == 1:\n if contour1['branching_processed'] == False:\n above1 = contour1['above'][0]\n above2 = contour1['above'][1]\n if above1['coords'][0][0] > above2['coords'][0][0] :\n temp = above2\n above2 = above1\n above1 = temp\n #print( contour1['index'], above1['index'] )\n quarter_length = int( len( contour1['v'] ) / 4 )\n for i in range( quarter_length ):\n idx1 = i \n idx2 = idx1+1\n idx3 = idx1*2\n idx4 = idx1*2+1\n idx5 = idx1*2+2\n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], above1['v'][idx4], above1['v'][idx3] ) )\n face1 = bm.faces.new( ( contour1['v'][idx2], above1['v'][idx5], above1['v'][idx4] ) )\n for i in range( quarter_length) :\n idx1 = i +quarter_length\n idx2 = idx1+1\n idx3 = i*2\n idx4 = i*2+1\n idx5 = i*2+2\n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], above2['v'][idx4], above2['v'][idx3] ) )\n face1 = bm.faces.new( ( contour1['v'][idx2], above2['v'][idx5], above2['v'][idx4] ) )\n for i in range( quarter_length) :\n idx1 = i +quarter_length*2\n idx2 = idx1+1\n idx3 = ( quarter_length*2 +(i*2) ) % len( contour1['v'] )\n idx4 = ( quarter_length*2 +(i*2)+1 )% len( contour1['v'] )\n idx5 = ( quarter_length*2 +(i*2)+2 ) % len( contour1['v'] ) \n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], above2['v'][idx4], above2['v'][idx3] ) )\n face1 = bm.faces.new( ( contour1['v'][idx2], above2['v'][idx5], above2['v'][idx4] ) )\n for i in range( quarter_length) :\n idx1 = i +quarter_length*3\n idx2 = ( idx1+1 ) % len( contour1['v'] )\n idx3 = ( quarter_length*2 +(i*2) ) % len( contour1['v'] )\n idx4 = ( quarter_length*2 +(i*2)+1 )% len( contour1['v'] )\n idx5 = ( quarter_length*2 +(i*2)+2 ) % len( contour1['v'] ) \n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], above1['v'][idx4], above1['v'][idx3] ) )\n face1 = bm.faces.new( ( contour1['v'][idx2], above1['v'][idx5], above1['v'][idx4] ) )\n idx1 = quarter_length\n idx2 = quarter_length * 3\n idx3 = quarter_length * 2\n idx4 = 0\n #print( idx1, idx2, idx3, idx4 )\n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], above1['v'][idx3] ) )\n face1 = bm.faces.new( ( contour1['v'][idx2], contour1['v'][idx1], above2['v'][0] ) )\n contour1['branching_processed'] = True\n elif len( contour1['above'] ) == 1 and len( contour2['below'] ) == 2:\n if contour2['branching_processed'] == False:\n below1 = contour2['below'][0]\n below2 = contour2['below'][1]\n if below1['coords'][0][0] > below2['coords'][0][0] :\n temp = below2\n below2 = below1\n below1 = temp\n 
#print( contour2['index'], below1['index'], below2['index'] )\n quarter_length = int( len( contour1['v'] ) / 4 )\n for i in range( quarter_length ):\n idx1 = i \n idx2 = idx1+1\n idx3 = idx1*2\n idx4 = idx1*2+1\n idx5 = idx1*2+2\n #print( \"1\", idx1, idx2, idx3, idx4 )\n face1 = bm.faces.new( ( contour2['v'][idx2], contour2['v'][idx1], below1['v'][idx3], below1['v'][idx4] ) )\n face1 = bm.faces.new( ( contour2['v'][idx2], below1['v'][idx4], below1['v'][idx5] ) )\n for i in range( quarter_length) :\n idx1 = i +quarter_length\n idx2 = idx1+1\n idx3 = i*2\n idx4 = i*2+1\n idx5 = i*2+2\n #print( \"2\", idx1, idx2, idx3, idx4, idx5 )\n face1 = bm.faces.new( ( contour2['v'][idx2], contour2['v'][idx1], below2['v'][idx3], below2['v'][idx4] ) )\n face1 = bm.faces.new( ( contour2['v'][idx2], below2['v'][idx4], below2['v'][idx5] ) )\n for i in range( quarter_length) :\n idx1 = i +quarter_length*2\n idx2 = idx1+1\n idx3 = ( quarter_length*2 +(i*2) ) % len( contour1['v'] )\n idx4 = ( quarter_length*2 +(i*2)+1 )% len( contour1['v'] )\n idx5 = ( quarter_length*2 +(i*2)+2 ) % len( contour1['v'] ) \n #print( \"3\", idx1, idx2, idx3, idx4, idx5 )\n face1 = bm.faces.new( ( contour2['v'][idx2], contour2['v'][idx1], below2['v'][idx3], below2['v'][idx4] ) )\n face1 = bm.faces.new( ( contour2['v'][idx2], below2['v'][idx4], below2['v'][idx5] ) )\n for i in range( quarter_length) :\n idx1 = i +quarter_length*3\n idx2 = ( idx1+1 ) % len( contour1['v'] )\n idx3 = ( quarter_length*2 +(i*2) ) % len( contour1['v'] )\n idx4 = ( quarter_length*2 +(i*2)+1 )% len( contour1['v'] )\n idx5 = ( quarter_length*2 +(i*2)+2 ) % len( contour1['v'] ) \n #print( \"4\", idx1, idx2, idx3, idx4, idx5 )\n face1 = bm.faces.new( ( contour2['v'][idx2], contour2['v'][idx1], below1['v'][idx3], below1['v'][idx4] ) )\n face1 = bm.faces.new( ( contour2['v'][idx2], below1['v'][idx4], below1['v'][idx5] ) )\n idx1 = quarter_length\n idx2 = quarter_length * 3\n idx3 = quarter_length * 2\n idx4 = 0\n #print( idx1, idx2, idx3, idx4 )\n face1 = bm.faces.new( ( contour2['v'][idx2], contour2['v'][idx1], below1['v'][idx3] ) )\n face1 = bm.faces.new( ( contour2['v'][idx1], contour2['v'][idx2], below2['v'][0] ) )\n contour2['branching_processed'] = True\n\nCONTOUR_LENGTH = 106 / SCALE_FACTOR\nCONTOUR_WIDTH = 16 / SCALE_FACTOR\nCONTOUR_POINT_COUNT = 16\n\ntoppair_list = [ [ [18,0],[18,1] ], [ [12,42], [10,44] ] ]\n# create imaginary spiral based on cross-section\ndef cross(a, b):\n c = [a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0]]\n\n return c\n \nfor toppair in toppair_list:\n contour1 = all_contour_model[toppair[0][0]-1][toppair[0][1]]\n contour2 = all_contour_model[toppair[1][0]-1][toppair[1][1]]\n #print(\"toppair\", contour1['index'], contour2['index'])\n num_point = contour1['contour_length']\n \n below_centroid = [0,0,0]\n below1 = contour1['below'][0]\n below2 = contour2['below'][0]\n for i in range(3):\n below_centroid[i] = ( below1['centroid'][i] + below2['centroid'][i] ) / 2\n #print(\"below:\", below1['index'], below2['index'])\n \n curr_centroid = [0,0,0]\n for i in range(3):\n curr_centroid[i] = ( contour1['centroid'][i] + contour2['centroid'][i] ) / 2\n \n left_to_right_vector = [0,0,0]\n left_to_right_unit = [0,0,0]\n for i in range(3):\n left_to_right_vector[i] = below2['centroid'][i] - below1['centroid'][i]\n left_to_right_length = get_distance( left_to_right_vector )\n print( left_to_right_length, left_to_right_vector )\n for k in range(3):\n left_to_right_unit[k] = left_to_right_vector[k] / 
left_to_right_length\n \n left_dist = get_distance( contour1['centroid'], curr_centroid )\n right_dist = get_distance( contour2['centroid'], curr_centroid )\n mean_dist = ( left_dist + right_dist ) / 2\n #print( \"left, right, mean dist\", left_dist, right_dist, mean_dist )\n \n sin_theta = mean_dist / SPIRALIA_RADIUS\n theta = math.asin( sin_theta )\n z_diff = SPIRALIA_RADIUS - SPIRALIA_RADIUS * math.cos( theta )\n #print( \"sin theta, theta, z_diff\", sin_theta, theta, z_diff )\n \n delta_centroid = []\n for k in range(3):\n delta_centroid.append( curr_centroid[k] - below_centroid[k] )\n \n delta_length = get_distance( delta_centroid )\n #print( \"delta\", delta_centroid, delta_length )\n unit_delta = [0,0,0]\n for k in range(3):\n unit_delta[k] = delta_centroid[k] / delta_length\n \n new_centroid = [ curr_centroid[x] + unit_delta[x] * z_diff for x in range(3) ]\n outer = [ curr_centroid[x] + unit_delta[x] * delta_length + unit_delta[x] * ( CONTOUR_LENGTH / 4 ) for x in range(3) ] \n inner = [ curr_centroid[x] + unit_delta[x] * delta_length - unit_delta[x] * ( CONTOUR_LENGTH / 4 ) for x in range(3) ] \n \n v_centroid = bm.verts.new(new_centroid)\n #v_outer = bm.verts.new(outer)\n #v_inner = bm.verts.new(inner)\n #bm.edges.new( ( v_outer, v_centroid ) )\n #bm.edges.new( ( v_inner, v_centroid ) )\n \n perpendicular_vector = cross( left_to_right_vector, unit_delta )\n perpendicular_length = get_distance( perpendicular_vector )\n unit_perpendicular = [0,0,0]\n for k in range(3):\n unit_perpendicular[k] = perpendicular_vector[k] / perpendicular_length\n \n v_list = []\n for i in range( CONTOUR_POINT_COUNT ):\n rotation_in_radian = ( math.pi * 2 / CONTOUR_POINT_COUNT )\n radius_displacement= math.cos( rotation_in_radian * i ) * (CONTOUR_LENGTH / 4)\n perpendicular_displacement = math.sin( rotation_in_radian * i ) * (CONTOUR_WIDTH / 4)\n new_vert = [0,0,0]\n for j in range(3):\n new_vert[j] = new_centroid[j] + unit_delta[j] * radius_displacement + unit_perpendicular[j] * perpendicular_displacement \n\n v_list.append( bm.verts.new(new_vert) )\n for i in range( len( v_list ) ):\n bm.edges.new( ( v_list[i], v_list[(i+1)%CONTOUR_POINT_COUNT] ) )\n \n for i in range( contour1['contour_length'] ):\n face1 = bm.faces.new( ( contour1['v'][i], contour1['v'][(i+1)%CONTOUR_POINT_COUNT], v_list[(i+1)%CONTOUR_POINT_COUNT], v_list[i] ) )\n for i in range( contour2['contour_length'] ):\n face1 = bm.faces.new( ( contour2['v'][i], contour2['v'][(i+1)%CONTOUR_POINT_COUNT], v_list[(int(CONTOUR_POINT_COUNT/2)+CONTOUR_POINT_COUNT-i-1)%CONTOUR_POINT_COUNT], v_list[(int(CONTOUR_POINT_COUNT/2)+CONTOUR_POINT_COUNT-i)%CONTOUR_POINT_COUNT] ) )\n\nbottompair_list = [ [[2,4],[2,7]], [[2,6],[3,20]],[[3,16],[3,22]],[[3,17],[4,30]],[[4,26],[6,42]],[[3,19],[3,21]],[[4,21],[4,28]] ]\n\nfor bottompair in bottompair_list:\n contour1 = all_contour_model[bottompair[0][0]-1][bottompair[0][1]]\n contour2 = all_contour_model[bottompair[1][0]-1][bottompair[1][1]]\n print(\"bottompair\", contour1['index'], contour2['index'])\n num_point = contour1['contour_length']\n \n above_centroid = [0,0,0]\n above1 = contour1['above'][0]\n above2 = contour2['above'][0]\n for i in range(3):\n above_centroid[i] = ( above1['centroid'][i] + above2['centroid'][i] ) / 2\n #print(\"above:\", above1['index'], above2['index'])\n \n curr_centroid = [0,0,0]\n for i in range(3):\n curr_centroid[i] = ( contour1['centroid'][i] + contour2['centroid'][i] ) / 2\n \n left_to_right_vector = [0,0,0]\n for i in range(3):\n left_to_right_vector[i] = 
above2['centroid'][i] - above1['centroid'][i]\n \n left_dist = get_distance( contour1['centroid'], curr_centroid )\n right_dist = get_distance( contour2['centroid'], curr_centroid )\n mean_dist = ( left_dist + right_dist ) / 2\n print( \"radius, left, right, mean dist\", SPIRALIA_RADIUS, left_dist, right_dist, mean_dist )\n \n sin_theta = mean_dist / SPIRALIA_RADIUS\n theta = math.asin( sin_theta )\n z_diff = SPIRALIA_RADIUS - SPIRALIA_RADIUS * math.cos( theta )\n print( \"RADIUS, RADIUS * cos(theta)\", SPIRALIA_RADIUS,SPIRALIA_RADIUS * math.cos( theta )) \n print( \"sin theta, theta, theta in degree, cos(theta), z_diff\", sin_theta, theta, math.degrees( theta ), math.cos(theta), z_diff )\n \n delta_centroid = []\n for k in range(3):\n delta_centroid.append( curr_centroid[k] - above_centroid[k] )\n \n delta_length = get_distance( delta_centroid )\n print( \"delta\", delta_centroid, delta_length )\n unit_delta = [0,0,0]\n for k in range(3):\n unit_delta[k] = delta_centroid[k] / delta_length\n \n new_centroid = [ curr_centroid[x] + unit_delta[x] * z_diff for x in range(3) ]\n outer = [ curr_centroid[x] + unit_delta[x] * delta_length + unit_delta[x] * ( CONTOUR_LENGTH / 4 ) for x in range(3) ] \n inner = [ curr_centroid[x] + unit_delta[x] * delta_length - unit_delta[x] * ( CONTOUR_LENGTH / 4 ) for x in range(3) ] \n \n v_centroid = bm.verts.new(new_centroid)\n #v_outer = bm.verts.new(outer)\n #v_inner = bm.verts.new(inner)\n #bm.edges.new( ( v_outer, v_centroid ) )\n #bm.edges.new( ( v_inner, v_centroid ) )\n \n perpendicular_vector = cross( left_to_right_vector, unit_delta )\n perpendicular_length = get_distance( perpendicular_vector )\n unit_perpendicular = [0,0,0]\n for k in range(3):\n unit_perpendicular[k] = perpendicular_vector[k] / perpendicular_length\n \n v_list = []\n for i in range( CONTOUR_POINT_COUNT ):\n rotation_in_radian = ( math.pi * 2 / CONTOUR_POINT_COUNT )\n radius_displacement= math.cos( rotation_in_radian * i ) * (CONTOUR_LENGTH / 4)\n perpendicular_displacement = math.sin( rotation_in_radian * i ) * (CONTOUR_WIDTH / 4)\n new_vert = [0,0,0]\n for j in range(3):\n new_vert[j] = new_centroid[j] + unit_delta[j] * radius_displacement + unit_perpendicular[j] * perpendicular_displacement \n\n v_list.append( bm.verts.new(new_vert) )\n for i in range( len( v_list ) ):\n bm.edges.new( ( v_list[i], v_list[(i+1)%CONTOUR_POINT_COUNT] ) )\n \n for i in range( contour1['contour_length'] ):\n face1 = bm.faces.new( ( contour1['v'][(i+1)%CONTOUR_POINT_COUNT], contour1['v'][i], v_list[(+CONTOUR_POINT_COUNT-i)%CONTOUR_POINT_COUNT], v_list[(CONTOUR_POINT_COUNT-i-1)%CONTOUR_POINT_COUNT] ) )\n for i in range( contour2['contour_length'] ):\n face1 = bm.faces.new( ( contour2['v'][(i+1)%CONTOUR_POINT_COUNT], contour2['v'][i], v_list[(int(CONTOUR_POINT_COUNT/2)+i)%CONTOUR_POINT_COUNT], v_list[(int(CONTOUR_POINT_COUNT/2)+i+1)%CONTOUR_POINT_COUNT] ) )\n\n\n# make the bmesh the object's mesh\nbm.to_mesh(mesh) \n\nbm.free() # always do this when finished\n'''\n\nfile = open(\"Spiriferella_spiralia_simplified_contour_.py\", \"w\")\nfile.write(ret_str)\nfile.close()\n\n#print( contour_pair_list.contour_pair_list )" }, { "alpha_fraction": 0.4571855068206787, "alphanum_fraction": 0.5467498898506165, "avg_line_length": 34.941463470458984, "blob_id": "842c91164cc6b43de2f13efaa9519803cf03d481", "content_id": "343bf5afa6929be7b741c2477cafa865601ad137", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7369, "license_type": "no_license", 
"max_line_length": 133, "num_lines": 205, "path": "/valve_extractor.py", "repo_name": "jikhanjung/spiralia", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n#import contour_pair_list\n\nret_str = \"\"\n\ncoord_correction = []\nfor i in range(18):\n coord_correction.append( [-1028,-774])\n\ncoord_correction[4][1] += -2\ncoord_correction[5][1] += 0\ncoord_correction[6][1] += 0\ncoord_correction[7][1] += 5\ncoord_correction[8][1] += 10\ncoord_correction[9][1] += 0\nnum_point = 100\nSCALE_FACTOR = 100.0\n\nend_point_list = [[[[ 40, 949],[1959, 977]],[[109,1056],[1956, 974]]],\n[[[ 28, 948],[1942,1015]],[[ 73,1063],[1951,1019]]],\n[[[ 19,1005],[1936,1023]],[[ 67,1082],[1937,1021]]],\n[[[ 19, 995],[1923,1048]],[[ 83,1078],[1924,1046]]],\n[[[ 8,1003],[1928, 997]],[[ 77,1069],[1927, 995]]],\n[[[110,1095],[1935, 930]],[[129,1103],[1904, 945]]],\n[[[ 67, 991],[1926, 822]],[[ 67, 988],[1934, 828]]],\n[[[ 43,1005],[1924, 904]],[[ 45,1001],[1925, 902]]],\n[[[ 32, 957],[1927, 890]],[[ 32, 952],[1930, 889]]],\n[[[ 36, 966],[1863, 938]],[[ 41, 965],[1863, 936]]],\n[[[ 71,1016],[1923, 918]],[[ 59, 988],[1924, 916]]],\n[[[ 65, 929],[1832, 964]],[[ 99, 916],[1830, 960]]],\n[[[ 64, 870],[1846, 918]],[[116, 945],[1850, 915]]],\n[[[105, 861],[1854, 877]],[[102, 856],[1855, 875]]],\n[[[121, 836],[1843, 780]],[[122, 827],[1847, 775]]],\n[[[140, 826],[1779, 774]],[[140, 817],[1784, 771]]],\n[[[199, 842],[1772, 760]],[[194, 839],[1776, 753]]],\n[[[187, 817],[1754, 763]],[[187, 812],[1756, 760]]]]\n\nall_contours = []\nfor i in range(18):\n num = '0' + str(i+1)\n filename = 'images/Spiriferella-valves-' + num[-2:]\n print(filename)\n im = cv2.imread( filename + '.png')\n\n imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n ret,thresh = cv2.threshold(imgray,127,255,0)\n image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n j = 0\n contours_in_a_section = []\n x_correction = coord_correction[i][0]\n y_correction = coord_correction[i][1]\n\n for c in contours:\n print(\"contour\",j,\"len:\",len(c))\n new_contour = []\n interval = len(c) / num_point\n k = 0\n idx1 = -1\n idx2 = -1\n for pt in c:\n if pt[0][0] == end_point_list[i][j][0][0] and pt[0][1] == end_point_list[i][j][0][1]:\n idx1 = k\n #print( \"idx1:\", pt[0])\n elif pt[0][0] == end_point_list[i][j][1][0] and pt[0][1] == end_point_list[i][j][1][1]:\n #print( \"idx2:\", pt[0])\n idx2 = k\n k+=1\n #print(i,j,idx1,idx2)\n if c[idx1][0][0] > c[idx2][0][0]:\n left_idx = idx2\n right_idx = idx1\n else:\n left_idx = idx1\n right_idx = idx2\n #print( c[min_idx], c[max_idx])\n idx_list = []\n if left_idx < right_idx:\n idx_diff1 = int( ( right_idx - left_idx ) / ( num_point / 2))\n idx_diff2 = int( ( left_idx + ( len(c) - right_idx ) ) / (num_point /2))\n else:\n idx_diff1 = int((left_idx - right_idx) / (num_point / 2))\n idx_diff2 = int((right_idx + (len(c) - left_idx)) / (num_point / 2))\n\n for k in range(int(num_point /2)):\n idx_list.append( int( left_idx + idx_diff1 * k ) %len(c) )\n for k in range(int(num_point /2)):\n idx_list.append(int(right_idx + idx_diff2 * k )%len(c))\n\n #print( left_idx, idx_diff1, right_idx, idx_diff2, idx_list )\n\n cv2.drawContours(im, [c], -1, (0, 255, 0), 3)\n for idx in idx_list:\n cx = c[idx][0][0]\n cy = c[idx][0][1]\n x = ( cx + x_correction )\n y = ( cy + y_correction )\n z = ( (i-9)*40.3 )\n new_contour.append( [ x, y * -1 , z ] )\n cv2.putText(im, str(idx), (cx, cy), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, 255)\n contours_in_a_section.append( new_contour )\n M = cv2.moments(c)\n 
cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n cv2.putText(im,str(j),(cx,cy),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,255)\n j+=1\n\n all_contours.append( contours_in_a_section )\n cv2.imwrite('images/Spiriferella-valves-labeled-' + num[-2:] +'.png', im)\nfor cis in all_contours:\n print( \"no of contours:\", len(cis), \"length each\", [ len(x) for x in cis ] )\n\nall_contours_str_list = []\nfor contours_in_a_section in all_contours:\n contour_str_list = []\n for contour in contours_in_a_section:\n v_str_list = []\n for v in contour:\n #print(\"v:\",v)\n v_str = \"(\" + \",\".join( [str(x/SCALE_FACTOR) for x in v]) + \")\"\n v_str_list.append( v_str )\n contour_str = \"[\" + \",\".join( v_str_list ) + \"]\"\n #print( \"contour_str:\", contour_str )\n contour_str_list.append( contour_str )\n contours_in_a_section_str = \"[\" + \",\\n\".join( contour_str_list ) + \"]\"\n all_contours_str_list.append( contours_in_a_section_str )\nret_str += \"contour_list=[\"\nret_str += \",\\n\".join( all_contours_str_list )\nret_str += \"]\"\n\nret_str += \"\"\"\nimport bpy\nimport bmesh\nimport time \n\nmesh = bpy.data.meshes.new(\"mesh\") # add a new mesh\nobj = bpy.data.objects.new(\"MyObject\", mesh) # add a new object using the mesh\n\nscene = bpy.context.scene\nscene.objects.link(obj) # put the object into the scene (link)\nscene.objects.active = obj # set as the active object in the scene\nobj.select = True # select object\n\nmesh = bpy.context.object.data\nbm = bmesh.new()\n\n# contour \nall_contour_model = []\nsection_index = 0\nfor contours_in_a_section in contour_list:\n contour_model_in_a_section = []\n\n contour_index = 0\n for contour in contours_in_a_section:\n contour_model = { 'index':[section_index+1,contour_index],'coords':[],'v':[], 'e':[] }\n #print( \"contour model\", section_index, contour_index, contour_model )\n for v in contour:\n contour_model['v'].append( bm.verts.new(v) )\n contour_model['coords'].append( v )\n #for i in range( len( contour_model['v'] ) ):\n # contour_model['e'].append( bm.edges.new( ( contour_model['v'][i],contour_model['v'][(i+1)%len(contour_model['v'])] ) ) )\n contour_model_in_a_section.append( contour_model )\n contour_index += 1\n all_contour_model.append( contour_model_in_a_section )\n section_index += 1\n\n\n\n\n\n# close top and bottom\nface1 = bm.faces.new( tuple( reversed( all_contour_model[0][0]['v'] ) ) )\nface1 = bm.faces.new( tuple( reversed( all_contour_model[0][1]['v'] ) ) )\nface1 = bm.faces.new( tuple( all_contour_model[-1][0]['v'] ) )\nface1 = bm.faces.new( tuple( all_contour_model[-1][1]['v'] ) )\n\n# create faces\nfor i in range( len( all_contour_model ) - 1 ): \n \n contours_in_below_section = all_contour_model[i]\n contours_in_above_section = all_contour_model[i+1]\n\n for j in range(2):\n contour1 = contours_in_below_section[j]\n contour2 = contours_in_above_section[j]\n print( contour1['coords'] )\n print( contour2['coords'] )\n for k in range( len( contour1['v'] ) ):\n idx1 = k \n idx2 = ( idx1 + 1 ) % len( contour1['v'] )\n print( i, j, idx1, idx2 )\n face1 = bm.faces.new( ( contour1['v'][idx1], contour1['v'][idx2], contour2['v'][idx2], contour2['v'][idx1] ) )\n # make the bmesh the object's mesh\n bm.to_mesh(mesh) \n #time.sleep(2)\n\n\nbm.free() # always do this when finished\n\"\"\"\n\nfile = open(\"Spiriferella_valve_contour_.py\", \"w\")\nfile.write(ret_str)\nfile.close()\n\n#print( all_contours )\n\n" }, { "alpha_fraction": 0.585686445236206, "alphanum_fraction": 0.609785795211792, "avg_line_length": 45.681819915771484, 
"blob_id": "8bb608b5b18313e5ce42eab8ba424e2ce03522fa", "content_id": "69410c2fc2a12fb7f6b68fbba34993110a85c75b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4108, "license_type": "no_license", "max_line_length": 199, "num_lines": 88, "path": "/ideal_spiralia_model.py", "repo_name": "jikhanjung/spiralia", "src_encoding": "UTF-8", "text": "import math\nimport bpy\nimport bmesh\n\nNUMBER_OF_WHORL = 10\nRADIUS = 300\nTRANSLATION = [0, 0, 20]\nCONTOUR_LENGTH = 106\nCONTOUR_WIDTH = 16\nNUMBER_OF_SECTION = 48\nCENTER = [350, 0]\ndisplacement_list = [ [ 7, 0 ], ]\nCONTOUR_POINT_COUNT = 16\nSCALE_FACTOR = 1000\nCLOCKWISE = 1\nCOUNTER_CLOCKWISE = -1\n\ndef get_spiral_contours( center, number_of_whorl, number_of_section, contour_width, contour_length, contour_point_count, scale_factor, radius, translation, start_angle, whorl_direction):\n contour_list = []\n centroid_list = []\n for i in range( number_of_whorl):\n for j in range( number_of_section ):\n angle_in_degree = ( 360 / number_of_section) * j * whorl_direction + start_angle\n print( \"degree\", j, whorl_direction, start_angle, angle_in_degree )\n angle_in_radian = math.radians( angle_in_degree )\n x = math.cos( angle_in_radian ) * radius + translation[0] * ( j/number_of_section) + translation[0] * i\n y = math.sin( angle_in_radian ) * radius + translation[1] * ( j/number_of_section) + translation[1] * i\n z = translation[2] * (j / number_of_section) + translation[2] * i\n centroid_list.append([(center[0] + x) / scale_factor, (center[1] + y) / scale_factor, z / scale_factor])\n contour = []\n for k in range( contour_length ):\n rotation_in_radian = ( math.pi * 2 / contour_point_count )\n radius_displacement= math.cos( rotation_in_radian * k ) * (contour_length / 2)\n z_displacement = math.sin( rotation_in_radian * k ) * (contour_width / 2)\n x = math.cos(angle_in_radian) * ( radius + radius_displacement ) + translation[0] * ( j/number_of_section) + translation[0] * i\n y = math.sin(angle_in_radian) * ( radius + radius_displacement ) + translation[1] * ( j/number_of_section) + translation[1] * i\n z = translation[2] * (j / number_of_section) + translation[2] * i + z_displacement\n contour.append([(center[0] + x) / scale_factor, (center[1] + y) / scale_factor, z / scale_factor])\n if whorl_direction == 1:\n contour_list.append( contour )\n else:\n contour_list.append( reversed(contour ) )\n #print( x, y, z, contour )\n #print( x, y, z )\n return contour_list\n\nall_list=[]\nall_list.append( get_spiral_contours( [350,0], NUMBER_OF_WHORL, NUMBER_OF_SECTION, CONTOUR_WIDTH, CONTOUR_LENGTH, CONTOUR_POINT_COUNT, SCALE_FACTOR, RADIUS, [15, 0, 20 ], -135, CLOCKWISE ) )\nall_list.append( get_spiral_contours( [-350,0], NUMBER_OF_WHORL, NUMBER_OF_SECTION, CONTOUR_WIDTH, CONTOUR_LENGTH, CONTOUR_POINT_COUNT, SCALE_FACTOR, RADIUS, [-15, 0, 20 ], -45, COUNTER_CLOCKWISE ) )\n\nmesh = bpy.data.meshes.new(\"mesh\") # add a new mesh\nobj = bpy.data.objects.new(\"MyObject\", mesh) # add a new object using the mesh\n\nscene = bpy.context.scene\nscene.objects.link(obj) # put the object into the scene (link)\nscene.objects.active = obj # set as the active object in the scene\nobj.select = True # select object\n\nmesh = bpy.context.object.data\nbm = bmesh.new()\n\nvert_list = []\n#for centroid in centroid_list:\n# vert_list.append( bm.verts.new( centroid ) )\n\n#for idx in range( len( vert_list )-1):\n# bm.edges.new((vert_list[idx], vert_list[idx+1]))\nfor contour_list in all_list:\n all_vert = []\n for 
contour in contour_list:\n vert_in_a_contour = []\n for v in contour:\n vert_in_a_contour.append(bm.verts.new(v))\n #for i in range( len( contour ) ):\n # bm.edges.new((vert_in_a_contour[i], vert_in_a_contour[i + 1]))\n all_vert.append( vert_in_a_contour )\n\n for i in range( len( all_vert ) - 1):\n for j in range( len( all_vert[i]) ):\n idx1 = j\n idx2 = ( j + 1 ) % CONTOUR_POINT_COUNT\n face1 = bm.faces.new((all_vert[i][idx1], all_vert[i][idx2], all_vert[i+1][idx2], all_vert[i+1][idx1] ) )\n\n\nbm.to_mesh(mesh)\nobj.location = ( 0, -0.2, -0.1)\nobj.rotation_euler = ( math.radians( -60), 0, 0 )\nbm.free() # always do this when finished\n" }, { "alpha_fraction": 0.5514350533485413, "alphanum_fraction": 0.5772947072982788, "avg_line_length": 34.185001373291016, "blob_id": "c4369182406e149e09cc2b5816c32144687aef53", "content_id": "9dea72a1f063e6246f18cb0c28310f8f414fa50c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7038, "license_type": "no_license", "max_line_length": 212, "num_lines": 200, "path": "/crura_extractor.py", "repo_name": "jikhanjung/spiralia", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n#import contour_pair_list\n\nret_str = \"\"\n\ncoord_correction = []\nfor i in range(18):\n coord_correction.append( [-1028,-774])\n\ncoord_correction[4][1] += -2\ncoord_correction[5][1] += 0\ncoord_correction[6][1] += 0\ncoord_correction[7][1] += 5\ncoord_correction[8][1] += 10\ncoord_correction[9][1] += 0\n\nnum_point = 8\nall_contours = []\nall_contours = []\nall_centroids = []\nSCALE_FACTOR = 100\n\nfor i in range(7):\n num = '0' + str(i+1)\n filename = 'images/Spiriferella-crura-' + num[-2:]\n print(filename)\n im = cv2.imread( filename + '.png')\n\n imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n ret,thresh = cv2.threshold(imgray,127,255,0)\n image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n j = 0\n centroids_in_a_section = []\n contours_in_a_section = []\n x_correction = coord_correction[i][0]\n y_correction = coord_correction[i][1]\n\n for c in contours:\n max_x = [0, 0]\n min_x = [9999, 9999]\n idx = 0\n max_idx = -1\n min_idx = -1\n num_point = 16\n idx_list = []\n for pt in c:\n if pt[0][0] > max_x[0]:\n max_x = pt[0]\n max_idx = idx\n if pt[0][0] < min_x[0]:\n min_x = pt[0]\n min_idx = idx\n idx += 1\n idx_diff1 = int( ( max_idx - min_idx ) / ( num_point / 2))\n idx_diff2 = int( ( min_idx + ( len(c) -max_idx ) ) / (num_point /2))\n for k in range(int(num_point /2)):\n idx_list.append( int( min_idx + idx_diff1 * k ) )\n for k in range(int(num_point /2)):\n idx_list.append(int(max_idx + idx_diff2 * k )%len(c))\n print( max_x, min_x, max_idx, min_idx, idx_list, len(c))\n simp_cont = []\n for k in range(num_point):\n simp_cont.append( [ c[idx_list[k]][0][0]+x_correction, c[idx_list[k]][0][1]+y_correction, (i-9)*40.3 ])\n print( simp_cont )\n\n contours_in_a_section.append( simp_cont )\n cv2.drawContours(im, [c], -1, (0, 255, 0), 3)\n\n M = cv2.moments(c)\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n cv2.putText(im,str(j) ,(cx,cy),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,255)\n\n centroids_in_a_section.append( [ cx+x_correction, -1 * (cy+y_correction ), (i-9)*40.3 ] )\n verts = []\n for v in simp_cont:\n verts.append( \"(\" + \",\".join( [ str( coord ) for coord in v ] ) + \")\" )\n #contour_list.append( \"[\" + \",\\n\".join( verts ) + \"]\" )\n cv2.imwrite('images/Spiriferella-crura-labeled-' + num[-2:] + '.png', im)\n\n j += 1\n\n 
all_centroids.append( centroids_in_a_section )\n all_contours.append( contours_in_a_section )\n\nall_contours_str_list = []\nfor contours_in_a_section in all_contours:\n contour_str_list = []\n for contour in contours_in_a_section:\n v_str_list = []\n for v in contour:\n #print(\"v:\",v)\n v_str = \"(\" + \",\".join( [str(x/SCALE_FACTOR) for x in v]) + \")\"\n v_str_list.append( v_str )\n contour_str = \"[\" + \",\".join( v_str_list ) + \"]\"\n #print( \"contour_str:\", contour_str )\n contour_str_list.append( contour_str )\n contours_in_a_section_str = \"[\" + \",\\n\".join( contour_str_list ) + \"]\"\n all_contours_str_list.append( contours_in_a_section_str )\nret_str += \"contour_list=[\"\nret_str += \",\\n\".join( all_contours_str_list )\nret_str += \"]\\n\"\n\nall_centroids_str_list = []\n\nfor centroids_in_a_section in all_centroids:\n centroids_str_list = []\n centroids_in_a_section_str = \"\"\n for centroid in centroids_in_a_section:\n centroid_str = \"(\" + \",\".join( [ str(x/SCALE_FACTOR) for x in centroid ] )+\")\"\n print( centroid_str )\n centroids_str_list.append( centroid_str )\n centroids_in_a_section_str = \"[\" + \",\\n\".join( centroids_str_list ) + \"]\"\n all_centroids_str_list.append( centroids_in_a_section_str )\nret_str += \"centroid_list=[\" + \",\".join(all_centroids_str_list) + \"]\\n\"\n\n\nret_str += \"\"\"\nimport bpy\nimport bmesh\nimport time \n\nmesh = bpy.data.meshes.new(\"mesh\") # add a new mesh\nobj = bpy.data.objects.new(\"MyObject\", mesh) # add a new object using the mesh\n\nscene = bpy.context.scene\nscene.objects.link(obj) # put the object into the scene (link)\nscene.objects.active = obj # set as the active object in the scene\nobj.select = True # select object\n\nmesh = bpy.context.object.data\nbm = bmesh.new()\n\n# contour \nall_contour_model = []\nsection_index = 0\nfor contours_in_a_section in contour_list:\n contour_model_in_a_section = []\n\n contour_index = 0\n for contour in contours_in_a_section:\n contour_model = { 'index':[section_index+1,contour_index],'v':[], 'e':[], 'bottom':True, 'top':True, 'above':[], 'below':[], 'coords':[], 'branching_processed': False, 'centroid':[], 'contour_length':-1 }\n contour_model['contour_length'] = len( contour )\n for v in contour:\n contour_model['v'].append( bm.verts.new(v) )\n contour_model['coords'].append( v )\n for i in range( len( contour_model['v'] ) ):\n contour_model['e'].append( bm.edges.new( ( contour_model['v'][i],contour_model['v'][(i+1)%len(contour_model['v'])] ) ) )\n contour_model['centroid'] = centroid_list[section_index][contour_index]\n contour_model_in_a_section.append( contour_model )\n contour_index += 1\n all_contour_model.append( contour_model_in_a_section )\n section_index += 1\n\n\n# contour model connectivity processing\nfor contour_model_in_a_section in all_contour_model:\n contour1 = contour_model_in_a_section[0].copy()\n contour2 = contour_model_in_a_section[1].copy()\n\n if contour1['coords'][0] > contour2['coords'][0]:\n contour_model_in_a_section[0] = contour2\n contour_model_in_a_section[1] = contour1\n\n# close top and bottom\nface1 = bm.faces.new( tuple( all_contour_model[0][0]['v'] ) )\nface1 = bm.faces.new( tuple( all_contour_model[0][1]['v'] ) )\nface1 = bm.faces.new( tuple( reversed( all_contour_model[-1][0]['v'] ) ) )\nface1 = bm.faces.new( tuple( reversed( all_contour_model[-1][1]['v'] ) ) )\n\n# create faces\nfor i in range( len( all_contour_model ) - 1 ): \n \n contours_in_below_section = all_contour_model[i]\n contours_in_above_section = 
all_contour_model[i+1]\n\n for j in range(2):\n contour1 = contours_in_below_section[j]\n contour2 = contours_in_above_section[j]\n print( contour1['coords'] )\n print( contour2['coords'] )\n for k in range( len( contour1['v'] ) ):\n idx1 = k \n idx2 = ( idx1 + 1 ) % len( contour1['v'] )\n print( i, j, idx1, idx2 )\n face1 = bm.faces.new( ( contour1['v'][idx2], contour1['v'][idx1], contour2['v'][idx1], contour2['v'][idx2] ) )\n # make the bmesh the object's mesh\n bm.to_mesh(mesh) \n #time.sleep(2)\n\n\nbm.free() # always do this when finished\n\"\"\"\n\nfile = open(\"Spiriferella_crura_contour_.py\", \"w\")\nfile.write(ret_str)\nfile.close()\n\n#print( all_contours )\n\n" } ]
4
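The three spiralia scripts above all derive the cap height of a fused contour from the same step, z_diff = R - R*cos(asin(d/R)), which is the sagitta of a circular arc with radius R and half-chord d. A minimal standalone sketch of that relation; the function name and the sample numbers are illustrative, not taken from the repository:

```python
import math

def cap_height(half_chord, radius):
    # Sagitta of a circular arc: a chord with half-length d on a circle of
    # radius R sits h = R - R*cos(asin(d/R)) = R - sqrt(R**2 - d**2) below
    # the arc's apex. This mirrors the z_diff step in the scripts above.
    theta = math.asin(half_chord / radius)
    return radius - radius * math.cos(theta)

# Illustrative numbers only: a contour whose mean half-width is 30 units,
# capped on a sphere of radius 300 units, bulges out by about 1.5 units.
print(cap_height(30.0, 300.0))  # ~1.504
```

In the generated Blender code this height scales the centroid-to-centroid direction vector when the apex vertex of each cap is placed.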
Sudoxo/Firehose_Scanner
https://github.com/Sudoxo/Firehose_Scanner
d22f8aa69367072fb9bb4e9179aa25c3fae2325d
17a431e45a16a385c4904478be8261a4d9baa89c
c8e1e2f6eb386edb6b5cbb2382b6002665745fe6
refs/heads/master
2020-12-05T00:29:39.129142
2020-01-16T22:18:28
2020-01-16T22:18:28
231,950,877
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48342788219451904, "alphanum_fraction": 0.492087185382843, "avg_line_length": 38.738094329833984, "blob_id": "47100d5af352b3421b7c706a1a204ad0bc79cbc7", "content_id": "d3985441880e5cb4ec0cc85efcf2703676b4132f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3349, "license_type": "no_license", "max_line_length": 108, "num_lines": 84, "path": "/main.py", "repo_name": "Sudoxo/Firehose_Scanner", "src_encoding": "UTF-8", "text": "import sqlite3\nimport urllib.request, json\nimport time\nimport os.path\nfrom os import path\n\ndef int_to_time(x):\n return time.strftime('%H:%M:%S', time.gmtime(x+3600))\nlast = [0,\"\",\"\",\"\"]\ndef parse(c, conn):\n global last\n with urllib.request.urlopen(\"https://www.meneame.net/backend/sneaker2?time=\" + str(last[0] - 1)) as url:\n data = json.loads(url.read().decode())\n flag = 0\n for e in reversed(data['events']):\n lin = e['link']\n tit = e['title']\n act = e['type']\n wh = e['who']\n t = int_to_time(int(e['ts']))\n if(flag == 0):\n if(int(e['ts']) < last[0]):\n continue\n if(int(e['ts']) == last[0]):\n if(wh == last[1] and act == last[2] and lin == last[3]):\n flag = 1\n continue\n flag = 1\n print(str(t) + \" \" + tit)\n if(act == \"post\" or act == \"new\"):\n print(\"Inserted: \" + tit)\n c.execute('INSERT INTO articles VALUES (?,?,?,?)', [tit, lin, wh, t])\n conn.commit()\n \n else:\n c.execute('SELECT count(*) FROM articles WHERE title = ?', [tit])\n if(c.fetchone()[0] == 1):\n #VOTES\n if(act == \"vote\"):\n print(\"Inserted action: \" + act + \" on title: \" + tit)\n c.execute('INSERT INTO votes VALUES (?,?,?)', [tit, wh, t])\n conn.commit()\n #PROBLEMS\n elif(act == \"problem\"):\n print(\"Inserted action: \" + act + \" on title: \" + tit)\n c.execute('INSERT INTO problems VALUES (?,?,?)', [tit, wh, t])\n conn.commit()\n #COMMENTS\n elif(act == \"comment\"):\n print(\"Inserted action: \" + act + \" on title: \" + tit)\n c.execute('INSERT INTO comments VALUES (?,?,?)', [tit, wh, t])\n conn.commit()\n #EDITS\n elif(act == \"cedited\"):\n print(\"Inserted action: \" + act + \" on title: \" + tit)\n c.execute('INSERT INTO edits VALUES (?,?,?)', [tit, wh, t])\n conn.commit()\n\n last[0] = int(data['events'][0]['ts'])\n last[1] = data['events'][0]['who']\n last[2] = data['events'][0]['type']\n last[3] = data['events'][0]['link']\ndef main():\n x = path.exists('articles.db')\n conn = sqlite3.connect('articles.db')\n c = conn.cursor()\n #Create tables\n if(not x):\n c.execute('''CREATE TABLE articles\n (title varchar PRIMARY KEY, link varchar, who varchar, creation_time time)''')\n c.execute('''CREATE TABLE votes\n (title varchar, who varchar, action_time time, FOREIGN KEY (title) REFERENCES articles (title))''')\n c.execute('''CREATE TABLE problems\n (title varchar, who varchar, action_time time, FOREIGN KEY (title) REFERENCES articles (title))''')\n c.execute('''CREATE TABLE comments\n (title varchar, who varchar, action_time time, FOREIGN KEY (title) REFERENCES articles (title))''')\n c.execute('''CREATE TABLE edits\n (title varchar, who varchar, action_time time, FOREIGN KEY (title) REFERENCES articles (title))''')\n conn.commit()\n while(True):\n parse(c, conn)\n \n\nmain()\n \n\n\n" }, { "alpha_fraction": 0.7508417367935181, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 36.125, "blob_id": "83556e1bd2b5d53faa577fff5b247b10458e30b1", "content_id": "7dc86d0f07a64e1f529ccebeb5436940043e4e67", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 297, "license_type": "no_license", "max_line_length": 78, "num_lines": 8, "path": "/README.md", "repo_name": "Sudoxo/Firehose_Scanner", "src_encoding": "UTF-8", "text": "# Firehose_Scanner\nParses and stores in database data from website: https://www.meneame.net/sneak\nJSON: https://www.meneame.net/backend/sneaker2\n\n### Usage:\n- run main.py with python3\n- articles and actions will be stored in file \"articles.db\"\n- if you want to see database you can run read_db.py\n" }, { "alpha_fraction": 0.48787063360214233, "alphanum_fraction": 0.49056604504585266, "avg_line_length": 24.413793563842773, "blob_id": "7afa970cc7604f164321f38d296cf214e7297d3d", "content_id": "072eda2325014ed606d089c132c3ec9e518f2d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 51, "num_lines": 29, "path": "/read_db.py", "repo_name": "Sudoxo/Firehose_Scanner", "src_encoding": "UTF-8", "text": "import sqlite3\n\n\ndef read_db():\n conn = sqlite3.connect('articles.db')\n c = conn.cursor()\n print(\"#################\")\n print(\"ARTICLES:\")\n for row in c.execute('SELECT * FROM articles'):\n print(row)\n print(\"#################\")\n print(\"VOTES:\")\n for row in c.execute('SELECT * FROM votes'):\n print(row)\n print(\"#################\")\n print(\"PROBLEMS:\")\n for row in c.execute('SELECT * FROM problems'):\n print(row)\n print(\"#################\")\n print(\"COMMENTS:\")\n for row in c.execute('SELECT * FROM comments'):\n print(row)\n print(\"#################\")\n print(\"EDITS:\")\n for row in c.execute('SELECT * FROM edits'):\n print(row)\n conn.close()\n\nread_db()\n \n" } ]
3
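main.py above only inserts rows and read_db.py only dumps each table in full; a natural follow-up is an aggregate query. A sketch (not part of the repository) that counts votes per article against the exact schema main.py creates:

```python
import sqlite3

# Join the articles and votes tables that main.py fills in; the table and
# column names match the CREATE TABLE statements above.
conn = sqlite3.connect('articles.db')
c = conn.cursor()
c.execute('''SELECT a.title, COUNT(v.title) AS n_votes
             FROM articles a LEFT JOIN votes v ON v.title = a.title
             GROUP BY a.title
             ORDER BY n_votes DESC
             LIMIT 10''')
for title, n_votes in c.fetchall():
    print(n_votes, title)
conn.close()
```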
KarisBurnett/Movies-ETL
https://github.com/KarisBurnett/Movies-ETL
6abcf892a12db93e2c23e2e340091b69cfb75b8b
4a6f80a118785ed081ea8e4b9d554bb142bfd76c
0799c02ff0efa141d28fc1a5795266ae577fb69e
refs/heads/main
2023-07-10T00:07:39.447920
2021-08-15T22:26:06
2021-08-15T22:26:06
392,179,045
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 26, "blob_id": "533bb65517d6a76cb95e82cfc766b70563293a26", "content_id": "a9fdec6b44d53a0185f899577aa84da44c1c33df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "no_license", "max_line_length": 26, "num_lines": 1, "path": "/Movies_ETL/config.py", "repo_name": "KarisBurnett/Movies-ETL", "src_encoding": "UTF-8", "text": "db_password = 'Littlebit420'" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 108.66666412353516, "blob_id": "86b426c9bdf197ac2add9f8f5c1750d0c12ca6eb", "content_id": "982f62ba83a3b03db5a25b86b63998a060aa6e0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 330, "license_type": "no_license", "max_line_length": 301, "num_lines": 3, "path": "/README.md", "repo_name": "KarisBurnett/Movies-ETL", "src_encoding": "UTF-8", "text": "# Movies-ETL\n## Objective \nTo create an automated pipeline that takes in new data, performs the appropriate transformations and loads the data into existing tables. Create one function that takes in all thre files(Wiki_data, Kaggle_data and MovieLens rating data) and perform the ETL process by addng the data to a SQL database. \n" } ]
2
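The README above promises a single function that ingests the three sources and loads a SQL database, but this snapshot of the repository only contains config.py and the README. A skeleton of what that function could look like; the file formats, the pandas/SQLAlchemy stack, the connection string and the movie_data database name are all assumptions rather than facts about the repo (only db_password comes from its config.py):

```python
import json

import pandas as pd
from sqlalchemy import create_engine

from config import db_password  # the one value the repository does provide

def extract_transform_load(wiki_file, kaggle_file, ratings_file):
    # Extract the three raw sources (hypothetical formats: JSON for the
    # Wikipedia data, CSV for the Kaggle metadata and the ratings).
    with open(wiki_file) as f:
        wiki_movies = pd.DataFrame(json.load(f))
    kaggle_metadata = pd.read_csv(kaggle_file, low_memory=False)
    ratings = pd.read_csv(ratings_file)

    # Transformation steps (cleaning, merging, deduplication) would go here.

    # Load into PostgreSQL; host, port and database name are illustrative.
    engine = create_engine(
        f'postgresql://postgres:{db_password}@127.0.0.1:5432/movie_data')
    kaggle_metadata.to_sql(name='movies', con=engine, if_exists='replace')
    return wiki_movies, kaggle_metadata, ratings
```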
RafaelAdao/cursomachinelearningalura
https://github.com/RafaelAdao/cursomachinelearningalura
2dd03c4e79cf3740a9bc654cb1be12326a1b345b
b4c4ac3f675adb07df1b16dd9fd515001a16727a
b8bdfd16d6553354ae48a0a87303dd747158eb1c
refs/heads/master
2021-01-25T10:34:49.978565
2018-03-02T01:40:38
2018-03-02T01:40:38
123,359,835
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6976743936538696, "alphanum_fraction": 0.7159468531608582, "avg_line_length": 23.16666603088379, "blob_id": "9d8f7b645d6a5f2c503f4a1e1eccd4209103d7ba", "content_id": "038756ec37751f105c43fdfe50c9cc9f247e497d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "permissive", "max_line_length": 59, "num_lines": 24, "path": "/classifica_acesso.py", "repo_name": "RafaelAdao/cursomachinelearningalura", "src_encoding": "UTF-8", "text": "from dados import carregar_acessos\r\n\r\nX, Y = carregar_acessos()\r\n\r\ntreino_dados = X[:90]\r\ntreino_marcacoes = Y[:90]\r\n\r\nteste_dados = X[-9:]\r\nteste_marcacoes = Y[-9:]\r\n\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nmodelo = MultinomialNB()\r\nmodelo.fit(treino_dados, treino_marcacoes)\r\nresultado = modelo.predict(teste_dados)\r\n\r\ndiferencas = resultado - teste_marcacoes\r\nacertos = [d for d in diferencas if d == 0]\r\n\r\ntotal_de_acertos = len(acertos)\r\ntotal_de_elementos = len(teste_marcacoes)\r\ntaxa_acerto = 100.0 * total_de_acertos / total_de_elementos\r\n\r\nprint(taxa_acerto)\r\nprint(total_de_elementos)" }, { "alpha_fraction": 0.5767741799354553, "alphanum_fraction": 0.6529031991958618, "avg_line_length": 22.21875, "blob_id": "c67a6740ea17e011259d2dd6118bff302f8d580f", "content_id": "15bbd4028e5216ed3a9d18218a315592dcfdcb7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 775, "license_type": "permissive", "max_line_length": 65, "num_lines": 32, "path": "/classificacao.py", "repo_name": "RafaelAdao/cursomachinelearningalura", "src_encoding": "UTF-8", "text": "porco1 = [1, 1, 0]\r\nporco2 = [1, 1, 0]\r\nporco3 = [1, 1, 0]\r\n\r\ncachorro1 = [1, 1, 1]\r\ncachorro2 = [0, 1, 1]\r\ncachorro3 = [0, 1, 1]\r\n\r\ndados = [porco1, porco2, porco3, cachorro1, cachorro2, cachorro3]\r\n\r\nmarcacoes = [1,1,1,-1,-1,-1]\r\n\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nmodelo = MultinomialNB()\r\nmodelo.fit(dados, marcacoes)\r\n\r\nmisterioso1 = [1, 1, 1]\r\nmisterioso2 = [1, 0, 0]\r\nmisterioso3 = [1, 0, 1]\r\n\r\nteste = [misterioso1, misterioso2, misterioso3]\r\nmarcacoes_teste = [-1, 1, -1]\r\n\r\nresultado = modelo.predict(teste)\r\ndiferencas = resultado - marcacoes_teste\r\n\r\nacertos = [d for d in diferencas if d==0]\r\ntotal_de_acertos = len(acertos)\r\ntotal_de_elementos = len(teste)\r\n\r\ntaxa_de_acerto = 100.0 * total_de_acertos / total_de_elementos\r\nprint(taxa_de_acerto)\r\n" } ]
2
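Both scripts above measure accuracy by subtracting the prediction array from the expected labels and counting the zeros. scikit-learn ships a helper that computes the same number; a sketch reusing the toy data from classificacao.py:

```python
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score

# Same toy features and labels as classificacao.py (1 = pig, -1 = dog).
dados = [[1, 1, 0], [1, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 1, 1]]
marcacoes = [1, 1, 1, -1, -1, -1]
teste = [[1, 1, 1], [1, 0, 0], [1, 0, 1]]
marcacoes_teste = [-1, 1, -1]

modelo = MultinomialNB()
modelo.fit(dados, marcacoes)
resultado = modelo.predict(teste)

# accuracy_score returns the fraction of matching labels, so this prints
# the same percentage as the manual subtract-and-count loop.
print(100.0 * accuracy_score(marcacoes_teste, resultado))
```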
LJRH/FCCnp
https://github.com/LJRH/FCCnp
e58070ae1e94dceb1642c129f66c475d461838f3
832e66915b5ceca9f7668f4007a810717109012a
2eb107f526756a2d77cb02d5e5d32d9e691aee1b
refs/heads/master
2020-05-03T11:44:00.273008
2019-03-30T20:31:18
2019-03-30T20:31:18
178,607,853
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5928270220756531, "alphanum_fraction": 0.6265822649002075, "avg_line_length": 28.625, "blob_id": "cbcae7808a92f20ba15c6a0c9a3a08a05db456b5", "content_id": "d7a78328c94b229bed59fc334b22faa9f36e89ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 129, "num_lines": 16, "path": "/setup.py", "repo_name": "LJRH/FCCnp", "src_encoding": "UTF-8", "text": "from distutils.core import setup\nimport py2exe, sys\nfrom glob import glob\n\nsys.path.append(\"C:\\\\Program Files\\\\Microsoft Visual Studio 9.0\\\\VC\\\\redist\\\\x86\\\\Microsoft.VC90.CRT\")\ndata_files = [(\"Microsoft.VC90.CRT\", glob(r'C:\\Program Files\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT\\*.*'))]\n\nsetup(\n data_files=data_files,\n windows = [\n {\n \"script\": \"FCCnp-size.py\",\n \"icon_resources\": [(1, \"fcc.ico\")]\n }\n ],\n)\n" }, { "alpha_fraction": 0.6022530198097229, "alphanum_fraction": 0.6164066791534424, "avg_line_length": 34.32653045654297, "blob_id": "d8a4305a90a792488fd4069fc9b7542965391483", "content_id": "1960ed620c92c0389aa811879fdd6b74a2aaf598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3462, "license_type": "no_license", "max_line_length": 162, "num_lines": 98, "path": "/FCCnp-size.py", "repo_name": "LJRH/FCCnp", "src_encoding": "UTF-8", "text": "\"\"\"\nL.Higgins 11-vi-18\nThis is a piece of code to use the equation from 'Gardea-Torresday L Environ Sci Technol 34:4392.'. User inputs the ratio between\nthe metallic FCC nanoparticles coordination number, N, and the coordination number of the standard foil, N_{foil}. It then takes\nthe average bond length, d. Both of these values are collected by using EXAFS modelling for the nanoparticles compared to the foil.\nThe equation is then solved using a numeric method (see: sympy.solve), yielding three solutions (since it is a cubic eqn.). Take\nthe third solution as the average nanoparticle radius, R. N.B. This can only work when using a 1st shell EXAFS fitting!\n\nI've designed this to be user friendly, ctrl-c will always break.\n\"\"\"\ntry:\n from sympy.solvers import solve\n from sympy import Symbol\nexcept:\n print('//WARNING// Please install the Sympy module: https://docs.sympy.org/latest/install.html ')\n\ntry:\n import numpy as np\n import sys\nexcept:\n print('//WARNING// Code requires numpy and sys to be installed')\n\ncontinuing=True\n\ndef inp(txt):\n if sys.version_info[0] == 3:\n return input(txt)\n elif sys.version_info[0] == 2:\n return raw_input(txt)\n else:\n print('FCC requires python > 2')\n\ndef get_N():\n# Asks the user to give the ratio between the coordination numbers in a user-friendly style.\n N = Symbol('N')\n try:\n N = float(inp('Please input N/N_foil: \\n'))\n if N>0 and N<1:\n return N\n else:\n print('N must be a ratio between 0 and 1 exclusive, try again:\\n')\n return get_N()\n except ValueError:\n print('N/Nfoil must be a float, try again:\\n')\n return get_N()\n except KeyboardInterrupt:\n print('\\n -- Exiting because of keyboard Interuption -- \\n')\n sys.exit()\n\n\ndef get_d():\n# Asks the user to give the average 1st shell bond length in a user-friendly style.\n N = Symbol('d')\n try:\n d = float(inp('Please input d e.g. 
Au ~ 2.85: '))\n    except ValueError:\n        print('d must be a decimal number, try again:')\n        return get_d()\n    except KeyboardInterrupt:\n        print('\\n -- Exiting because of keyboard Interruption -- \\n')\n        sys.exit()\n    return d\n\ndef cont():\n# Asks the user whether they want to continue in a user-friendly style.\n    try:\n        poss_ans = ['y','Y','n','N']\n        var = str(inp('Would you like to continue? y/n \\n'))\n        if var in poss_ans:\n            if var.lower() == 'n':\n                return False\n            elif var.lower() == 'y':\n                return True\n        else:\n            print('please enter y/Y or n/N, try again:\\n')\n            return cont()\n    except KeyboardInterrupt:\n        print('\\n -- Exiting because of keyboard Interruption -- \\n')\n        sys.exit()\n\ndef find_F0(N,d):\n# Numeric solution of the Gardea equation using sympy.solve - no user involvement\n    R = Symbol('R')\n    F = (R**3 * (N-1)) + (0.75 * d * R**2) - (0.0625 * d**3)\n    sol = solve(F,R,cubic=True)\n    print('The solutions to F0 are: \\n', sol)\n\nwhile continuing:\n# Continues until user breaks with either ctrl-c or 'n'.\n    N = get_N()\n    print('you entered: ', N)\n    d = get_d()\n    print('you entered:', d)\n    find_F0(N,d)\n    continuing = cont()\n\nprint(' Thanks for using the FCC minimum tool! \\n   #please cite#\\n   #\"Gardea-Torresday L, Tiemann KJ, Gamez G, et al.#\\n   #(2000); Environ Sci Technol 34:4392.\"#')\nsys.exit()\n" } ]
2
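FCCnp-size.py above solves F(R) = R**3*(N-1) + 0.75*d*R**2 - 0.0625*d**3 = 0 symbolically with sympy. The same roots can be cross-checked numerically; a sketch with sample inputs (N = 0.8 and d = 2.85 are illustrative values, not taken from the paper):

```python
import numpy as np

N = 0.8   # sample N/N_foil coordination-number ratio (illustrative)
d = 2.85  # sample mean first-shell bond length, roughly Au-Au, in angstroms

# Coefficients of R**3*(N-1) + 0.75*d*R**2 + 0*R - 0.0625*d**3,
# highest power first, as np.roots expects.
roots = np.roots([N - 1.0, 0.75 * d, 0.0, -0.0625 * d ** 3])
real_roots = sorted(r.real for r in roots if abs(r.imag) < 1e-9)
print(real_roots)  # per the script's docstring, one of these is the radius R
```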
rickardlofberg/RiksdagenDataDownloader
https://github.com/rickardlofberg/RiksdagenDataDownloader
ba68eeea10236b45ba5db9fd2925ddc7e4c77967
0a54c3faaac9afb71de8f45c05025a96b82afa91
5375f985bf1330f28e17d97606f8896c93266583
refs/heads/master
2022-12-10T14:30:54.314213
2019-10-27T21:01:11
2019-10-27T21:01:11
124,126,756
2
0
null
2018-03-06T19:17:25
2022-06-25T21:10:55
2022-12-08T05:23:10
Python
[ { "alpha_fraction": 0.4635416567325592, "alphanum_fraction": 0.6875, "avg_line_length": 15, "blob_id": "2e976d8ebb8664a66d71ea3e1a7c409d2fd9d59f", "content_id": "01aabc2e5b844571321b4ec30c9343c524a439e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 192, "license_type": "no_license", "max_line_length": 22, "num_lines": 12, "path": "/requirements.txt", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "certifi==2019.3.9\nchardet==3.0.4\nFaker==1.0.4\nidna==2.8\nnose==1.3.7\npython-dateutil==2.8.0\nrequests==2.21.0\nresponses==0.10.6\nsix==1.12.0\ntext-unidecode==1.2\nurllib3==1.24.1\nxmltodict==0.12.0\n" }, { "alpha_fraction": 0.6823899149894714, "alphanum_fraction": 0.6834381818771362, "avg_line_length": 27.058822631835938, "blob_id": "f29cbcabbb95c573d908bebd79e8bd86fa56d63a", "content_id": "314167fd7ab8c93f615460ec083ce39e088bc27a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "no_license", "max_line_length": 66, "num_lines": 34, "path": "/RiksdagenDataDownloader/api.py", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Third-party imports...\nimport requests\n\n# Local imports\nfrom . import unzip as unzip\nfrom .riksdagen_client import RiksdagenClient\n\n\ndef uri_generator(data_format, collection=''):\n client = RiksdagenClient()\n return client.get_collection_uri(data_format, collection)\n\n\ndef download_and_yield(data_format, collection=''):\n for url in uri_generator(data_format, collection=''):\n data = requests.get(url)\n for document in unzip.yield_zip_content(data.content):\n yield document\n\n\ndef download_and_save(data_format, path, collection=''):\n client = RiksdagenClient()\n\n collections = [collection]\n if not collection:\n collections = client.available_collections()\n\n for collection in collections:\n for url in uri_generator(data_format, collection):\n data = requests.get(url)\n unzip.save_zip_content(data.content, path, collection)\n" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7028875350952148, "avg_line_length": 34.5945930480957, "blob_id": "0bf1301f31a98a8a5dbe53643aa0087254e56fa6", "content_id": "e5521d8b1654ddf7f4d18e3187da8822ddeccc52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1316, "license_type": "no_license", "max_line_length": 120, "num_lines": 37, "path": "/tests/test_client_xml_parsing.py", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "# Standard library imports...\nfrom unittest.mock import Mock, patch\n\n# Third-party imports...\nfrom nose.tools import assert_is_not_none, assert_equal, assert_true\n\n# Local imports...\nfrom dataset_metadata import xml_metadata\nfrom RiksdagenDataDownloader.riksdagen_client import RiksdagenClient\n\nclass TestClient(object):\n @classmethod\n def setup_class(cls):\n fake_xml = xml_metadata(5, 'xml', 'ip')\n mock_response = Mock()\n mock_response.return_value.content = fake_xml\n\n cls.mock_get_patcher = patch('RiksdagenDataDownloader.riksdagen_client.requests.get', side_effect=mock_response)\n cls.mock_get = cls.mock_get_patcher.start()\n\n @classmethod\n def teardown_class(cls):\n cls.mock_get_patcher.stop()\n\n def test_documents_not_empty_after_instansiation(self):\n riks_client = RiksdagenClient()\n 
assert_is_not_none(riks_client.documents) \n\n def test_expected_format_is_available(self):\n riks_client = RiksdagenClient()\n available_formats = riks_client.available_formats()\n assert_true('xml' in available_formats)\n\n def test_expected_collection_is_available(self):\n riks_client = RiksdagenClient()\n available_collections = riks_client.available_collections()\n assert_true('ip' in available_collections)" }, { "alpha_fraction": 0.6257745027542114, "alphanum_fraction": 0.6263940334320068, "avg_line_length": 25.032258987426758, "blob_id": "1ab4e8f3f5d47ee16f0ebfac4ddc2e2f7fb2b3f1", "content_id": "fc1a862e550b13a8099c979f83c847c28487efdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 76, "num_lines": 62, "path": "/RiksdagenDataDownloader/cli.py", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Standard libary imports\nimport argparse\n\nfrom .riksdagen_client import RiksdagenClient\nfrom . import api\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser(\n description=\"Retrive the data from data.riksdagen.se\")\n\n parser.add_argument(\n '--available-formats',\n default=False,\n help='Print out the available data formats',\n action='store_true')\n\n parser.add_argument(\n '--format',\n help='Specify the data format to download')\n\n parser.add_argument(\n '--available-collections',\n default=False,\n help='Print out the available collections',\n action='store_true')\n\n parser.add_argument(\n '--collection',\n help='Specify the collection to download. Default: all of them')\n\n parser.add_argument(\n '--dir',\n help='Directory to store output to')\n\n args = parser.parse_args()\n\n client = RiksdagenClient()\n\n if args.available_formats:\n print(\"Available formats:\")\n for available in client.available_formats():\n print(available)\n\n if args.available_collections:\n print(\"Available collections:\")\n for available in client.available_collections():\n print(available)\n\n data_format = args.format\n collection = args.collection\n directory = args.dir\n\n if data_format:\n if args.dir:\n api.download_and_save(data_format, directory, collection)\n else:\n for document in api.download_and_yield(data_format, collection):\n print(document)\n" }, { "alpha_fraction": 0.7866666913032532, "alphanum_fraction": 0.7866666913032532, "avg_line_length": 27.125, "blob_id": "08c2b8bf8cc5e57ecb5b6fb714adec0f97e21772", "content_id": "f0b9e7c40bbfd5c6022cac5e0b02cb6ec27399be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 225, "license_type": "no_license", "max_line_length": 90, "num_lines": 8, "path": "/README.md", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "Python code to download and save data from data.riksdagen.se to disk or stream the output.\n\nTODO/Things to contribute with:\n- Improve tests\n- Check spelling\n- Update comments\n- Improve exception handeling\n- Add new features?\n" }, { "alpha_fraction": 0.6165295839309692, "alphanum_fraction": 0.6259302496910095, "avg_line_length": 44.58928680419922, "blob_id": "bc6a0a9be4ecd828828c0adaa1f2092f908c1875", "content_id": "bff346d880f20e2d6c712716d7804fed347faef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2555, "license_type": "no_license", "max_line_length": 
278, "num_lines": 56, "path": "/tests/dataset_metadata.py", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Standard library imports \nimport xml.etree.cElementTree as ET\nimport datetime\nimport logging\nimport uuid\n\n# Third party libs\nfrom faker import Faker\nfrom faker.providers import address, date_time\n\nlogging.disable(logging.DEBUG)\n\nfake = Faker('en_GB')\nfake.add_provider(address)\nfake.add_provider(date_time)\n\n\ndef xml_metadata(number_of_datasets=1, set_format=None, set_collection=None):\n root = ET.Element(\"datasetlista\")\n\n datatypes = ['xml', 'json', 'zip', 'html', 'sql', 'csv', 'text']\n collections = ['anforande', 'bet', 'ds', 'EUN', 'f-lista', 'fpm', 'frsrdg', 'ip', 'kammakt', 'mot', 'Övrigt', 'prop', 'prot', 'Riksdagens diarium', 'rskr', 'samtr', 'Skriftliga frågor', 'sou', 't-lista', 'Utredningar', 'utskottsdokument', 'yttr', 'Ledamotsdata', 'votering']\n\n for _ in range(number_of_datasets):\n collection = set_collection or fake.random_element(elements=collections)\n \n start_date = datetime.date(year=1993, month=1, day=1)\n date = fake.date_time_between_dates(datetime_start=start_date)\n fake_date = '{:%Y-%m-%d %X}'.format(date)\n yyyy_slash_yy = '{}/{}'.format(date.year, str(date.year+1)[-2:])\n yyyy_yy = '{}{}'.format(date.year, str(date.year+1)[-2:])\n collection_date = '{}-{}'.format(collection, yyyy_slash_yy)\n\n data_format = set_format or fake.random_element(elements=datatypes)\n file_format = 'zip'\n file_name = '{}-{}.{}.{}'.format(collection, yyyy_yy, data_format, file_format)\n url = '/dataset/anforande/{}'.format(file_name)\n\n doc = ET.SubElement(root, \"dataset\")\n ET.SubElement(doc, 'namn').text = '{}'.format(collection)\n ET.SubElement(doc, 'typ').text = '{}'.format(collection)\n ET.SubElement(doc, 'samling').text = '{}'.format(collection_date)\n ET.SubElement(doc, 'rm').text = '{}'.format(yyyy_slash_yy)\n ET.SubElement(doc, 'filnamn').text = '{}'.format(file_name)\n ET.SubElement(doc, 'storlek').text = '{}'.format(fake.random_int(min=1000, max=3000000))\n ET.SubElement(doc, 'format').text = '{}'.format(data_format)\n ET.SubElement(doc, 'filformat').text = '{}'.format(file_format)\n ET.SubElement(doc, 'uppdaterad').text = '{}'.format(fake_date)\n ET.SubElement(doc, 'url').text = '{}'.format(url)\n ET.SubElement(doc, 'description').text = '{}'.format(fake.text())\n ET.SubElement(doc, 'upplysning').text = '{}'.format(fake.text())\n\n return ET.tostring(root,encoding='utf8', method='xml')\n" }, { "alpha_fraction": 0.6028947234153748, "alphanum_fraction": 0.605526328086853, "avg_line_length": 35.20000076293945, "blob_id": "d414688830f20ee2a6a4b5c1e65574a6f22a6d59", "content_id": "0e30e53824e570a4ef5d1298ffc9f09f72db2a2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3800, "license_type": "no_license", "max_line_length": 123, "num_lines": 105, "path": "/RiksdagenDataDownloader/riksdagen_client.py", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Standard library imports\nimport logging\n\n# Third-party imports...\nimport xmltodict\nimport requests\n\n\nclass RiksdagenClient:\n \"\"\" A class which acts as an interface to get URIs for the\n available datasets. It does this by downloading and parse\n an XML provided by riksdagen. 
The class will automatically\n be initilized with a link to an URL.\n Dataset URL last checked: 2018-03-11\n If it doesn't work you can initilize the class with another\n URI.\n \"\"\"\n\n def __init__(self, xml_url='http://data.riksdagen.se/dataset/katalog/dataset.xml'):\n self.base_url = 'http://data.riksdagen.se'\n self.documents = dict()\n\n # Get the metadata and return xmltodict object\n xml_dict = self._get_meta_data(xml_url)\n self._parse_data(xml_dict)\n\n def __str__(self):\n pass\n\n def _get_meta_data(self, xml_url):\n \"\"\" Helper method to retrive the XML with meta data \"\"\"\n xml_data = requests.get(xml_url)\n\n if xml_data:\n xml_dict = xmltodict.parse(xml_data.content, encoding='utf-8')\n return xml_dict\n else:\n logging.critical(\"Not able to retrive data about the dataset\")\n\n def _parse_data(self, xml_dict):\n \"\"\" Helper method to parse the data into new dictionaries\"\"\"\n for dataset in xml_dict['datasetlista']['dataset']:\n try:\n doc_format = dataset['format']\n doc_collection = dataset['typ']\n doc_url = dataset['url']\n\n self.documents[doc_format] = self.documents.get(doc_format, {})\n self.documents[doc_format][doc_collection] = self.documents[doc_format].get(doc_collection, []) + [doc_url]\n except Exception:\n logging.warning(f\"Could not parse dataset {dataset}\")\n\n def available_formats(self):\n \"\"\" Returns a list of all the available data formats \"\"\"\n return list(self.documents.keys())\n\n def available_collections(self):\n \"\"\" Returns a list of all the available documenttypes \"\"\"\n collections = []\n for collection_to_doc in self.documents.values():\n collections = [c for c in collection_to_doc.keys() if c not in collections]\n return collections\n\n def get_collection_uri(self, data_format, collection=''):\n \"\"\" Yield all the URIs to all the available datasets of that type\n and collection. Default is to yield for all collections\"\"\"\n try:\n collections = self.documents[data_format]\n except KeyError as key:\n logging.exception(f\"{data_format} is an invalid format\")\n raise key\n\n if collection:\n try:\n uris = collections[collection]\n except KeyError:\n logging.exception(f\"{collection} is an not a valid collection\")\n else:\n uris = [self.base_url + uri for uris in collections.values() for uri in uris]\n\n for uri in uris:\n yield uri\n\n def get_collection_uri_and_collection(self, data_format, collection=''):\n \"\"\" Yield all the URIs to all the available datasets of that type\n and collection. 
Default is to yield for all collections\"\"\"\n try:\n collections = self.documents[data_format]\n except KeyError as key:\n logging.exception(f\"{data_format} is an invalid format\")\n raise key\n\n if collection:\n try:\n uris = collections[collection]\n except KeyError:\n logging.exception(f\"{collection} is an not a valid collection\")\n else:\n uris = [self.base_url + uri for uris in collections.values() for uri in uris]\n\n for uri in uris:\n yield uri" }, { "alpha_fraction": 0.6770538091659546, "alphanum_fraction": 0.6789423823356628, "avg_line_length": 29.257143020629883, "blob_id": "0dae0ed2fbdbe5d68c373f1239fe1aa7237b2b67", "content_id": "d0d21069b9890887dc30cb794f6222ff230e33ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "no_license", "max_line_length": 66, "num_lines": 35, "path": "/RiksdagenDataDownloader/unzip.py", "repo_name": "rickardlofberg/RiksdagenDataDownloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Standard library imports\nimport io\nimport os\nimport zipfile\n\n\ndef yield_zip_content(request_content):\n \"\"\" Returns the raw data as a string from requests object\n which is a zipfile. \"\"\"\n # Read the Bytes into a ZipFile-Object\n zipdata = zipfile.ZipFile(io.BytesIO(request_content))\n\n for zipped_file in zipdata.namelist():\n yield zipdata.read(zipped_file).decode('utf-8')\n\n\ndef save_zip_content(request_content, directory='', subfolder=''):\n \"\"\" Saves the content of the zipfile to path. \"\"\"\n # Make sure we have the directory to save to\n if not os.path.exists(directory):\n raise Exception(\"Selected folder doesn't exists.\")\n\n if subfolder:\n directory = os.path.join(directory, subfolder)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Read the Bytes into a ZipFile-Object\n zipdata = zipfile.ZipFile(io.BytesIO(request_content))\n\n for file_name in zipdata.namelist():\n zipdata.extract(file_name, path=directory)\n" } ]
8